
Commit cb6fb59

sphinx_test: adds batched processing unit tests

committed · 1 parent e926602 · commit cb6fb59
File tree

1 file changed (+155 −12 lines)

sphinx_test.go

Lines changed: 155 additions & 12 deletions
@@ -9,7 +9,6 @@ import (
     "strconv"
     "testing"
 
-    "github.com/Crypt-iQ/lightning-onion/persistlog"
     "github.com/davecgh/go-spew/spew"
     "github.com/roasbeef/btcd/btcec"
     "github.com/roasbeef/btcd/chaincfg"
@@ -100,7 +99,9 @@ func newTestRoute(numHops int) ([]*Router, *[]HopData, *OnionPacket, error) {
                 " random key for sphinx node: %v", err)
         }
 
-        nodes[i] = NewRouter(privKey, &chaincfg.MainNetParams, nil)
+        dbPath := strconv.Itoa(i)
+
+        nodes[i] = NewRouter(dbPath, privKey, &chaincfg.MainNetParams, nil)
     }
 
     // Gather all the pub keys in the path.
@@ -182,7 +183,7 @@ func TestBolt4Packet(t *testing.T) {
 
 // shutdown deletes the temporary directory that the test database uses
 // and handles closing the database.
-func shutdown(dir string, d *persistlog.DecayedLog) {
+func shutdown(dir string, d ReplayLog) {
     os.RemoveAll(dir)
     d.Stop()
 }
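
The hunks above change how the tests wire a node to its replay log: the database path is now handed to NewRouter, the log is started with no arguments, and teardown goes through the ReplayLog interface rather than a concrete *persistlog.DecayedLog. Below is a minimal sketch of that setup/teardown pattern, assuming test-package scope; the helper name setupTestRouter is hypothetical, and any return value of log.Start() is ignored here because the tests in this diff do not check one.

// Hypothetical helper (not part of this commit) illustrating the
// setup/teardown pattern the updated tests follow.
func setupTestRouter(i int) (*Router, func(), error) {
    privKey, err := btcec.NewPrivateKey(btcec.S256())
    if err != nil {
        return nil, nil, err
    }

    // The DecayedLog's on-disk path is now passed to NewRouter rather
    // than to Start().
    dbPath := strconv.Itoa(i)
    node := NewRouter(dbPath, privKey, &chaincfg.MainNetParams, nil)

    // Start the replay log; the cleanup closure removes the temporary
    // database directory and stops the log via the ReplayLog interface.
    node.log.Start()
    cleanup := func() { shutdown(dbPath, node.log) }

    return node, cleanup, nil
}

A test would then call setupTestRouter, defer cleanup(), and proceed exactly as the updated hunks below do.
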
@@ -197,9 +198,9 @@ func TestSphinxCorrectness(t *testing.T) {
     // reaching the final destination.
     for i := 0; i < len(nodes); i++ {
         // Start each node's DecayedLog and defer shutdown
-        var tempDir = strconv.Itoa(i)
-        nodes[i].d.Start(tempDir)
-        defer shutdown(tempDir, nodes[i].d)
+        tempDir := strconv.Itoa(i)
+        nodes[i].log.Start()
+        defer shutdown(tempDir, nodes[i].log)
 
         hop := nodes[i]
 
@@ -262,8 +263,8 @@ func TestSphinxSingleHop(t *testing.T) {
     }
 
     // Start the DecayedLog and defer shutdown
-    nodes[0].d.Start("0")
-    defer shutdown("0", nodes[0].d)
+    nodes[0].log.Start()
+    defer shutdown("0", nodes[0].log)
 
     // Simulating a direct single-hop payment, send the sphinx packet to
     // the destination node, making it process the packet fully.
@@ -289,8 +290,8 @@ func TestSphinxNodeRelpay(t *testing.T) {
     }
 
     // Start the DecayedLog and defer shutdown
-    nodes[0].d.Start("0")
-    defer shutdown("0", nodes[0].d)
+    nodes[0].log.Start()
+    defer shutdown("0", nodes[0].log)
 
     // Allow the node to process the initial packet, this should proceed
     // without any failures.
@@ -305,6 +306,148 @@ func TestSphinxNodeRelpay(t *testing.T) {
     }
 }
 
+func TestSphinxNodeRelpaySameBatch(t *testing.T) {
+    // We'd like to ensure that replayed packets which share the same
+    // shared secret are flagged in the replay set of a single batch.
+    nodes, _, fwdMsg, err := newTestRoute(NumMaxHops)
+    if err != nil {
+        t.Fatalf("unable to create test route: %v", err)
+    }
+
+    // Start the DecayedLog and defer shutdown
+    nodes[0].log.Start()
+    defer shutdown("0", nodes[0].log)
+
+    tx := nodes[0].BeginTxn([]byte("0"), 2)
+
+    // Allow the node to process the initial packet, this should proceed
+    // without any failures.
+    if err := tx.ProcessOnionPacket(0, fwdMsg, nil); err != nil {
+        t.Fatalf("unable to process sphinx packet: %v", err)
+    }
+
+    // Now, force the node to process the packet a second time, this call
+    // should not fail, even though the batch has internally recorded this
+    // as a duplicate.
+    err = tx.ProcessOnionPacket(1, fwdMsg, nil)
+    if err != nil {
+        t.Fatalf("adding duplicate sphinx packet to batch should not "+
+            "result in an error, instead got: %v", err)
+    }
+
+    // Commit the batch to disk, then we will inspect the replay set to
+    // ensure the duplicate entry was properly included.
+    _, replaySet, err := tx.Commit()
+    if err != nil {
+        t.Fatalf("unable to commit batch of sphinx packets: %v", err)
+    }
+
+    if replaySet.Contains(0) {
+        t.Fatalf("index 0 was not expected to be in replay set")
+    }
+
+    if !replaySet.Contains(1) {
+        t.Fatalf("expected replay set to contain duplicate packet " +
+            "at index 1")
+    }
+}
+
+func TestSphinxNodeRelpayLaterBatch(t *testing.T) {
+    // We'd like to ensure that the sphinx node itself rejects all replayed
+    // packets which share the same shared secret.
+    nodes, _, fwdMsg, err := newTestRoute(NumMaxHops)
+    if err != nil {
+        t.Fatalf("unable to create test route: %v", err)
+    }
+
+    // Start the DecayedLog and defer shutdown
+    nodes[0].log.Start()
+    defer shutdown("0", nodes[0].log)
+
+    tx := nodes[0].BeginTxn([]byte("0"), 1)
+
+    // Allow the node to process the initial packet, this should proceed
+    // without any failures.
+    if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil); err != nil {
+        t.Fatalf("unable to process sphinx packet: %v", err)
+    }
+
+    _, _, err = tx.Commit()
+    if err != nil {
+        t.Fatalf("unable to commit sphinx batch: %v", err)
+    }
+
+    tx2 := nodes[0].BeginTxn([]byte("1"), 1)
+
+    // Now, force the node to process the packet a second time. The call
+    // itself should not fail; the replay only surfaces in Commit's replay set.
+    err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil)
+    if err != nil {
+        t.Fatalf("sphinx packet replay should not have been rejected, "+
+            "instead error is %v", err)
+    }
+
+    _, replays, err := tx2.Commit()
+    if err != nil {
+        t.Fatalf("unable to commit second sphinx batch: %v", err)
+    }
+
+    if !replays.Contains(0) {
+        t.Fatalf("expected replay set to contain index: %v", 0)
+    }
+}
+
+func TestSphinxNodeRelpayBatchIdempotency(t *testing.T) {
+    // We'd like to ensure that committing the same batch twice returns
+    // identical results, i.e. that batch commits are idempotent.
+    nodes, _, fwdMsg, err := newTestRoute(NumMaxHops)
+    if err != nil {
+        t.Fatalf("unable to create test route: %v", err)
+    }
+
+    // Start the DecayedLog and defer shutdown
+    nodes[0].log.Start()
+    defer shutdown("0", nodes[0].log)
+
+    tx := nodes[0].BeginTxn([]byte("0"), 1)
+
+    // Allow the node to process the initial packet, this should proceed
+    // without any failures.
+    if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil); err != nil {
+        t.Fatalf("unable to process sphinx packet: %v", err)
+    }
+
+    packets, replays, err := tx.Commit()
+    if err != nil {
+        t.Fatalf("unable to commit sphinx batch: %v", err)
+    }
+
+    tx2 := nodes[0].BeginTxn([]byte("0"), 1)
+
+    // Now, force the node to process the packet a second time, this should
+    // not fail with a detected replay error.
+    err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil)
+    if err != nil {
+        t.Fatalf("sphinx packet replay should not have been rejected, "+
+            "instead error is %v", err)
+    }
+
+    packets2, replays2, err := tx2.Commit()
+    if err != nil {
+        t.Fatalf("unable to commit second sphinx batch: %v", err)
+    }
+
+    if replays.Size() != replays2.Size() {
+        t.Fatalf("expected replay set to be %v, instead got %v",
+            replays, replays2)
+    }
+
+    if !reflect.DeepEqual(packets, packets2) {
+        t.Fatalf("expected packets to be %v, instead got %v",
+            packets, packets2)
+    }
+}
+
 func TestSphinxAssocData(t *testing.T) {
     // We want to make sure that the associated data is considered in the
     // HMAC creation
@@ -314,8 +457,8 @@ func TestSphinxAssocData(t *testing.T) {
     }
 
     // Start the DecayedLog and defer shutdown
-    nodes[0].d.Start("0")
-    defer shutdown("0", nodes[0].d)
+    nodes[0].log.Start()
+    defer shutdown("0", nodes[0].log)
 
     if _, err := nodes[0].ProcessOnionPacket(fwdMsg, []byte("somethingelse")); err == nil {
         t.Fatalf("we should fail when associated data changes")
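
Taken together, the three new tests exercise a batched-processing flow: BeginTxn opens a batch keyed by an ID with an expected packet count, ProcessOnionPacket buffers each packet under its index within the batch, and Commit persists the batch and returns the processed packets plus a replay set of duplicate indices. The sketch below shows how a caller might drive that flow; it is hedged, not part of this commit. processBatch, batchID, and pkts are hypothetical names, the int batch-size argument and uint16 index type are inferred from the constants and conversions in the tests, and the concrete type of Commit's first return value is not shown in this diff, so it is left unused.

// Sketch only: one way a caller might drive the batched API exercised
// by the tests above. Router, BeginTxn, ProcessOnionPacket, Commit, and
// the replay set's Contains method all appear in this diff.
func processBatch(node *Router, batchID []byte, pkts []*OnionPacket) error {
    tx := node.BeginTxn(batchID, len(pkts))

    for i, pkt := range pkts {
        // Duplicates do not error here; they are reported by the
        // replay set returned from Commit.
        if err := tx.ProcessOnionPacket(uint16(i), pkt, nil); err != nil {
            return err
        }
    }

    // Commit persists the whole batch and reports which indices were
    // replays.
    _, replaySet, err := tx.Commit()
    if err != nil {
        return err
    }

    for i := range pkts {
        if replaySet.Contains(uint16(i)) {
            // Replayed packet: skip it rather than forwarding.
            continue
        }
        // Forward the corresponding processed packet here (omitted).
    }

    return nil
}
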
