
Commit 9e4b184

Merge pull request #16 from cfromknecht/sphinx-replay

Strong Replay Protection and Batched Processing

2 parents: dbb6dc0 + 7291f19

File tree: 12 files changed, +1396 / -70 lines

batch.go

Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
package sphinx

import "errors"

// ErrAlreadyCommitted signals that an entry could not be added to the
// batch because it has already been persisted.
var ErrAlreadyCommitted = errors.New("cannot add to batch after committing")

// Batch is an object used to incrementally construct a set of entries to add
// to the replay log. After construction is completed, it can be added to the
// log using the PutBatch method.
type Batch struct {
	// isCommitted denotes whether or not this batch has been successfully
	// written to disk.
	isCommitted bool

	// id is a unique, caller-chosen identifier for this batch.
	id []byte

	// entries stores the set of all potential entries that might get
	// written to the replay log. Some entries may be skipped after
	// examining the on-disk content at the time of commit.
	entries map[uint16]batchEntry

	// replayCache is an in-memory lookup table, which stores the hash
	// prefix of entries already added to this batch. This allows a quick
	// mechanism for intra-batch duplicate detection.
	replayCache map[HashPrefix]struct{}

	// replaySet contains the sequence numbers of all entries that were
	// detected as replays. The set is finalized upon writing the batch to
	// disk, and merges replays detected by the replay cache and the
	// on-disk replay log.
	replaySet *ReplaySet
}

// NewBatch initializes an object for constructing a set of entries to
// atomically add to a replay log. Batches are identified by byte slice, which
// allows the caller to safely process the same batch twice and get an
// idempotent result.
func NewBatch(id []byte) *Batch {
	return &Batch{
		id:          id,
		entries:     make(map[uint16]batchEntry),
		replayCache: make(map[HashPrefix]struct{}),
		replaySet:   NewReplaySet(),
	}
}

// Put inserts a hash-prefix/CLTV pair into the current batch. This method only
// returns an error in the event that the batch was already committed to disk.
// Decisions regarding whether or not a particular sequence number is a replay
// are ultimately reported via the batch's ReplaySet after committing to disk.
func (b *Batch) Put(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error {
	// Abort if this batch was already written to disk.
	if b.isCommitted {
		return ErrAlreadyCommitted
	}

	// Check to see if this hash prefix is already included in this batch.
	// If so, we will opportunistically mark this index as replayed.
	if _, ok := b.replayCache[*hashPrefix]; ok {
		b.replaySet.Add(seqNum)
		return nil
	}

	// Otherwise, this is a distinct hash prefix for this batch. Add it to
	// our list of entries that we will try to write to disk. Each of these
	// entries will be checked again during the commit to see if any other
	// on-disk entries contain the same hash prefix.
	b.entries[seqNum] = batchEntry{
		hashPrefix: *hashPrefix,
		cltv:       cltv,
	}

	// Finally, add this hash prefix to our in-memory replay cache; it will
	// be consulted on subsequent adds to check for duplicates within the
	// same batch.
	b.replayCache[*hashPrefix] = struct{}{}

	return nil
}

// batchEntry is a tuple of a secret's hash prefix and the corresponding CLTV
// at which the onion blob from which the secret was derived expires.
type batchEntry struct {
	hashPrefix HashPrefix
	cltv       uint32
}
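
For illustration, a minimal sketch of how a caller might use this API. The id, sequence numbers, and CLTV values are made up, and it assumes HashPrefix is a fixed-size byte array (it must at least be comparable, since it keys the replayCache map above):

	batch := NewBatch([]byte("session-42"))

	// The prefix would normally be derived from a packet's shared secret.
	var prefix HashPrefix
	copy(prefix[:], "example-prefix")

	// First insertion of this prefix is queued for the replay log.
	if err := batch.Put(0, &prefix, 500000); err != nil {
		// Only possible failure: the batch was already committed.
	}

	// Re-inserting the same prefix is not an error; sequence number 1 is
	// instead recorded in the batch's ReplaySet as an intra-batch replay.
	_ = batch.Put(1, &prefix, 500001)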

bench_test.go

Lines changed: 13 additions & 3 deletions
@@ -60,19 +60,29 @@ func BenchmarkProcessPacket(b *testing.B) {
 		b.Fatalf("unable to create test route: %v", err)
 	}
 	b.ReportAllocs()
+	path[0].log.Start()
+	defer shutdown("0", path[0].log)
 	b.StartTimer()
 
 	var (
 		pkt *ProcessedPacket
 	)
 	for i := 0; i < b.N; i++ {
-		pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil)
+		pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil, uint32(i))
 		if err != nil {
-			b.Fatalf("unable to process packet: %v", err)
+			b.Fatalf("unable to process packet %d: %v", i, err)
 		}
 
 		b.StopTimer()
-		path[0].seenSecrets = make(map[[sharedSecretSize]byte]struct{})
+		router := path[0]
+		shutdown("0", router.log)
+		path[0] = &Router{
+			nodeID:   router.nodeID,
+			nodeAddr: router.nodeAddr,
+			onionKey: router.onionKey,
+			log:      NewDecayedLog("0", nil),
+		}
+		path[0].log.Start()
 		b.StartTimer()
 	}
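The loop body above rebuilds the router between iterations because the decayed log persists shared secrets: replaying the identical packet against the same log would be rejected rather than benchmarked. A hedged sketch of that behavior in the style of the test above; the height value is hypothetical and the exact error returned on replay is not shown in this diff:

	// Hypothetical height for the third argument (uint32(i) in the loop).
	height := uint32(1000)

	// First pass: processing succeeds and the packet's shared secret is
	// persisted to the router's replay log.
	pkt, err := path[0].ProcessOnionPacket(sphinxPacket, nil, height)
	if err != nil {
		b.Fatalf("first pass should succeed: %v", err)
	}
	_ = pkt

	// Second pass with the identical packet: expected to fail as a replay
	// now that the secret is already on disk.
	if _, err := path[0].ProcessOnionPacket(sphinxPacket, nil, height); err == nil {
		b.Fatalf("expected replay to be rejected")
	}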
cmd/main.go

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ func main() {
 	}
 
 	privkey, _ := btcec.PrivKeyFromBytes(btcec.S256(), binKey)
-	s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params)
+	s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params, nil)
 
 	var packet sphinx.OnionPacket
 	err = packet.Decode(bytes.NewBuffer(binMsg))
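The CLI passes nil for the new replay-log parameter. A hedged sketch of what a non-nil argument might look like, reusing only calls visible elsewhere in this diff set (NewDecayedLog and log.Start from bench_test.go); the file name is illustrative:

	// Back the router with a persistent decayed log instead of nil.
	replayLog := sphinx.NewDecayedLog("sphinx.db", nil)
	replayLog.Start() // started bare here, as in the benchmark above

	s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params, replayLog)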
