diff --git a/book/api/metrics-generated.md b/book/api/metrics-generated.md
index 917c6d1e0fd..759a19f5de0 100644
--- a/book/api/metrics-generated.md
+++ b/book/api/metrics-generated.md
@@ -1118,3 +1118,70 @@
| benchs_transactions_sent | counter | Number of benchmark packets sent |
+
+## Snp Tile
+
+
+
+| Metric | Type | Description |
+|--------|------|-------------|
+| snp_all_dest_meta_cnt | counter | The number of destinations from which we have metadata (all connections) |
+| snp_all_dest_meta_snp_available_cnt | counter | The number of SNP-available destinations (all connections) |
+| snp_all_dest_meta_snp_enabled_cnt | counter | The number of SNP-enabled destinations (all connections) |
+| snp_all_conn_cur_total | counter | The number of current created connections (all connections) |
+| snp_all_conn_cur_established | counter | The number of currently established connections (all connections) |
+| snp_all_conn_acc_total | counter | The number of accumulated created connections (all connections) |
+| snp_all_conn_acc_established | counter | The number of accumulated established connections (all connections) |
+| snp_all_conn_acc_dropped | counter | The number of accumulated dropped connections (all connections) |
+| snp_all_conn_acc_dropped_handshake | counter | The number of accumulated dropped connections during handshake (all connections) |
+| snp_all_conn_acc_dropped_established | counter | The number of accumulated dropped connections that had been established (all connections) |
+| snp_all_conn_acc_dropped_set_identity | counter | The number of accumulated dropped connections due to set identity (all connections) |
+| snp_all_tx_bytes_via_udp_to_snp_avail_cnt | counter | The number of bytes sent via UDP to a SNP-available destination (all connections) |
+| snp_all_tx_pkts_via_udp_to_snp_avail_cnt | counter | The number of packets sent via UDP to a SNP-available destination (all connections) |
+| snp_all_tx_bytes_via_udp_cnt | counter | The number of bytes sent via UDP (all connections) |
+| snp_all_tx_bytes_via_snp_cnt | counter | The number of bytes sent via SNP (all connections) |
+| snp_all_tx_pkts_via_udp_cnt | counter | The number of packets sent via UDP (all connections) |
+| snp_all_tx_pkts_via_snp_cnt | counter | The number of packets sent via SNP (all connections) |
+| snp_all_tx_pkts_dropped_no_credits_cnt | counter | The number of packets that were not sent due to insufficient flow credits (all connections) |
+| snp_all_rx_bytes_cnt | counter | The number of bytes received (all connections) |
+| snp_all_rx_bytes_via_udp_cnt | counter | The number of bytes received via UDP (all connections) |
+| snp_all_rx_bytes_via_snp_cnt | counter | The number of bytes received via SNP (all connections) |
+| snp_all_rx_pkts_cnt | counter | The number of packets received (all connections) |
+| snp_all_rx_pkts_via_udp_cnt | counter | The number of packets received via UDP (all connections) |
+| snp_all_rx_pkts_via_snp_cnt | counter | The number of packets received via SNP (all connections) |
+| snp_all_rx_pkts_dropped_no_credits_cnt | counter | The number of packets that were received but dropped due to insufficient flow credits (all connections) |
+| snp_all_mcast_tx_pkts_cnt | counter | The number of packets sent to a multicast channel (all connections) |
+| snp_all_mcast_tx_bytes_cnt | counter | The number of bytes sent to a multicast channel (all connections) |
+| snp_all_mcast_rx_pkts_cnt | counter | The number of packets received from a multicast channel (all connections) |
+| snp_all_mcast_rx_bytes_cnt | counter | The number of bytes received from a multicast channel (all connections) |
+| snp_enf_dest_meta_cnt | counter | The number of destinations from which we have metadata (SNP-enforced connections only) |
+| snp_enf_dest_meta_snp_available_cnt | counter | The number of SNP-available destinations (SNP-enforced connections only) |
+| snp_enf_dest_meta_snp_enabled_cnt | counter | The number of SNP-enabled destinations (SNP-enforced connections only) |
+| snp_enf_conn_cur_total | counter | The number of current created connections (SNP-enforced connections only) |
+| snp_enf_conn_cur_established | counter | The number of currently established connections (SNP-enforced connections only) |
+| snp_enf_conn_acc_total | counter | The number of accumulated created connections (SNP-enforced connections only) |
+| snp_enf_conn_acc_established | counter | The number of accumulated established connections (SNP-enforced connections only) |
+| snp_enf_conn_acc_dropped | counter | The number of accumulated dropped connections (SNP-enforced connections only) |
+| snp_enf_conn_acc_dropped_handshake | counter | The number of accumulated dropped connections during handshake (SNP-enforced connections only) |
+| snp_enf_conn_acc_dropped_established | counter | The number of accumulated dropped connections that had been established (SNP-enforced connections only) |
+| snp_enf_conn_acc_dropped_set_identity | counter | The number of accumulated dropped connections due to set identity (SNP-enforced connections only) |
+| snp_enf_tx_bytes_via_udp_to_snp_avail_cnt | counter | The number of bytes sent via UDP to a SNP-available destination (SNP-enforced connections only) |
+| snp_enf_tx_pkts_via_udp_to_snp_avail_cnt | counter | The number of packets sent via UDP to a SNP-available destination (SNP-enforced connections only) |
+| snp_enf_tx_bytes_via_udp_cnt | counter | The number of bytes sent via UDP (SNP-enforced connections only) |
+| snp_enf_tx_bytes_via_snp_cnt | counter | The number of bytes sent via SNP (SNP-enforced connections only) |
+| snp_enf_tx_pkts_via_udp_cnt | counter | The number of packets sent via UDP (SNP-enforced connections only) |
+| snp_enf_tx_pkts_via_snp_cnt | counter | The number of packets sent via SNP (SNP-enforced connections only) |
+| snp_enf_tx_pkts_dropped_no_credits_cnt | counter | The number of packets that were not sent due to insufficient flow credits (SNP-enforced connections only) |
+| snp_enf_rx_bytes_cnt | counter | The number of bytes received (SNP-enforced connections only) |
+| snp_enf_rx_bytes_via_udp_cnt | counter | The number of bytes received via UDP (SNP-enforced connections only) |
+| snp_enf_rx_bytes_via_snp_cnt | counter | The number of bytes received via SNP (SNP-enforced connections only) |
+| snp_enf_rx_pkts_cnt | counter | The number of packets received (SNP-enforced connections only) |
+| snp_enf_rx_pkts_via_udp_cnt | counter | The number of packets received via UDP (SNP-enforced connections only) |
+| snp_enf_rx_pkts_via_snp_cnt | counter | The number of packets received via SNP (SNP-enforced connections only) |
+| snp_enf_rx_pkts_dropped_no_credits_cnt | counter | The number of packets that were received but dropped due to insufficient flow credits (SNP-enforced connections only) |
+| snp_enf_mcast_tx_pkts_cnt | counter | The number of packets sent to a multicast channel (SNP-enforced connections only) |
+| snp_enf_mcast_tx_bytes_cnt | counter | The number of bytes sent to a multicast channel (SNP-enforced connections only) |
+| snp_enf_mcast_rx_pkts_cnt | counter | The number of packets received from a multicast channel (SNP-enforced connections only) |
+| snp_enf_mcast_rx_bytes_cnt | counter | The number of bytes received from a multicast channel (SNP-enforced connections only) |
+
+
diff --git a/src/app/fdctl/Local.mk b/src/app/fdctl/Local.mk
index 2125c641108..0b3e1023507 100644
--- a/src/app/fdctl/Local.mk
+++ b/src/app/fdctl/Local.mk
@@ -39,7 +39,7 @@ $(call add-objs,commands/run_agave,fd_fdctl)
$(call make-lib,fdctl_version)
$(call add-objs,version,fdctl_version)
-$(call make-bin-rust,fdctl,main,fd_fdctl fdctl_shared fdctl_platform fd_discoh fd_disco agave_validator fd_flamenco fd_funk fd_quic fd_tls fd_reedsol fd_waltz fd_tango fd_ballet fd_util fdctl_version)
+$(call make-bin-rust,fdctl,main,fd_fdctl fdctl_shared fdctl_platform fd_discoh fd_disco agave_validator fd_flamenco fd_funk fd_quic fd_tls fd_snp fd_reedsol fd_waltz fd_tango fd_ballet fd_util fdctl_version)
check-agave-hash:
@$(eval AGAVE_COMMIT_LS_TREE=$(shell git ls-tree HEAD | grep agave | awk '{print $$3}'))
diff --git a/src/app/fdctl/config/default.toml b/src/app/fdctl/config/default.toml
index 03f8d433193..f997abf1c65 100644
--- a/src/app/fdctl/config/default.toml
+++ b/src/app/fdctl/config/default.toml
@@ -1435,6 +1435,19 @@ dynamic_port_range = "8900-9000"
# slow clients but also increase memory usage.
send_buffer_size_mb = 5120
+ # SNP tile handles the Solana Network Protocol (SNP), both ingress
+ # and egress.
+ [tiles.snp]
+ # SNP is currently optional, and disabled by default.
+ enabled = false
+
+  # While SNP is currently optional, the validator auto-detects
+  # whether peers have SNP enabled, and gracefully degrades to
+  # UDP for peers that don't support SNP.
+ # SNP can be enforced for a specific set of destinations.
+ # For these, SNP will be required and not downgraded to UDP.
+ enforced_destinations = []
+
# These options can be useful for development, but should not be used
# when connecting to a live cluster, as they may cause the validator to
# be unstable or have degraded performance or security. The program
diff --git a/src/app/fdctl/main.c b/src/app/fdctl/main.c
index c148bd47708..9f9b839e5b6 100644
--- a/src/app/fdctl/main.c
+++ b/src/app/fdctl/main.c
@@ -50,6 +50,7 @@ extern fd_topo_run_tile_t fd_tile_verify;
extern fd_topo_run_tile_t fd_tile_dedup;
extern fd_topo_run_tile_t fd_tile_pack;
extern fd_topo_run_tile_t fd_tile_shred;
+extern fd_topo_run_tile_t fd_tile_snp;
extern fd_topo_run_tile_t fd_tile_sign;
extern fd_topo_run_tile_t fd_tile_metric;
extern fd_topo_run_tile_t fd_tile_cswtch;
@@ -70,6 +71,7 @@ fd_topo_run_tile_t * TILES[] = {
&fd_tile_dedup,
&fd_tile_pack,
&fd_tile_shred,
+ &fd_tile_snp,
&fd_tile_sign,
&fd_tile_metric,
&fd_tile_cswtch,
diff --git a/src/app/fdctl/topology.c b/src/app/fdctl/topology.c
index b10ba36f896..0e66747f0ed 100644
--- a/src/app/fdctl/topology.c
+++ b/src/app/fdctl/topology.c
@@ -47,10 +47,30 @@ fd_topo_initialize( config_t * config ) {
topo->max_page_size = fd_cstr_to_shmem_page_sz( config->hugetlbfs.max_page_size );
topo->gigantic_page_threshold = config->hugetlbfs.gigantic_page_threshold_mib << 20;
+ /* SNP variables. */
+ ulong snp_tile_cnt = 0; /* currently not configurable by the user. */
+ const char * shred_out_wksp = "net_shred";
+ const char * shred_out_link = "shred_net";
+ const char * shred_in_link = "net_shred";
+ ulong shred_in_link_cnt = net_tile_cnt;
+ const char * net_out_shred_link = "net_shred";
+ const char * net_in_shred_link = "shred_net";
+ ulong net_in_shred_link_cnt = shred_tile_cnt;
+ if( config->tiles.snp.enabled ) {
+ snp_tile_cnt = 1; /* currently not configurable by the user. */
+ shred_out_wksp = "shred_snp";
+ shred_out_link = "shred_snp";
+ shred_in_link = "snp_shred";
+ shred_in_link_cnt = snp_tile_cnt;
+ net_out_shred_link = "net_shred"; /* this is hard-coded inside the net tile. */
+ net_in_shred_link = "snp_net";
+ net_in_shred_link_cnt = snp_tile_cnt;
+ }
+
/* topo, name */
fd_topob_wksp( topo, "metric_in" );
fd_topob_wksp( topo, "net_quic" );
- fd_topob_wksp( topo, "net_shred" );
+ fd_topob_wksp( topo, shred_out_wksp );
fd_topob_wksp( topo, "quic_verify" );
fd_topob_wksp( topo, "verify_dedup" );
fd_topob_wksp( topo, "dedup_resolv" );
@@ -82,11 +102,20 @@ fd_topo_initialize( config_t * config ) {
fd_topob_wksp( topo, "metric" );
fd_topob_wksp( topo, "cswtch" );
+ /* SNP workspaces. */
+ if( config->tiles.snp.enabled ) {
+ fd_topob_wksp( topo, "snp_shred" );
+ fd_topob_wksp( topo, "snp_net" );
+ fd_topob_wksp( topo, "snp_sign" );
+ fd_topob_wksp( topo, "sign_snp" );
+ fd_topob_wksp( topo, "snp" );
+ }
+
 #define FOR(cnt) for( ulong i=0UL; i<cnt; i++ )
 
 /*                                  topo, link_name,  wksp_name,  depth, mtu, burst */
 FOR(quic_tile_cnt)   fd_topob_link( topo, "quic_net", "net_quic", config->net.ingress_buffer_size, FD_NET_MTU, 1UL );
- FOR(shred_tile_cnt) fd_topob_link( topo, "shred_net", "net_shred", 32768UL, FD_NET_MTU, 1UL );
+ FOR(shred_tile_cnt) fd_topob_link( topo, shred_out_link, shred_out_wksp, 32768UL, FD_NET_MTU, 1UL );
FOR(quic_tile_cnt) fd_topob_link( topo, "quic_verify", "quic_verify", config->tiles.verify.receive_buffer_size, FD_TPU_REASM_MTU, config->tiles.quic.txn_reassembly_count );
FOR(verify_tile_cnt) fd_topob_link( topo, "verify_dedup", "verify_dedup", config->tiles.verify.receive_buffer_size, FD_TPU_PARSED_MTU, 1UL );
/**/ fd_topob_link( topo, "gossip_dedup", "gossip_dedup", 2048UL, FD_TPU_RAW_MTU, 1UL );
@@ -112,6 +141,14 @@ fd_topo_initialize( config_t * config ) {
FOR(shred_tile_cnt) fd_topob_link( topo, "shred_sign", "shred_sign", 128UL, 32UL, 1UL );
FOR(shred_tile_cnt) fd_topob_link( topo, "sign_shred", "sign_shred", 128UL, 64UL, 1UL );
+ /* SNP links. */
+ if( config->tiles.snp.enabled ) {
+ FOR(snp_tile_cnt) fd_topob_link( topo, shred_in_link, "snp_shred", 32768UL, FD_NET_MTU, 1UL );
+ FOR(snp_tile_cnt) fd_topob_link( topo, net_in_shred_link, "snp_net", 32768UL, FD_NET_MTU, 1UL );
+ FOR(snp_tile_cnt) fd_topob_link( topo, "snp_sign", "snp_sign", 16384UL, 40UL, 1UL );
+ FOR(snp_tile_cnt) fd_topob_link( topo, "sign_snp", "sign_snp", 16384UL, 64UL, 1UL );
+ }
+
ushort parsed_tile_to_cpu[ FD_TILE_MAX ];
/* Unassigned tiles will be floating, unless auto topology is enabled. */
for( ulong i=0UL; ilayout.net_tile_count, &config->net, config->tiles.netlink.max_routes, config->tiles.netlink.max_peer_routes, config->tiles.netlink.max_neighbors, tile_to_cpu );
- FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_quic", i, config->net.ingress_buffer_size );
- FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_shred", i, config->net.ingress_buffer_size );
+ FOR(net_tile_cnt) fd_topos_net_rx_link( topo, "net_quic", i, config->net.ingress_buffer_size );
+ FOR(net_tile_cnt) fd_topos_net_rx_link( topo, net_out_shred_link, i, config->net.ingress_buffer_size );
/* topo, tile_name, tile_wksp, metrics_wksp, cpu_idx, is_agave, uses_keyswitch */
FOR(quic_tile_cnt) fd_topob_tile( topo, "quic", "quic", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
@@ -157,12 +194,16 @@ fd_topo_initialize( config_t * config ) {
/**/ fd_topob_tile( topo, "sign", "sign", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 1 );
/**/ fd_topob_tile( topo, "metric", "metric", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
/**/ fd_topob_tile( topo, "cswtch", "cswtch", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 0 );
+ /* SNP tiles. */
+ if( FD_LIKELY( config->tiles.snp.enabled ) ) {
+ FOR(snp_tile_cnt) fd_topob_tile( topo, "snp", "snp", "metric_in", tile_to_cpu[ topo->tile_cnt ], 0, 1 );
+ }
/* topo, tile_name, tile_kind_id, fseq_wksp, link_name, link_kind_id, reliable, polled */
for( ulong j=0UL; jtiles.snp.enabled ) ) {
+ FOR(snp_tile_cnt) for( ulong j=0UL; jtile_cnt ) )
FD_LOG_ERR(( "The topology you are using has %lu tiles, but the CPU affinity specified in the config tile as [layout.affinity] only provides for %lu cores. "
@@ -503,6 +566,20 @@ fd_topo_configure_tile( fd_topo_tile_t * tile,
}
tile->shred.adtl_dests_leader_cnt = config->tiles.shred.additional_shred_destinations_leader_cnt;
+ tile->shred.is_snp_enabled = config->tiles.snp.enabled;
+
+ } else if( FD_UNLIKELY( !strcmp( tile->name, "snp" ) ) ) {
+ strncpy( tile->snp.identity_key_path, config->paths.identity_key, sizeof(tile->snp.identity_key_path) );
+
+ tile->snp.depth = config->topo.links[ tile->out_link_id[ 0 ] ].depth;
+
+ tile->snp.enforced_destinations_cnt = config->tiles.snp.enforced_destinations_cnt;
+    for( ulong i=0UL; i<config->tiles.snp.enforced_destinations_cnt; i++ ) {
+ parse_ip_port( "tiles.snp.enforced_destinations",
+ config->tiles.snp.enforced_destinations[ i ],
+ &tile->snp.enforced_destinations[ i ] );
+ }
+
} else if( FD_UNLIKELY( !strcmp( tile->name, "store" ) ) ) {
tile->store.disable_blockstore_from_slot = config->development.bench.disable_blockstore_from_slot;
diff --git a/src/app/fddev/Local.mk b/src/app/fddev/Local.mk
index 5442b19a215..566b4ab4a38 100644
--- a/src/app/fddev/Local.mk
+++ b/src/app/fddev/Local.mk
@@ -11,7 +11,7 @@ $(call add-objs,commands/configure/blockstore,fd_fddev)
$(call add-objs,commands/bench,fd_fddev)
$(call add-objs,commands/dev,fd_fddev)
-$(call make-bin-rust,fddev,main,fd_fddev fd_fdctl fddev_shared fdctl_shared fdctl_platform fd_discoh fd_disco agave_validator fd_flamenco fd_funk fd_quic fd_tls fd_reedsol fd_waltz fd_tango fd_ballet fd_util fdctl_version)
+$(call make-bin-rust,fddev,main,fd_fddev fd_fdctl fddev_shared fdctl_shared fdctl_platform fd_discoh fd_disco agave_validator fd_flamenco fd_funk fd_quic fd_tls fd_snp fd_reedsol fd_waltz fd_tango fd_ballet fd_util fdctl_version)
ifeq (run,$(firstword $(MAKECMDGOALS)))
RUN_ARGS := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS))
@@ -35,7 +35,7 @@ endif
monitor: bin
$(OBJDIR)/bin/fddev monitor $(MONITOR_ARGS)
-$(call make-integration-test,test_fddev,tests/test_fddev,fd_fddev fd_fdctl fddev_shared fdctl_shared fdctl_platform fd_discoh fd_disco agave_validator fd_flamenco fd_funk fd_quic fd_tls fd_reedsol fd_waltz fd_tango fd_ballet fd_util fdctl_version)
+$(call make-integration-test,test_fddev,tests/test_fddev,fd_fddev fd_fdctl fddev_shared fdctl_shared fdctl_platform fd_discoh fd_disco agave_validator fd_flamenco fd_funk fd_quic fd_tls fd_snp fd_reedsol fd_waltz fd_tango fd_ballet fd_util fdctl_version)
$(call run-integration-test,test_fddev)
endif
diff --git a/src/app/fddev/main.h b/src/app/fddev/main.h
index be102bf79bc..9b7b3bded05 100644
--- a/src/app/fddev/main.h
+++ b/src/app/fddev/main.h
@@ -60,6 +60,7 @@ extern fd_topo_run_tile_t fd_tile_verify;
extern fd_topo_run_tile_t fd_tile_dedup;
extern fd_topo_run_tile_t fd_tile_pack;
extern fd_topo_run_tile_t fd_tile_shred;
+extern fd_topo_run_tile_t fd_tile_snp;
extern fd_topo_run_tile_t fd_tile_sign;
extern fd_topo_run_tile_t fd_tile_metric;
extern fd_topo_run_tile_t fd_tile_cswtch;
@@ -85,6 +86,7 @@ fd_topo_run_tile_t * TILES[] = {
&fd_tile_dedup,
&fd_tile_pack,
&fd_tile_shred,
+ &fd_tile_snp,
&fd_tile_sign,
&fd_tile_metric,
&fd_tile_cswtch,
diff --git a/src/app/shared/fd_config.h b/src/app/shared/fd_config.h
index e1a837f895a..2f8a378b0b3 100644
--- a/src/app/shared/fd_config.h
+++ b/src/app/shared/fd_config.h
@@ -475,6 +475,12 @@ struct fd_config {
ulong write_buffer_size;
} shredcap;
+ struct {
+ int enabled;
+ char enforced_destinations[ FD_TOPO_ADTL_DESTS_MAX ][ sizeof("255.255.255.255:65535") ];
+ ulong enforced_destinations_cnt;
+ } snp;
+
} tiles;
struct {
ulong capture_start_slot;
diff --git a/src/app/shared/fd_config_parse.c b/src/app/shared/fd_config_parse.c
index a0cdac451da..31eac9c778f 100644
--- a/src/app/shared/fd_config_parse.c
+++ b/src/app/shared/fd_config_parse.c
@@ -271,6 +271,9 @@ fd_config_extract_pod( uchar * pod,
CFG_POP ( ulong, tiles.shredcap.write_buffer_size );
}
+ CFG_POP ( bool, tiles.snp.enabled );
+ CFG_POP_ARRAY( cstr, tiles.snp.enforced_destinations );
+
CFG_POP ( bool, development.sandbox );
CFG_POP ( bool, development.no_clone );
CFG_POP ( bool, development.core_dump );
diff --git a/src/disco/keyguard/fd_keyguard.h b/src/disco/keyguard/fd_keyguard.h
index 0c95d8de020..8e7f2b545a9 100644
--- a/src/disco/keyguard/fd_keyguard.h
+++ b/src/disco/keyguard/fd_keyguard.h
@@ -23,7 +23,8 @@ FD_PROTOTYPES_BEGIN
#define FD_KEYGUARD_ROLE_BUNDLE (5) /* Bundle tile */
#define FD_KEYGUARD_ROLE_EVENT (6) /* Event tile */
#define FD_KEYGUARD_ROLE_BUNDLE_CRANK (7) /* Sign cranking transactions for bundle tips */
-#define FD_KEYGUARD_ROLE_CNT (8) /* number of known roles */
+#define FD_KEYGUARD_ROLE_SNP (8) /* SNP tile */
+#define FD_KEYGUARD_ROLE_CNT (9) /* number of known roles */
/* Payload types ******************************************************/
@@ -37,6 +38,7 @@ FD_PROTOTYPES_BEGIN
#define FD_KEYGUARD_PAYLOAD_LG_BUNDLE ( 8) /* Bundle block producer authentication */
#define FD_KEYGUARD_PAYLOAD_LG_EVENT ( 9) /* Event reporter authentication */
#define FD_KEYGUARD_PAYLOAD_LG_PONG (10) /* Gossip/Repair ping/pong protocol */
+#define FD_KEYGUARD_PAYLOAD_LG_SNP (11) /* SNP message */
#define FD_KEYGUARD_PAYLOAD_TXN (1UL<
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/disco/shred/fd_shred_tile.c b/src/disco/shred/fd_shred_tile.c
index a914757eb15..c0821315216 100644
--- a/src/disco/shred/fd_shred_tile.c
+++ b/src/disco/shred/fd_shred_tile.c
@@ -1200,8 +1200,14 @@ fd_shred_signer( void * signer_ctx,
static void
unprivileged_init( fd_topo_t * topo,
fd_topo_tile_t * tile ) {
+ const char * net_in_name = "net_shred";
+ const char * net_out_name = "shred_net";
+ if( tile->shred.is_snp_enabled ) {
+ net_in_name = "snp_shred";
+ net_out_name = "shred_snp";
+ }
- FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ NET_OUT_IDX ]].name, "shred_net" ) );
+ FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ NET_OUT_IDX ]].name, net_out_name ) );
FD_TEST( 0==strcmp( topo->links[tile->out_link_id[ SIGN_OUT_IDX ]].name, "shred_sign" ) );
if( FD_UNLIKELY( !tile->out_cnt ) )
@@ -1364,7 +1370,7 @@ unprivileged_init( fd_topo_t * topo,
fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
fd_topo_wksp_t const * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
- if( FD_LIKELY( !strcmp( link->name, "net_shred" ) ) ) {
+ if( FD_LIKELY( !strcmp( link->name, net_in_name ) ) ) {
ctx->in_kind[ i ] = IN_KIND_NET;
fd_net_rx_bounds_init( &ctx->in[ i ].net_rx, link->dcache );
continue; /* only net_rx needs to be set in this case. */
diff --git a/src/disco/sign/fd_sign_tile.c b/src/disco/sign/fd_sign_tile.c
index f24a1ea7579..76f3ba69d71 100644
--- a/src/disco/sign/fd_sign_tile.c
+++ b/src/disco/sign/fd_sign_tile.c
@@ -202,6 +202,11 @@ after_frag_sensitive( void * _ctx,
fd_ed25519_sign( dst, ctx->event_concat, 18UL+32UL, ctx->public_key, ctx->private_key, ctx->sha512 );
break;
}
+ case FD_KEYGUARD_SIGN_TYPE_ULONG_ID_ED25519: {
+ memcpy( &sig, ctx->_data, sizeof(ulong) );
+ fd_ed25519_sign( dst, ctx->_data + sizeof(ulong), sz-sizeof(ulong), ctx->public_key, ctx->private_key, ctx->sha512 );
+ break;
+ }
default:
FD_LOG_EMERG(( "invalid sign type: %d", sign_type ));
}
@@ -336,6 +341,11 @@ unprivileged_init_sensitive( fd_topo_t * topo,
FD_TEST( !strcmp( out_link->name, "sign_pack" ) );
FD_TEST( in_link->mtu==1232UL );
FD_TEST( out_link->mtu==64UL );
+ } else if( !strcmp(in_link->name, "snp_sign" ) ) {
+ ctx->in[ i ].role = FD_KEYGUARD_ROLE_SNP;
+ FD_TEST( !strcmp( out_link->name, "sign_snp" ) );
+ FD_TEST( in_link->mtu==40UL );
+ FD_TEST( out_link->mtu==64UL );
} else {
FD_LOG_CRIT(( "unexpected link %s", in_link->name ));
}
diff --git a/src/disco/snp/Local.mk b/src/disco/snp/Local.mk
new file mode 100644
index 00000000000..0a928369691
--- /dev/null
+++ b/src/disco/snp/Local.mk
@@ -0,0 +1,3 @@
+ifdef FD_HAS_INT128
+$(call add-objs,fd_snp_tile,fd_disco)
+endif
diff --git a/src/disco/snp/fd_snp_tile.c b/src/disco/snp/fd_snp_tile.c
new file mode 100644
index 00000000000..aec5af49dc0
--- /dev/null
+++ b/src/disco/snp/fd_snp_tile.c
@@ -0,0 +1,743 @@
+#include "../tiles.h"
+
+#include "generated/fd_snp_tile_seccomp.h"
+#include "../shred/fd_shred_dest.h"
+#include "../shred/fd_stake_ci.h"
+#include "../keyguard/fd_keyload.h"
+#include "../keyguard/fd_keyguard.h"
+#include "../keyguard/fd_keyswitch.h"
+#include "../fd_disco.h"
+#include "../net/fd_net_tile.h"
+#include "../../waltz/snp/fd_snp.h"
+#include "../../waltz/snp/fd_snp_app.h"
+
+#include
+
+#include "../../app/fdctl/version.h"
+
+static inline fd_snp_limits_t
+snp_limits( fd_topo_tile_t const * tile ) {
+ (void)tile;
+ fd_snp_limits_t limits = {
+ .peer_cnt = 65536, /* >= MAX_SHRED_DESTS, power of 2 */
+ };
+ if( FD_UNLIKELY( !fd_snp_footprint( &limits ) ) ) {
+ FD_LOG_ERR(( "Invalid SNP limits in config" ));
+ }
+ return limits;
+}
+
+#define FD_SNP_TILE_SCRATCH_ALIGN (128UL)
+
+#define IN_KIND_NET_SHRED (0UL)
+#define IN_KIND_SHRED (1UL)
+#define IN_KIND_GOSSIP (2UL)
+#define IN_KIND_SIGN (3UL)
+#define IN_KIND_CRDS (4UL)
+#define IN_KIND_STAKE (5UL)
+
+/* The order here depends on the order in which fd_topob_tile_out(...)
+ are called inside topology.c (in the corresponding folder) */
+#define NET_OUT_IDX (0)
+#define SHRED_OUT_IDX (1)
+#define SIGN_OUT_IDX (2)
+
+#define SNP_MIN_FDCTL_MINOR_VERSION (711)
+
+typedef union {
+ struct {
+ fd_wksp_t * mem;
+ ulong chunk0;
+ ulong wmark;
+ };
+ fd_net_rx_bounds_t net_rx;
+} fd_snp_in_ctx_t;
+
+typedef struct {
+ int skip_frag;
+ ulong round_robin_id;
+ ulong round_robin_cnt;
+
+ fd_stem_context_t * stem;
+ ulong sig;
+ ulong tsorig;
+
+ fd_snp_in_ctx_t in[ 32 ];
+ int in_kind[ 32 ];
+
+ fd_keyswitch_t * keyswitch;
+ fd_pubkey_t identity_key[1]; /* Just the public key */
+
+ /* Channels */
+ fd_wksp_t * net_out_mem;
+ ulong net_out_chunk0;
+ ulong net_out_wmark;
+ ulong net_out_chunk;
+
+ fd_wksp_t * shred_out_mem;
+ ulong shred_out_chunk0;
+ ulong shred_out_wmark;
+ ulong shred_out_chunk;
+
+ fd_wksp_t * sign_out_mem;
+ ulong sign_out_chunk0;
+ ulong sign_out_wmark;
+ ulong sign_out_chunk;
+
+ /* dests/stake_ci */
+ fd_stake_ci_t * stake_ci;
+ fd_shred_dest_weighted_t * new_dest_ptr;
+ ulong new_dest_cnt;
+
+ /* SNP */
+ uchar * packet;
+ ulong packet_sz;
+ fd_snp_t * snp;
+ fd_snp_meta_t meta;
+ uchar signature[ FD_ED25519_SIG_SZ ];
+
+ /* SNP enforced destinations */
+ ulong enforced_cnt;
+ ulong enforced[ FD_TOPO_ADTL_DESTS_MAX ];
+} fd_snp_tile_ctx_t;
+
+FD_FN_CONST static inline ulong
+scratch_align( void ) {
+ return FD_SNP_TILE_SCRATCH_ALIGN;
+}
+
+FD_FN_PURE static inline ulong
+scratch_footprint( fd_topo_tile_t const * tile ) {
+ fd_snp_limits_t limits = snp_limits( tile );
+
+ ulong l = FD_LAYOUT_INIT;
+ l = FD_LAYOUT_APPEND( l, alignof(fd_snp_tile_ctx_t), sizeof(fd_snp_tile_ctx_t) );
+ l = FD_LAYOUT_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
+ l = FD_LAYOUT_APPEND( l, fd_snp_align(), fd_snp_footprint( &limits ) );
+ return FD_LAYOUT_FINI( l, scratch_align() );
+}
+
+static inline void
+during_housekeeping( fd_snp_tile_ctx_t * ctx ) {
+ if( FD_UNLIKELY( fd_keyswitch_state_query( ctx->keyswitch )==FD_KEYSWITCH_STATE_SWITCH_PENDING ) ) {
+ fd_memcpy( ctx->identity_key->uc, ctx->keyswitch->bytes, 32UL );
+ fd_stake_ci_set_identity( ctx->stake_ci, ctx->identity_key );
+ fd_keyswitch_state( ctx->keyswitch, FD_KEYSWITCH_STATE_COMPLETED );
+
+ fd_snp_set_identity( ctx->snp, ctx->identity_key->uc );
+ }
+
+ /* SNP housekeeping */
+ fd_snp_housekeeping( ctx->snp );
+
+#if FD_SNP_DEBUG_ENABLED
+ /* SNP logged metrics. */
+ static long snp_next_metrics_log = 0L;
+ long now = fd_snp_timestamp_ms();
+ if( now > snp_next_metrics_log ) {
+ FD_LOG_NOTICE(( "[SNP] contacts=%lu connections=%lu", fd_snp_dest_meta_map_key_cnt( ctx->snp->dest_meta_map ), fd_snp_conn_pool_used( ctx->snp->conn_pool ) ));
+ snp_next_metrics_log = now + 10000L; /* Every 10 seconds. */
+ }
+#endif
+}
+
+static inline void
+handle_new_cluster_contact_info( fd_snp_tile_ctx_t * ctx,
+ uchar const * buf ) {
+
+ ulong const * header = (ulong const *)fd_type_pun_const( buf );
+
+ ulong dest_cnt = header[ 0 ];
+ if( dest_cnt >= MAX_SHRED_DESTS )
+ FD_LOG_ERR(( "Cluster nodes had %lu destinations, which was more than the max of %lu", dest_cnt, MAX_SHRED_DESTS ));
+
+ fd_shred_dest_wire_t const * in_dests = fd_type_pun_const( header+1UL );
+ fd_shred_dest_weighted_t * dests = fd_stake_ci_dest_add_init( ctx->stake_ci );
+
+ ctx->new_dest_ptr = dests;
+ ctx->new_dest_cnt = dest_cnt;
+
+ ctx->snp->dest_meta_update_idx += 1U;
+
+ for( ulong i=0UL; i= SNP_MIN_FDCTL_MINOR_VERSION) ? 1U : 0U;
+
+ ulong key = fd_snp_dest_meta_map_key_from_parts( ip4_addr, udp_port );
+ fd_snp_dest_meta_map_t sentinel = { 0 };
+ fd_snp_dest_meta_map_t * entry = fd_snp_dest_meta_map_query( ctx->snp->dest_meta_map, key, &sentinel );
+
+ int is_new = 0;
+ if( FD_UNLIKELY( !entry->key ) ) {
+ entry = fd_snp_dest_meta_map_insert( ctx->snp->dest_meta_map, key );
+ if( !entry ) continue;
+ memset( &entry->val, 0, sizeof(fd_snp_dest_meta_t) );
+ entry->val.ip4_addr = ip4_addr;
+ entry->val.udp_port = udp_port;
+ entry->val.snp_available = snp_available;
+ is_new = 1;
+ }
+
+ /* If two or more pubkeys show the same ip4_addr and udp_port in
+ gossip (it has been observed in testnet), we need to avoid a
+ ping-pong around has_changed. The downside to this approach
+ is that only the first entry in the gossip table will be
+ processed here. */
+ if( entry->val.update_idx == ctx->snp->dest_meta_update_idx ) continue;
+ /* For every entry, whether new or not, the update index needs to
+ be refreshed. This is later used to detect (and delete)
+ expired entries. */
+ entry->val.update_idx = ctx->snp->dest_meta_update_idx;
+
+ int has_changed = (!is_new) && (entry->val.snp_available != snp_available);
+
+ if( FD_UNLIKELY( !!is_new || !!has_changed ) ) {
+ entry->val.snp_available = snp_available;
+ entry->val.snp_enabled = 0;
+ /* force a handshake if snp_available. */
+ entry->val.snp_handshake_tstamp = 0;
+ }
+ }
+  for( ulong i=0UL; i<ctx->enforced_cnt; i++ ) {
+ fd_snp_dest_meta_map_t sentinel = { 0 };
+ fd_snp_dest_meta_map_t * entry = fd_snp_dest_meta_map_query( ctx->snp->dest_meta_map, ctx->enforced[ i ], &sentinel );
+ if( FD_LIKELY( !!entry->key ) ) {
+ entry->val.snp_enabled = 1;
+ entry->val.snp_enforced = 1;
+ }
+ }
+}
+
+static inline void
+finalize_new_cluster_contact_info( fd_snp_tile_ctx_t * ctx ) {
+ fd_stake_ci_dest_add_fini( ctx->stake_ci, ctx->new_dest_cnt );
+}
+
+static inline void
+metrics_write( fd_snp_tile_ctx_t * ctx ) {
+ /* All */
+ FD_MCNT_SET ( SNP, ALL_DEST_META_CNT, ctx->snp->metrics_all->dest_meta_cnt );
+ FD_MCNT_SET ( SNP, ALL_DEST_META_SNP_AVAILABLE_CNT, ctx->snp->metrics_all->dest_meta_snp_available_cnt );
+ FD_MCNT_SET ( SNP, ALL_DEST_META_SNP_ENABLED_CNT, ctx->snp->metrics_all->dest_meta_snp_enabled_cnt );
+ FD_MCNT_SET ( SNP, ALL_CONN_CUR_TOTAL, ctx->snp->metrics_all->conn_cur_total );
+ FD_MCNT_SET ( SNP, ALL_CONN_CUR_ESTABLISHED, ctx->snp->metrics_all->conn_cur_established );
+ FD_MCNT_SET ( SNP, ALL_CONN_ACC_TOTAL, ctx->snp->metrics_all->conn_acc_total );
+ FD_MCNT_SET ( SNP, ALL_CONN_ACC_ESTABLISHED, ctx->snp->metrics_all->conn_acc_established );
+ FD_MCNT_SET ( SNP, ALL_CONN_ACC_DROPPED, ctx->snp->metrics_all->conn_acc_dropped );
+ FD_MCNT_SET ( SNP, ALL_CONN_ACC_DROPPED_HANDSHAKE, ctx->snp->metrics_all->conn_acc_dropped_handshake );
+ FD_MCNT_SET ( SNP, ALL_CONN_ACC_DROPPED_ESTABLISHED, ctx->snp->metrics_all->conn_acc_dropped_established );
+ FD_MCNT_SET ( SNP, ALL_CONN_ACC_DROPPED_SET_IDENTITY, ctx->snp->metrics_all->conn_acc_dropped_set_identity );
+ FD_MCNT_SET ( SNP, ALL_TX_BYTES_VIA_UDP_TO_SNP_AVAIL_CNT, ctx->snp->metrics_all->tx_bytes_via_udp_to_snp_avail_cnt );
+ FD_MCNT_SET ( SNP, ALL_TX_PKTS_VIA_UDP_TO_SNP_AVAIL_CNT, ctx->snp->metrics_all->tx_pkts_via_udp_to_snp_avail_cnt );
+ FD_MCNT_SET ( SNP, ALL_TX_BYTES_VIA_UDP_CNT, ctx->snp->metrics_all->tx_bytes_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ALL_TX_BYTES_VIA_SNP_CNT, ctx->snp->metrics_all->tx_bytes_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ALL_TX_PKTS_VIA_UDP_CNT, ctx->snp->metrics_all->tx_pkts_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ALL_TX_PKTS_VIA_SNP_CNT, ctx->snp->metrics_all->tx_pkts_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ALL_TX_PKTS_DROPPED_NO_CREDITS_CNT, ctx->snp->metrics_all->tx_pkts_dropped_no_credits_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_BYTES_CNT, ctx->snp->metrics_all->rx_bytes_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_BYTES_VIA_UDP_CNT, ctx->snp->metrics_all->rx_bytes_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_BYTES_VIA_SNP_CNT, ctx->snp->metrics_all->rx_bytes_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_PKTS_CNT, ctx->snp->metrics_all->rx_pkts_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_PKTS_VIA_UDP_CNT, ctx->snp->metrics_all->rx_pkts_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_PKTS_VIA_SNP_CNT, ctx->snp->metrics_all->rx_pkts_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ALL_RX_PKTS_DROPPED_NO_CREDITS_CNT, ctx->snp->metrics_all->rx_pkts_dropped_no_credits_cnt );
+ FD_MCNT_SET ( SNP, ALL_MCAST_TX_PKTS_CNT, ctx->snp->metrics_all->mcast_tx_pkts_cnt );
+ FD_MCNT_SET ( SNP, ALL_MCAST_TX_BYTES_CNT, ctx->snp->metrics_all->mcast_tx_bytes_cnt );
+ FD_MCNT_SET ( SNP, ALL_MCAST_RX_PKTS_CNT, ctx->snp->metrics_all->mcast_rx_pkts_cnt );
+ FD_MCNT_SET ( SNP, ALL_MCAST_RX_BYTES_CNT, ctx->snp->metrics_all->mcast_rx_bytes_cnt );
+
+ /* Enforced */
+ FD_MCNT_SET ( SNP, ENF_DEST_META_CNT, ctx->snp->metrics_enf->dest_meta_cnt );
+ FD_MCNT_SET ( SNP, ENF_DEST_META_SNP_AVAILABLE_CNT, ctx->snp->metrics_enf->dest_meta_snp_available_cnt );
+ FD_MCNT_SET ( SNP, ENF_DEST_META_SNP_ENABLED_CNT, ctx->snp->metrics_enf->dest_meta_snp_enabled_cnt );
+ FD_MCNT_SET ( SNP, ENF_CONN_CUR_TOTAL, ctx->snp->metrics_enf->conn_cur_total );
+ FD_MCNT_SET ( SNP, ENF_CONN_CUR_ESTABLISHED, ctx->snp->metrics_enf->conn_cur_established );
+ FD_MCNT_SET ( SNP, ENF_CONN_ACC_TOTAL, ctx->snp->metrics_enf->conn_acc_total );
+ FD_MCNT_SET ( SNP, ENF_CONN_ACC_ESTABLISHED, ctx->snp->metrics_enf->conn_acc_established );
+ FD_MCNT_SET ( SNP, ENF_CONN_ACC_DROPPED, ctx->snp->metrics_enf->conn_acc_dropped );
+ FD_MCNT_SET ( SNP, ENF_CONN_ACC_DROPPED_HANDSHAKE, ctx->snp->metrics_enf->conn_acc_dropped_handshake );
+ FD_MCNT_SET ( SNP, ENF_CONN_ACC_DROPPED_ESTABLISHED, ctx->snp->metrics_enf->conn_acc_dropped_established );
+ FD_MCNT_SET ( SNP, ENF_CONN_ACC_DROPPED_SET_IDENTITY, ctx->snp->metrics_enf->conn_acc_dropped_set_identity );
+ FD_MCNT_SET ( SNP, ENF_TX_BYTES_VIA_UDP_TO_SNP_AVAIL_CNT, ctx->snp->metrics_enf->tx_bytes_via_udp_to_snp_avail_cnt );
+ FD_MCNT_SET ( SNP, ENF_TX_PKTS_VIA_UDP_TO_SNP_AVAIL_CNT, ctx->snp->metrics_enf->tx_pkts_via_udp_to_snp_avail_cnt );
+ FD_MCNT_SET ( SNP, ENF_TX_BYTES_VIA_UDP_CNT, ctx->snp->metrics_enf->tx_bytes_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ENF_TX_BYTES_VIA_SNP_CNT, ctx->snp->metrics_enf->tx_bytes_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ENF_TX_PKTS_VIA_UDP_CNT, ctx->snp->metrics_enf->tx_pkts_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ENF_TX_PKTS_VIA_SNP_CNT, ctx->snp->metrics_enf->tx_pkts_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ENF_TX_PKTS_DROPPED_NO_CREDITS_CNT, ctx->snp->metrics_enf->tx_pkts_dropped_no_credits_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_BYTES_CNT, ctx->snp->metrics_enf->rx_bytes_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_BYTES_VIA_UDP_CNT, ctx->snp->metrics_enf->rx_bytes_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_BYTES_VIA_SNP_CNT, ctx->snp->metrics_enf->rx_bytes_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_PKTS_CNT, ctx->snp->metrics_enf->rx_pkts_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_PKTS_VIA_UDP_CNT, ctx->snp->metrics_enf->rx_pkts_via_udp_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_PKTS_VIA_SNP_CNT, ctx->snp->metrics_enf->rx_pkts_via_snp_cnt );
+ FD_MCNT_SET ( SNP, ENF_RX_PKTS_DROPPED_NO_CREDITS_CNT, ctx->snp->metrics_enf->rx_pkts_dropped_no_credits_cnt );
+ FD_MCNT_SET ( SNP, ENF_MCAST_TX_PKTS_CNT, ctx->snp->metrics_enf->mcast_tx_pkts_cnt );
+ FD_MCNT_SET ( SNP, ENF_MCAST_TX_BYTES_CNT, ctx->snp->metrics_enf->mcast_tx_bytes_cnt );
+ FD_MCNT_SET ( SNP, ENF_MCAST_RX_PKTS_CNT, ctx->snp->metrics_enf->mcast_rx_pkts_cnt );
+ FD_MCNT_SET ( SNP, ENF_MCAST_RX_BYTES_CNT, ctx->snp->metrics_enf->mcast_rx_bytes_cnt );
+}
+
+static inline int
+before_frag( fd_snp_tile_ctx_t * ctx,
+ ulong in_idx,
+ ulong seq FD_PARAM_UNUSED,
+ ulong sig ) {
+ if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_SHRED ) ) return 0;
+ else if( FD_LIKELY( ctx->in_kind[ in_idx ]==IN_KIND_NET_SHRED ) ) return fd_disco_netmux_sig_proto( sig )!=DST_PROTO_SHRED;
+
+ return 0;
+}
+
+static void
+during_frag( fd_snp_tile_ctx_t * ctx,
+ ulong in_idx,
+ ulong seq FD_PARAM_UNUSED,
+ ulong sig FD_PARAM_UNUSED,
+ ulong chunk,
+ ulong sz,
+ ulong ctl ) {
+
+ ctx->skip_frag = 0;
+
+ ctx->tsorig = fd_frag_meta_ts_comp( fd_tickcount() );
+
+ switch( ctx->in_kind[ in_idx ] ) {
+
+ case IN_KIND_SHRED: {
+ /* Generally, in during_frag we memcpy the incoming frag to a new buffer
+ before processing it in after_frag.
+ In this case, the frag comes from shred tile and will go to the net tile,
+ so the new buffer can be taken from the net_out channel.
+
+ In this version of SNP we have 2 complexities:
+ 1. the destination peer can either have SNP enabled or not, so we have
+ to either send a SNP or UDP packet.
+ 2. the SNP tile is optional, so the shred tile has prepared a UDP packet
+ for the net tile (i.e. frag is a UDP packet).
+
+ To solve 1) we retrieve snp_enabled from the dest_meta map. If SNP were
+ required, we'd simply skip this and always send a SNP packet.
+ To solve 2) we invoke fd_snp_app_send() with the appropriate meta protocol,
+ that in turns copies the payload (the shred) in the correct position...
+ a long way to say that the memcpy is done by fd_snp_app_send().
+
+ Note that fd_snp_app_send() is designed so that it could also be used in the
+ application tile, in this case that'd be in the shred tile.
+ In future versions, when SNP tile is always present, we can prepare the packet
+ directly in the shred tile, and moreover just send a single dcache entry with
+ multiple mcache entries (one for each peer), thus reducing the work in the
+ application/shred tile. */
+
+ /* Applications are unreliable channels, we copy the incoming packet
+ and we'll process it in after_frag. */
+ uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
+    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz>FD_NET_MTU ) )
+ FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
+ ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
+
+ ctx->packet = fd_chunk_to_laddr( ctx->net_out_mem, ctx->net_out_chunk );
+ fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)dcache_entry;
+ uint ip4_daddr = hdr->ip4->daddr;
+ ushort udp_dport = fd_ushort_bswap( hdr->udp->net_dport );
+
+ //TODO: int snp_enabled = function(...)
+ int snp_enabled = 0;
+ ulong dest_meta_map_key = fd_snp_dest_meta_map_key_from_parts( ip4_daddr, udp_dport );
+ fd_snp_dest_meta_map_t sentinel = { 0 };
+ fd_snp_dest_meta_map_t * entry = fd_snp_dest_meta_map_query( ctx->snp->dest_meta_map, dest_meta_map_key, &sentinel );
+ if( !!entry->key ) {
+ snp_enabled = entry->val.snp_enabled;
+ }
+
+ /* fd_snp_app_send is designed to be used in the app tile, in this case shred tile.
+ */
+ fd_snp_meta_t meta = fd_snp_meta_from_parts( snp_enabled ? FD_SNP_META_PROTO_V1 : FD_SNP_META_PROTO_UDP, 0/*app_id*/, ip4_daddr, udp_dport );
+ int res = fd_snp_app_send( NULL/*ctx->snp_app*/, ctx->packet, FD_NET_MTU, dcache_entry + sizeof(fd_ip4_udp_hdrs_t), sz - sizeof(fd_ip4_udp_hdrs_t), meta );
+ if( res < 0 ) {
+ //TODO: metrics (but this really should never fail...)
+ ctx->skip_frag = 1;
+ }
+ ctx->packet_sz = (ulong)res;
+ ctx->meta = meta;
+ } break;
+
+ case IN_KIND_NET_SHRED: {
+ /* Net is an unreliable channel, we copy the incoming packet
+ and we'll process it in after_frag. */
+ uchar const * dcache_entry = fd_net_rx_translate_frag( &ctx->in[ in_idx ].net_rx, chunk, ctl, sz );
+ ulong hdr_sz = fd_disco_netmux_sig_hdr_sz( sig );
+ FD_TEST( hdr_sz <= sz ); /* Should be ensured by the net tile */
+
+ /* on the shred channel we receive both packets from shred and repair */
+ //TODO: function(...)
+ if( sz > 45
+ && *(dcache_entry + 42UL)=='S'
+ && *(dcache_entry + 43UL)=='N'
+ && *(dcache_entry + 44UL)=='P'
+ && ( *(dcache_entry + 45UL) & 0x0F ) != FD_SNP_TYPE_PAYLOAD ) {
+ ctx->packet = fd_chunk_to_laddr( ctx->net_out_mem, ctx->net_out_chunk );
+ } else {
+ ctx->packet = fd_chunk_to_laddr( ctx->shred_out_mem, ctx->shred_out_chunk );
+ }
+
+ memcpy( ctx->packet, dcache_entry, sz );
+ ctx->packet_sz = sz;
+ ctx->sig = sig;
+ } break;
+
+ case IN_KIND_GOSSIP:
+ /* Gossip is a reliable channel, we can process new contacts here */
+ break;
+
+ case IN_KIND_SIGN: {
+ /* Sign is an unreliable channel but guaranteed not to overflow.
+ Therefore, we can process new signatures here. */
+ uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
+    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark || sz!=FD_ED25519_SIG_SZ ) )
+ FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
+ ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
+
+ fd_memcpy( ctx->signature, dcache_entry, FD_ED25519_SIG_SZ );
+ } break;
+
+ case IN_KIND_CRDS: {
+    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
+ FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
+ ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
+
+ uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
+ handle_new_cluster_contact_info( ctx, dcache_entry );
+ } break;
+
+ case IN_KIND_STAKE: {
+    if( FD_UNLIKELY( chunk<ctx->in[ in_idx ].chunk0 || chunk>ctx->in[ in_idx ].wmark ) )
+ FD_LOG_ERR(( "chunk %lu %lu corrupt, not in range [%lu,%lu]", chunk, sz,
+ ctx->in[ in_idx ].chunk0, ctx->in[ in_idx ].wmark ));
+
+ uchar const * dcache_entry = fd_chunk_to_laddr_const( ctx->in[ in_idx ].mem, chunk );
+ fd_stake_ci_stake_msg_init( ctx->stake_ci, fd_type_pun_const( dcache_entry ) );
+ } break;
+ }
+}
+
+static void
+after_frag( fd_snp_tile_ctx_t * ctx,
+ ulong in_idx,
+ ulong seq FD_PARAM_UNUSED,
+ ulong sig,
+ ulong sz FD_PARAM_UNUSED,
+ ulong tsorig FD_PARAM_UNUSED,
+ ulong _tspub FD_PARAM_UNUSED,
+ fd_stem_context_t * stem ) {
+ if( FD_UNLIKELY( ctx->skip_frag ) ) return;
+
+ /* make sure to set ctx->stem before invoking any snp callback. */
+ ctx->stem = stem;
+
+ switch( ctx->in_kind[ in_idx ] ) {
+ case IN_KIND_SHRED: {
+ /* Process all applications (with multicast) */
+ //TODO: metrics if failed to send
+ fd_snp_send( ctx->snp, ctx->packet, ctx->packet_sz, ctx->meta );
+ } break;
+
+ case IN_KIND_NET_SHRED: {
+ /* Process incoming network packets */
+ //TODO: metrics if dropped - need to distinguished invalid packet (dropped ok) vs internal error (dropped not ok)
+ fd_snp_process_packet( ctx->snp, ctx->packet, ctx->packet_sz );
+ } break;
+
+ case IN_KIND_GOSSIP:
+ /* Gossip */
+ break;
+
+ case IN_KIND_SIGN: {
+ /* Sign */
+ //TODO: metrics if failed, but should never fail
+ fd_snp_process_signature( ctx->snp, sig /*session_id*/, ctx->signature );
+ } break;
+
+ case IN_KIND_CRDS: {
+ finalize_new_cluster_contact_info( ctx );
+ } break;
+
+ case IN_KIND_STAKE: {
+ fd_stake_ci_stake_msg_fini( ctx->stake_ci );
+ } break;
+ }
+}
+
+static int
+snp_callback_tx( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+
+ fd_snp_tile_ctx_t * ctx = (fd_snp_tile_ctx_t *)_ctx;
+ uint dst_ip_meta;
+ ushort dst_port;
+ ulong proto;
+ fd_snp_meta_into_parts( &proto, NULL, &dst_ip_meta, &dst_port, meta );
+
+ uint dst_ip = fd_uint_load_4( packet+FD_SNP_IP_DST_ADDR_OFF );
+ ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+ ulong sig = fd_disco_netmux_sig( dst_ip, 0U, dst_ip, DST_PROTO_OUTGOING, FD_NETMUX_SIG_MIN_HDR_SZ );
+
+ /* memcpy is done in during_frag, it's only needed for buffered packets */
+ if( FD_UNLIKELY( meta & FD_SNP_META_OPT_BUFFERED ) ) {
+ memcpy( fd_chunk_to_laddr( ctx->net_out_mem, ctx->net_out_chunk ), packet, packet_sz );
+ }
+ fd_stem_publish( ctx->stem, NET_OUT_IDX /*ctx->net_out_idx*/, sig, ctx->net_out_chunk, packet_sz, 0UL, ctx->tsorig, tspub );
+ ctx->net_out_chunk = fd_dcache_compact_next( ctx->net_out_chunk, packet_sz, ctx->net_out_chunk0, ctx->net_out_wmark );
+
+ return FD_SNP_SUCCESS;
+}
+
+static int
+snp_callback_rx( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ fd_snp_tile_ctx_t * ctx = (fd_snp_tile_ctx_t *)_ctx;
+ ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+ ulong sig = ctx->sig;
+ FD_TEST( ctx->stem != NULL );
+ /* No memcpy needed here - already done in during_frag. */
+
+ if( FD_UNLIKELY( meta & FD_SNP_META_OPT_BUFFERED ) ) {
+ /* This calculation of sig is very specific to the shred tile. */
+ fd_ip4_hdr_t * ip4_hdr = (fd_ip4_hdr_t *)(packet + sizeof(fd_eth_hdr_t));
+ ulong hdr_sz = sizeof(fd_eth_hdr_t) + FD_IP4_GET_LEN( *ip4_hdr ) + sizeof(fd_udp_hdr_t);
+    sig = fd_disco_netmux_sig( 0/*unused*/, 0/*unused*/, 0/*unused*/, DST_PROTO_SHRED, hdr_sz );
+ /* copy the buffered packet */
+ memcpy( ctx->packet, packet, packet_sz );
+ }
+
+  /* Because the SNP tile is optional, we need to pretend that the packet sent to
+ the shred tile is a UDP packet. Currently we can't just modify offsets without
+ modifying code in net/shred tiles, so we memmove the payload to the correct
+ position.
+ The proper way to handle this would be to access the payload via fd_snp_app_recv(),
+ but that requires changes in the shred tile that we wanted to avoid for this first
+ version. See also the comment related to fd_snp_app_send() above. */
+ ulong adj_ctl = 0UL;
+ ulong adj_packet_sz = packet_sz;
+ ulong proto;
+ fd_snp_meta_into_parts( &proto, NULL, NULL, NULL, meta );
+ if( FD_LIKELY( proto != FD_SNP_META_PROTO_UDP ) ) {
+ adj_packet_sz -= ( 12 /*SNP*/ + 3/*TL*/ + 19 /*MAC*/ );
+ fd_ip4_hdr_t * ip4_hdr = (fd_ip4_hdr_t *)(packet + sizeof(fd_eth_hdr_t));
+ ulong hdr_sz = sizeof(fd_eth_hdr_t) + FD_IP4_GET_LEN( *ip4_hdr ) + sizeof(fd_udp_hdr_t);
+ memmove( ctx->packet+hdr_sz, packet+hdr_sz+( 12 /*SNP*/ + 3/*TL*/ ), adj_packet_sz-hdr_sz );
+ }
+
+ fd_stem_publish( ctx->stem, SHRED_OUT_IDX /*ctx->shred_out_idx*/, sig, ctx->shred_out_chunk, adj_packet_sz, adj_ctl, ctx->tsorig, tspub );
+ ctx->shred_out_chunk = fd_dcache_compact_next( ctx->shred_out_chunk, packet_sz, ctx->shred_out_chunk0, ctx->shred_out_wmark );
+ return FD_SNP_SUCCESS;
+}
+
+static int
+snp_callback_sign( void const * _ctx,
+ ulong session_id,
+ uchar const to_sign[ FD_SNP_TO_SIGN_SZ ] ) {
+ (void)to_sign;
+ fd_snp_tile_ctx_t * ctx = (fd_snp_tile_ctx_t *)_ctx;
+
+ ulong tspub = fd_frag_meta_ts_comp( fd_tickcount() );
+ ulong sig = (ulong)FD_KEYGUARD_SIGN_TYPE_ULONG_ID_ED25519;
+ FD_TEST( ctx->stem != NULL );
+
+ uchar * dst = fd_chunk_to_laddr( ctx->sign_out_mem, ctx->sign_out_chunk );
+ memcpy( dst+0UL, &session_id, sizeof(ulong) );
+ memcpy( dst+sizeof(ulong), to_sign, FD_SNP_TO_SIGN_SZ - sizeof(ulong) );
+ //TODO: metrics number of signatures requested + received => should always match => we shouldn't loose signatures
+ fd_stem_publish( ctx->stem, SIGN_OUT_IDX /*ctx->sign_out_idx*/, sig, ctx->sign_out_chunk, FD_SNP_TO_SIGN_SZ, 0UL, ctx->tsorig, tspub );
+ ctx->sign_out_chunk = fd_dcache_compact_next( ctx->sign_out_chunk, FD_SNP_TO_SIGN_SZ, ctx->sign_out_chunk0, ctx->sign_out_wmark );
+ return FD_SNP_SUCCESS;
+}
+
+static void
+privileged_init( fd_topo_t * topo,
+ fd_topo_tile_t * tile ) {
+ void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
+
+ FD_SCRATCH_ALLOC_INIT( l, scratch );
+ fd_snp_tile_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_snp_tile_ctx_t ), sizeof( fd_snp_tile_ctx_t ) );
+
+ if( FD_UNLIKELY( !strcmp( tile->snp.identity_key_path, "" ) ) )
+ FD_LOG_ERR(( "identity_key_path not set" ));
+
+ ctx->identity_key[ 0 ] = *(fd_pubkey_t const *)fd_type_pun_const( fd_keyload_load( tile->snp.identity_key_path, /* pubkey only: */ 1 ) );
+}
+
+static void
+unprivileged_init( fd_topo_t * topo,
+ fd_topo_tile_t * tile ) {
+ void * scratch = fd_topo_obj_laddr( topo, tile->tile_obj_id );
+ if( FD_LIKELY( tile->out_cnt==3UL ) ) { /* frankendancer */
+ FD_TEST( 0==strcmp( topo->links[tile->out_link_id[NET_OUT_IDX]].name, "snp_net" ) );
+ FD_TEST( 0==strcmp( topo->links[tile->out_link_id[SHRED_OUT_IDX]].name, "snp_shred" ) );
+ FD_TEST( 0==strcmp( topo->links[tile->out_link_id[SIGN_OUT_IDX]].name, "snp_sign" ) );
+ } else {
+ FD_LOG_ERR(( "snp tile has unexpected cnt of output links %lu", tile->out_cnt ));
+ }
+
+ if( FD_UNLIKELY( !tile->out_cnt ) )
+ FD_LOG_ERR(( "snp tile has no primary output link" ));
+
+ ulong snp_store_mcache_depth = tile->snp.depth;
+ if( topo->links[ tile->out_link_id[ 0 ] ].depth != snp_store_mcache_depth )
+ FD_LOG_ERR(( "snp tile out depths are not equal %lu %lu",
+ topo->links[ tile->out_link_id[ 0 ] ].depth, snp_store_mcache_depth ));
+
+ FD_SCRATCH_ALLOC_INIT( l, scratch );
+ fd_snp_tile_ctx_t * ctx = FD_SCRATCH_ALLOC_APPEND( l, alignof( fd_snp_tile_ctx_t ), sizeof( fd_snp_tile_ctx_t ) );
+
+ /* Round robin */
+ ctx->round_robin_cnt = fd_topo_tile_name_cnt( topo, tile->name );
+ ctx->round_robin_id = tile->kind_id;
+
+ /* SNP */
+ fd_snp_limits_t limits = snp_limits( tile );
+ void * _stake_ci = FD_SCRATCH_ALLOC_APPEND( l, fd_stake_ci_align(), fd_stake_ci_footprint() );
+ void * _snp = FD_SCRATCH_ALLOC_APPEND( l, fd_snp_align(), fd_snp_footprint( &limits ) );
+
+ ctx->stake_ci = fd_stake_ci_join( fd_stake_ci_new( _stake_ci, ctx->identity_key ) );
+
+ fd_snp_t * snp = fd_snp_join( fd_snp_new( _snp, &limits ) );
+ ctx->snp = snp;
+ snp->cb.ctx = ctx;
+ snp->cb.rx = snp_callback_rx;
+ snp->cb.tx = snp_callback_tx;
+ snp->cb.sign = snp_callback_sign;
+ snp->apps_cnt = 1;
+ //TODO: shred port from config
+ snp->apps[0].port = 8003;
+ /* Flow control initialization. The allocation per connection is
+ arbitrary at the moment. */
+ snp->flow_cred_alloc = (long)( 4 * 1024 * 1024 ); /* 4MiB */
+
+ FD_TEST( fd_snp_init( snp ) );
+ fd_memcpy( snp->config.identity, ctx->identity_key, sizeof(fd_pubkey_t) );
+
+ ctx->keyswitch = fd_keyswitch_join( fd_topo_obj_laddr( topo, tile->keyswitch_obj_id ) );
+ FD_TEST( ctx->keyswitch );
+
+ /* Channels */
+  for( ulong i=0UL; i<tile->in_cnt; i++ ) {
+ fd_topo_link_t const * link = &topo->links[ tile->in_link_id[ i ] ];
+ fd_topo_wksp_t const * link_wksp = &topo->workspaces[ topo->objs[ link->dcache_obj_id ].wksp_id ];
+
+ if( FD_LIKELY( !strcmp( link->name, "net_shred" ) ) ) {
+ ctx->in_kind[ i ] = IN_KIND_NET_SHRED;
+ fd_net_rx_bounds_init( &ctx->in[ i ].net_rx, link->dcache );
+ continue; /* only net_rx needs to be set in this case. */
+ }
+
+ if( FD_LIKELY( !strcmp( link->name, "shred_snp" ) ) ) ctx->in_kind[ i ] = IN_KIND_SHRED;
+ else if( FD_LIKELY( !strcmp( link->name, "crds_shred" ) ) ) ctx->in_kind[ i ] = IN_KIND_CRDS; /* reusing crds_shred */
+ else if( FD_LIKELY( !strcmp( link->name, "stake_out" ) ) ) ctx->in_kind[ i ] = IN_KIND_STAKE;
+ else if( FD_LIKELY( !strcmp( link->name, "sign_snp" ) ) ) ctx->in_kind[ i ] = IN_KIND_SIGN;
+ else FD_LOG_ERR(( "shred tile has unexpected input link %lu %s", i, link->name ));
+
+ if( FD_LIKELY( !!link->mtu ) ) {
+ ctx->in[ i ].mem = link_wksp->wksp;
+ ctx->in[ i ].chunk0 = fd_dcache_compact_chunk0( ctx->in[ i ].mem, link->dcache );
+ ctx->in[ i ].wmark = fd_dcache_compact_wmark ( ctx->in[ i ].mem, link->dcache, link->mtu );
+ }
+ }
+
+ fd_topo_link_t * net_out = &topo->links[ tile->out_link_id[ NET_OUT_IDX ] ];
+
+ ctx->net_out_chunk0 = fd_dcache_compact_chunk0( fd_wksp_containing( net_out->dcache ), net_out->dcache );
+ ctx->net_out_mem = topo->workspaces[ topo->objs[ net_out->dcache_obj_id ].wksp_id ].wksp;
+ ctx->net_out_wmark = fd_dcache_compact_wmark ( ctx->net_out_mem, net_out->dcache, net_out->mtu );
+ ctx->net_out_chunk = ctx->net_out_chunk0;
+
+ fd_topo_link_t * shred_out = &topo->links[ tile->out_link_id[ SHRED_OUT_IDX ] ];
+
+ ctx->shred_out_chunk0 = fd_dcache_compact_chunk0( fd_wksp_containing( shred_out->dcache ), shred_out->dcache );
+ ctx->shred_out_mem = topo->workspaces[ topo->objs[ shred_out->dcache_obj_id ].wksp_id ].wksp;
+ ctx->shred_out_wmark = fd_dcache_compact_wmark ( ctx->shred_out_mem, shred_out->dcache, shred_out->mtu );
+ ctx->shred_out_chunk = ctx->shred_out_chunk0;
+
+ fd_topo_link_t * sign_out = &topo->links[ tile->out_link_id[ SIGN_OUT_IDX ] ];
+
+ ctx->sign_out_chunk0 = fd_dcache_compact_chunk0( fd_wksp_containing( sign_out->dcache ), sign_out->dcache );
+ ctx->sign_out_mem = topo->workspaces[ topo->objs[ sign_out->dcache_obj_id ].wksp_id ].wksp;
+ ctx->sign_out_wmark = fd_dcache_compact_wmark ( ctx->sign_out_mem, sign_out->dcache, sign_out->mtu );
+ ctx->sign_out_chunk = ctx->sign_out_chunk0;
+
+ ctx->packet = NULL;
+
+ ctx->enforced_cnt = tile->snp.enforced_destinations_cnt;
+  for( ulong i=0UL; i<tile->snp.enforced_destinations_cnt; i++ ) {
+ uint ip4_addr = tile->snp.enforced_destinations[ i ].ip;
+ ushort udp_port = tile->snp.enforced_destinations[ i ].port;
+ ctx->enforced[ i ] = fd_snp_dest_meta_map_key_from_parts( ip4_addr, udp_port );
+ }
+
+ ulong scratch_top = FD_SCRATCH_ALLOC_FINI( l, 1UL );
+ if( FD_UNLIKELY( scratch_top > (ulong)scratch + scratch_footprint( tile ) ) )
+ FD_LOG_ERR(( "scratch overflow %lu %lu %lu", scratch_top - (ulong)scratch - scratch_footprint( tile ), scratch_top, (ulong)scratch + scratch_footprint( tile ) ));
+
+ ctx->stem = NULL; /* to be set before every snp callback function */
+}
+
+static ulong
+populate_allowed_seccomp( fd_topo_t const * topo,
+ fd_topo_tile_t const * tile,
+ ulong out_cnt,
+ struct sock_filter * out ) {
+ (void)topo;
+ (void)tile;
+ populate_sock_filter_policy_fd_snp_tile( out_cnt, out, (uint)fd_log_private_logfile_fd() );
+ return sock_filter_policy_fd_snp_tile_instr_cnt;
+}
+
+static ulong
+populate_allowed_fds( fd_topo_t const * topo FD_PARAM_UNUSED,
+ fd_topo_tile_t const * tile FD_PARAM_UNUSED,
+ ulong out_fds_cnt,
+ int * out_fds ) {
+ if( FD_UNLIKELY( out_fds_cnt<2UL ) ) FD_LOG_ERR(( "out_fds_cnt %lu", out_fds_cnt ));
+ ulong out_cnt = 0UL;
+ out_fds[ out_cnt++ ] = 2; /* stderr */
+ if( FD_LIKELY( -1!=fd_log_private_logfile_fd() ) )
+ out_fds[ out_cnt++ ] = fd_log_private_logfile_fd(); /* logfile */
+ return out_cnt;
+}
+
+#define STEM_BURST (2UL)
+
+/* See explanation in fd_pack */
+#define STEM_LAZY (128L*3000L)
+
+#define STEM_CALLBACK_CONTEXT_TYPE fd_snp_tile_ctx_t
+#define STEM_CALLBACK_CONTEXT_ALIGN alignof(fd_snp_tile_ctx_t)
+
+#define STEM_CALLBACK_DURING_HOUSEKEEPING during_housekeeping
+#define STEM_CALLBACK_METRICS_WRITE metrics_write
+#define STEM_CALLBACK_BEFORE_FRAG before_frag
+#define STEM_CALLBACK_DURING_FRAG during_frag
+#define STEM_CALLBACK_AFTER_FRAG after_frag
+
+#include "../stem/fd_stem.c"
+
+fd_topo_run_tile_t fd_tile_snp = {
+ .name = "snp",
+ .populate_allowed_seccomp = populate_allowed_seccomp,
+ .populate_allowed_fds = populate_allowed_fds,
+ .scratch_align = scratch_align,
+ .scratch_footprint = scratch_footprint,
+ .privileged_init = privileged_init,
+ .unprivileged_init = unprivileged_init,
+ .run = stem_run,
+};
diff --git a/src/disco/snp/fd_snp_tile.seccomppolicy b/src/disco/snp/fd_snp_tile.seccomppolicy
new file mode 100644
index 00000000000..634b41e27a5
--- /dev/null
+++ b/src/disco/snp/fd_snp_tile.seccomppolicy
@@ -0,0 +1,36 @@
+# logfile_fd: It can be disabled by configuration, but typically tiles
+# will open a log file on boot and write all messages there.
+unsigned int logfile_fd
+
+# logging: all log messages are written to a file and/or pipe
+#
+# 'WARNING' and above are written to the STDERR pipe, while all messages
+# are always written to the log file.
+#
+# arg 0 is the file descriptor to write to. The boot process ensures
+# that descriptor 2 is always STDERR.
+write: (or (eq (arg 0) 2)
+ (eq (arg 0) logfile_fd))
+
+# logging: 'WARNING' and above fsync the logfile to disk immediately
+#
+# arg 0 is the file descriptor to fsync.
+fsync: (eq (arg 0) logfile_fd)
+
+getrandom
+
+# low_power_mode: stem calls fd_log_sleep to reduce CPU usage when
+# the tile is idle. Can be enabled by configuration.
+#
+# fd_log_sleep calls clock_nanosleep; glibc's nanosleep uses the
+# clock_nanosleep syscall. arg 0 is the clock (CLOCK_REALTIME),
+# arg 1 is flags (0), arg2 is requested time, arg3 is remainder pointer.
+clock_nanosleep: (and (eq (arg 0) CLOCK_REALTIME)
+ (eq (arg 1) 0))
+
+# low_power_mode: stem calls fd_log_sleep to reduce CPU usage when
+# the tile is idle. Can be enabled by configuration.
+#
+# fd_log_sleep calls sched_yield depending on the amount of time.
+# This syscall takes no arguments.
+sched_yield
diff --git a/src/disco/snp/generated/fd_snp_tile_seccomp.h b/src/disco/snp/generated/fd_snp_tile_seccomp.h
new file mode 100644
index 00000000000..b3c6fbce1de
--- /dev/null
+++ b/src/disco/snp/generated/fd_snp_tile_seccomp.h
@@ -0,0 +1,77 @@
+/* THIS FILE WAS GENERATED BY generate_filters.py. DO NOT EDIT BY HAND! */
+#ifndef HEADER_fd_src_disco_snp_generated_fd_snp_tile_seccomp_h
+#define HEADER_fd_src_disco_snp_generated_fd_snp_tile_seccomp_h
+
+#include "../../../../src/util/fd_util_base.h"
+#include <linux/audit.h>
+#include <linux/capability.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <linux/bpf.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stddef.h>
+
+#if defined(__i386__)
+# define ARCH_NR AUDIT_ARCH_I386
+#elif defined(__x86_64__)
+# define ARCH_NR AUDIT_ARCH_X86_64
+#elif defined(__aarch64__)
+# define ARCH_NR AUDIT_ARCH_AARCH64
+#else
+# error "Target architecture is unsupported by seccomp."
+#endif
+static const unsigned int sock_filter_policy_fd_snp_tile_instr_cnt = 21;
+
+static void populate_sock_filter_policy_fd_snp_tile( ulong out_cnt, struct sock_filter * out, unsigned int logfile_fd) {
+ FD_TEST( out_cnt >= 21 );
+ struct sock_filter filter[21] = {
+ /* Check: Jump to RET_KILL_PROCESS if the script's arch != the runtime arch */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, arch ) ) ),
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, ARCH_NR, 0, /* RET_KILL_PROCESS */ 17 ),
+ /* loading syscall number in accumulator */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, ( offsetof( struct seccomp_data, nr ) ) ),
+ /* allow write based on expression */
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_write, /* check_write */ 5, 0 ),
+ /* allow fsync based on expression */
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_fsync, /* check_fsync */ 8, 0 ),
+ /* simply allow getrandom */
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_getrandom, /* RET_ALLOW */ 14, 0 ),
+ /* allow clock_nanosleep based on expression */
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_clock_nanosleep, /* check_clock_nanosleep */ 8, 0 ),
+ /* simply allow sched_yield */
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, SYS_sched_yield, /* RET_ALLOW */ 12, 0 ),
+ /* none of the syscalls matched */
+ { BPF_JMP | BPF_JA, 0, 0, /* RET_KILL_PROCESS */ 10 },
+// check_write:
+ /* load syscall argument 0 in accumulator */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 2, /* RET_ALLOW */ 9, /* lbl_1 */ 0 ),
+// lbl_1:
+ /* load syscall argument 0 in accumulator */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 7, /* RET_KILL_PROCESS */ 6 ),
+// check_fsync:
+ /* load syscall argument 0 in accumulator */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, logfile_fd, /* RET_ALLOW */ 5, /* RET_KILL_PROCESS */ 4 ),
+// check_clock_nanosleep:
+ /* load syscall argument 0 in accumulator */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[0])),
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, CLOCK_REALTIME, /* lbl_2 */ 0, /* RET_KILL_PROCESS */ 2 ),
+// lbl_2:
+ /* load syscall argument 1 in accumulator */
+ BPF_STMT( BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, args[1])),
+ BPF_JUMP( BPF_JMP | BPF_JEQ | BPF_K, 0, /* RET_ALLOW */ 1, /* RET_KILL_PROCESS */ 0 ),
+// RET_KILL_PROCESS:
+ /* KILL_PROCESS is placed before ALLOW since it's the fallthrough case. */
+ BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS ),
+// RET_ALLOW:
+ /* ALLOW has to be reached by jumping */
+ BPF_STMT( BPF_RET | BPF_K, SECCOMP_RET_ALLOW ),
+ };
+ fd_memcpy( out, filter, sizeof( filter ) );
+}
+
+#endif
diff --git a/src/disco/topo/fd_topo.h b/src/disco/topo/fd_topo.h
index cf226bf1fb2..b533d3ff543 100644
--- a/src/disco/topo/fd_topo.h
+++ b/src/disco/topo/fd_topo.h
@@ -309,8 +309,16 @@ struct fd_topo_tile {
fd_topo_ip_port_t adtl_dests_retransmit[ FD_TOPO_ADTL_DESTS_MAX ];
ulong adtl_dests_leader_cnt;
fd_topo_ip_port_t adtl_dests_leader[ FD_TOPO_ADTL_DESTS_MAX ];
+ int is_snp_enabled;
} shred;
+ struct {
+ ulong depth;
+ char identity_key_path[ PATH_MAX ];
+ ulong enforced_destinations_cnt;
+ fd_topo_ip_port_t enforced_destinations[ FD_TOPO_ADTL_DESTS_MAX ];
+ } snp;
+
struct {
ulong disable_blockstore_from_slot;
} store;
diff --git a/src/disco/topo/fd_topob.c b/src/disco/topo/fd_topob.c
index 37007b5817d..16798a1d16d 100644
--- a/src/disco/topo/fd_topob.c
+++ b/src/disco/topo/fd_topob.c
@@ -366,6 +366,7 @@ fd_topob_auto_layout( fd_topo_t * topo,
"poh", /* FRANK only */
"pohi", /* FIREDANCER only */
"shred",
+ "snp",
"store", /* FRANK only */
"storei", /* FIREDANCER only */
"sign",
diff --git a/src/waltz/snp/Local.mk b/src/waltz/snp/Local.mk
new file mode 100644
index 00000000000..863cc985dcf
--- /dev/null
+++ b/src/waltz/snp/Local.mk
@@ -0,0 +1,32 @@
+$(call make-lib,fd_snp)
+
+# fd_snp_app, e.g. for shred tile
+$(call add-hdrs,fd_snp_app.h fd_snp_common.h)
+$(call add-objs,fd_snp_app,fd_snp)
+
+# fd_snp for snp tile
+$(call add-hdrs,fd_snp.h fd_snp_proto.h)
+$(call add-objs,fd_snp,fd_snp)
+
+# fd_snp_v1 header is not needed by applications
+$(call add-objs,fd_snp_v1,fd_snp)
+
+ifdef FD_HAS_HOSTED
+SNP_TEST_LIBS:=fd_snp fd_util fd_ballet
+
+$(call make-unit-test,test_snp_v1,test_snp_v1,$(SNP_TEST_LIBS))
+$(call run-unit-test,test_snp_v1)
+
+$(call make-unit-test,test_snp_app,test_snp_app,$(SNP_TEST_LIBS))
+$(call run-unit-test,test_snp_app)
+
+$(call make-unit-test,test_snp_common,test_snp_common,$(SNP_TEST_LIBS))
+$(call run-unit-test,test_snp_common)
+
+$(call make-unit-test,test_snp_limits,test_snp_limits,$(SNP_TEST_LIBS))
+$(call run-unit-test,test_snp_limits)
+
+$(call make-unit-test,test_snp_live, test_snp_live, $(SNP_TEST_LIBS))
+# Do not autorun test_snp_live - only for manual testing.
+
+endif
diff --git a/src/waltz/snp/fd_snp.c b/src/waltz/snp/fd_snp.c
new file mode 100644
index 00000000000..05b1822d514
--- /dev/null
+++ b/src/waltz/snp/fd_snp.c
@@ -0,0 +1,1493 @@
+#include "fd_snp.h"
+#include "fd_snp_common.h"
+#include "fd_snp_private.h"
+#include "fd_snp_proto.h"
+
+static inline int
+fd_snp_conn_map_lg_slot_cnt_from_peer_cnt( ulong peer_cnt ) {
+ /* 2 insertions per connection (+1) and map should have twice the capacity (+1) */
+ return 2 + fd_ulong_find_msb( peer_cnt );
+}
+
+static inline int
+fd_snp_dest_meta_map_lg_slot_cnt_from_peer_cnt( ulong peer_cnt ) {
+ /* map should have twice the capacity (+1) */
+ return 1 + fd_ulong_find_msb( peer_cnt );
+}
+
+ulong
+fd_snp_footprint_ext( fd_snp_limits_t const * limits,
+ fd_snp_layout_t * layout ) {
+ memset( layout, 0, sizeof(fd_snp_layout_t) );
+ if( FD_UNLIKELY( !limits ) ) return 0UL;
+
+ if( FD_UNLIKELY( limits->peer_cnt ==0UL ) ) { FD_LOG_WARNING(( "invalid limits->peer_cnt==0" )); return 0UL; }
+
+ layout->meta_sz = sizeof(fd_snp_layout_t);
+
+ /* allocate space for fd_snp_t */
+ ulong offs = sizeof(fd_snp_t);
+
+ /* allocate space for connections */
+ offs = fd_ulong_align_up( offs, fd_snp_conn_pool_align() );
+ layout->conn_pool_off = offs;
+ ulong conn_pool_footprint = fd_snp_conn_pool_footprint( limits->peer_cnt );
+ if( FD_UNLIKELY( !conn_pool_footprint ) ) { FD_LOG_WARNING(( "invalid fd_snp_conn_pool_footprint" )); return 0UL; }
+ offs += conn_pool_footprint;
+
+ /* allocate space for conn IDs */
+ offs = fd_ulong_align_up( offs, fd_snp_conn_map_align() );
+ layout->conn_map_off = offs;
+ ulong conn_map_footprint = fd_snp_conn_map_footprint( fd_snp_conn_map_lg_slot_cnt_from_peer_cnt( limits->peer_cnt ) );
+ if( FD_UNLIKELY( !conn_map_footprint ) ) { FD_LOG_WARNING(( "invalid fd_snp_conn_map_footprint" )); return 0UL; }
+ offs += conn_map_footprint;
+
+ /* allocate space for packets */
+ offs = fd_ulong_align_up( offs, fd_snp_pkt_pool_align() );
+ layout->pkt_pool_off = offs;
+ ulong pkt_pool_footprint = fd_snp_pkt_pool_footprint( limits->peer_cnt );
+ if( FD_UNLIKELY( !pkt_pool_footprint ) ) { FD_LOG_WARNING(( "invalid fd_snp_pkt_pool_footprint (pkt_pool)" )); return 0UL; }
+ offs += pkt_pool_footprint;
+
+ /* allocate space for connections' last packet */
+ offs = fd_ulong_align_up( offs, fd_snp_pkt_pool_align() );
+ layout->last_pkt_pool_off = offs;
+ ulong last_pkt_footprint = fd_snp_pkt_pool_footprint( limits->peer_cnt );
+ if( FD_UNLIKELY( !last_pkt_footprint ) ) { FD_LOG_WARNING(( "invalid fd_snp_pkt_pool_footprint (last_pkt_pool)" )); return 0UL; }
+ offs += last_pkt_footprint;
+
+ /* allocate space for dest_meta maps (a,b) */
+ offs = fd_ulong_align_up( offs, fd_snp_dest_meta_map_align() );
+ layout->dest_meta_map_off_a = offs;
+ ulong dest_meta_map_footprint_a = fd_snp_dest_meta_map_footprint( fd_snp_dest_meta_map_lg_slot_cnt_from_peer_cnt( limits->peer_cnt ) );
+ if( FD_UNLIKELY( !dest_meta_map_footprint_a ) ) { FD_LOG_WARNING(( "invalid fd_snp_dest_meta_map_footprint a" )); return 0UL; }
+ offs += dest_meta_map_footprint_a;
+
+ offs = fd_ulong_align_up( offs, fd_snp_dest_meta_map_align() );
+ layout->dest_meta_map_off_b = offs;
+ ulong dest_meta_map_footprint_b = fd_snp_dest_meta_map_footprint( fd_snp_dest_meta_map_lg_slot_cnt_from_peer_cnt( limits->peer_cnt ) );
+ if( FD_UNLIKELY( !dest_meta_map_footprint_b ) ) { FD_LOG_WARNING(( "invalid fd_snp_dest_meta_map_footprint b" )); return 0UL; }
+ offs += dest_meta_map_footprint_b;
+
+ return offs;
+}
+
+ulong
+fd_snp_footprint( fd_snp_limits_t const * limits ) {
+ fd_snp_layout_t layout;
+ return fd_snp_footprint_ext( limits, &layout );
+}
+
+void *
+fd_snp_new( void* mem,
+ fd_snp_limits_t const * limits ) {
+
+ if( FD_UNLIKELY( !mem ) ) {
+ FD_LOG_WARNING(( "NULL mem" ));
+ return NULL;
+ }
+
+ ulong align = fd_snp_align();
+ if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, align ) ) ) {
+ FD_LOG_WARNING(( "misaligned mem" ));
+ return NULL;
+ }
+
+ if( FD_UNLIKELY( !limits ) ) {
+ FD_LOG_WARNING(( "NULL limits" ));
+ return NULL;
+ }
+
+ if( FD_UNLIKELY( limits->peer_cnt == 0UL ) ) {
+ FD_LOG_WARNING(( "invalid limits" ));
+ return NULL;
+ }
+
+ fd_snp_layout_t layout;
+ ulong footprint = fd_snp_footprint_ext( limits, &layout );
+ if( FD_UNLIKELY( !footprint ) ) {
+ FD_LOG_WARNING(( "invalid footprint for config" ));
+ return NULL;
+ }
+
+ /* Zero the entire memory region */
+ fd_snp_t * snp = (fd_snp_t *)mem;
+ memset( snp, 0, fd_snp_footprint( limits ) );
+
+ /* Store the limits */
+ snp->limits = *limits;
+ snp->layout = layout;
+
+ /* Set magic number to indicate successful initialization */
+ FD_COMPILER_MFENCE();
+ snp->magic = FD_SNP_MAGIC;
+ FD_COMPILER_MFENCE();
+
+ return snp;
+}
+
+fd_snp_t *
+fd_snp_join( void* shsnp ) {
+ return shsnp;
+}
+
+fd_snp_t *
+fd_snp_init( fd_snp_t * snp ) {
+
+ fd_snp_limits_t const * limits = &snp->limits;
+
+ /* Validate layout */
+ fd_snp_layout_t layout = {0};
+ if( FD_UNLIKELY( !fd_snp_footprint_ext( limits, &layout ) ) ) {
+ FD_LOG_WARNING(( "fd_snp_footprint_ext failed" ));
+ }
+ if( FD_UNLIKELY( 0!=memcmp( &layout, &snp->layout, sizeof(fd_snp_layout_t) ) ) ) {
+ FD_LOG_HEXDUMP_WARNING(( "saved layout", &snp->layout, sizeof(fd_snp_layout_t) ));
+ FD_LOG_HEXDUMP_WARNING(( "derived layout", &layout, sizeof(fd_snp_layout_t) ));
+ FD_LOG_WARNING(( "fd_snp_layout changed. Memory corruption?" ));
+ }
+
+ /* Initialize apps (statically allocated) */
+ if( FD_UNLIKELY( snp->apps_cnt > FD_SNP_APPS_CNT_MAX ) ) {
+ FD_LOG_WARNING(( "invalid apps_cnt=%lu max=%lu", snp->apps_cnt, FD_SNP_APPS_CNT_MAX ));
+ return NULL;
+ }
+ for( ulong j=0; j<snp->apps_cnt; j++ ) {
+ if( FD_UNLIKELY( snp->apps[j].port==0 ) ) {
+ FD_LOG_WARNING(( "invalid apps[%lu].port=%hu", j, snp->apps[j].port ));
+ return NULL;
+ }
+ fd_ip4_udp_hdr_init( snp->apps[j].net_hdr, 0, 0, snp->apps[j].port );
+ snp->apps[j].multicast_net_hdr[0] = snp->apps[j].net_hdr[0];
+ snp->apps[j].multicast_net_hdr->ip4->daddr = fd_uint_bswap( snp->apps[j].multicast_ip );
+ snp->apps[j].multicast_net_hdr->udp->net_dport = fd_ushort_bswap( snp->apps[j].port );
+ }
+
+ /* Initialize conn_pool */
+ uchar * conn_pool_laddr = (uchar *)snp + layout.conn_pool_off;
+ snp->conn_pool = fd_snp_conn_pool_join( fd_snp_conn_pool_new( conn_pool_laddr, limits->peer_cnt ) );
+ if( FD_UNLIKELY( !snp->conn_pool ) ) {
+ FD_LOG_WARNING(( "NULL conn_pool" ));
+ return NULL;
+ }
+
+ /* Initialize conn_map */
+ uchar * conn_map_laddr = (uchar *)snp + layout.conn_map_off;
+ snp->conn_map = fd_snp_conn_map_join( fd_snp_conn_map_new( (void *)conn_map_laddr, fd_snp_conn_map_lg_slot_cnt_from_peer_cnt( limits->peer_cnt ) ) );
+ if( FD_UNLIKELY( !snp->conn_map ) ) {
+ FD_LOG_WARNING(( "NULL conn_map" ));
+ return NULL;
+ }
+
+ /* Initialize pkt_pool */
+ uchar * pkt_pool_laddr = (uchar *)snp + layout.pkt_pool_off;
+ snp->pkt_pool = fd_snp_pkt_pool_join( fd_snp_pkt_pool_new( pkt_pool_laddr, limits->peer_cnt ) );
+ if( FD_UNLIKELY( !snp->pkt_pool ) ) {
+ FD_LOG_WARNING(( "NULL pkt_pool" ));
+ return NULL;
+ }
+
+ /* Initialize last_pkt_pool */
+ uchar * last_pkt_pool_laddr = (uchar *)snp + layout.last_pkt_pool_off;
+ snp->last_pkt_pool = fd_snp_pkt_pool_join( fd_snp_pkt_pool_new( last_pkt_pool_laddr, limits->peer_cnt ) );
+ if( FD_UNLIKELY( !snp->last_pkt_pool ) ) {
+ FD_LOG_WARNING(( "NULL last_pkt_pool" ));
+ return NULL;
+ }
+
+ /* Initialize private state */
+ fd_rng_join( fd_rng_new( snp->config._rng, (uint)fd_tickcount(), 0UL ) );
+ uchar random_aes_key[ 16 ] = { 0 };
+ fd_snp_rng( random_aes_key, 16 );
+ fd_aes_set_encrypt_key( random_aes_key, 128, snp->config._state_enc_key );
+ fd_aes_set_decrypt_key( random_aes_key, 128, snp->config._state_dec_key );
+
+ /* Initialize flow control credits pool (to zero). */
+ if( FD_UNLIKELY( !snp->flow_cred_total ) ) { /* must be set externally. */
+ snp->flow_cred_total = 1L;
+ FD_LOG_WARNING(( "snp flow_cred_total uninitialized setting to %ld", snp->flow_cred_total ));
+ }
+ snp->flow_cred_taken = 0L;
+ if( FD_UNLIKELY( !snp->flow_cred_alloc ) ) { /* must be set externally. */
+ snp->flow_cred_alloc = 1L;
+ FD_LOG_WARNING(( "snp flow_cred_alloc uninitialized setting to %ld", snp->flow_cred_alloc ));
+ }
+
+ /* Initialize dest_meta_map */
+ uchar * dest_meta_map_laddr_a = (uchar *)snp + layout.dest_meta_map_off_a;
+ snp->dest_meta_map_a = fd_snp_dest_meta_map_join( fd_snp_dest_meta_map_new( (void *)dest_meta_map_laddr_a, fd_snp_dest_meta_map_lg_slot_cnt_from_peer_cnt( limits->peer_cnt ) ) );
+ if( FD_UNLIKELY( !snp->dest_meta_map_a ) ) {
+ FD_LOG_WARNING(( "NULL dest_meta_map_a" ));
+ return NULL;
+ }
+ uchar * dest_meta_map_laddr_b = (uchar *)snp + layout.dest_meta_map_off_b;
+ snp->dest_meta_map_b = fd_snp_dest_meta_map_join( fd_snp_dest_meta_map_new( (void *)dest_meta_map_laddr_b, fd_snp_dest_meta_map_lg_slot_cnt_from_peer_cnt( limits->peer_cnt ) ) );
+ if( FD_UNLIKELY( !snp->dest_meta_map_b ) ) {
+ FD_LOG_WARNING(( "NULL dest_meta_map_b" ));
+ return NULL;
+ }
+ snp->dest_meta_map = snp->dest_meta_map_a;
+ snp->dest_meta_update_idx = 0U;
+ snp->dest_meta_next_update_ts = 0UL;
+
+ snp->rng = fd_rng_join( fd_rng_new( snp->rng_mem, (uint)fd_tickcount() /*seed*/, 0UL ) );
+
+ memset( snp->metrics_all, 0, sizeof(fd_snp_metrics_t) );
+ memset( snp->metrics_enf, 0, sizeof(fd_snp_metrics_t) );
+
+ return snp;
+}
+
+fd_snp_t *
+fd_snp_fini( fd_snp_t* snp ) {
+ return snp;
+}
+
+/* Connections */
+
+#define FD_SNP_MAX_SESSION_ID_RETRIES (10)
+
+/* fd_snp_conn_zeroize zeroes out a fd_snp_conn_t struct.
+ Because fd_snp_conn_t implements a fd_pool, we need to save
+ the field next before zeroing out, and restore it after.
+ We use fd_memset_explicit() to make sure key material is erased. */
+void
+fd_snp_conn_zeroize( fd_snp_conn_t * conn ) {
+ ulong next = conn->next;
+ fd_memset_explicit( conn, 0, sizeof(fd_snp_conn_t) );
+ conn->next = next;
+}
+
+/* fd_snp_conn_create a new fd_snp_conn_t struct from the snp pool,
+ and inserts in the snp map by peer_addr and by session_id. */
+static inline fd_snp_conn_t *
+fd_snp_conn_create( fd_snp_t * snp,
+ ulong peer_addr,
+ uchar is_server ) {
+ fd_snp_conn_map_t * entry = NULL;
+ ulong session_id = 0UL;
+ int i = 0;
+
+ /* get a new conn from pool */
+ if( FD_UNLIKELY( !fd_snp_conn_pool_free( snp->conn_pool ) ) ) {
+ FD_SNP_LOG_DEBUG_W( "unable to find space in connection pool" );
+ return NULL;
+ }
+ fd_snp_conn_t * conn = fd_snp_conn_pool_ele_acquire( snp->conn_pool );
+ if( FD_UNLIKELY( conn==NULL ) ) {
+ FD_SNP_LOG_DEBUG_W( "unable to acquire element from connection pool" );
+ return NULL;
+ }
+
+ /* get a new last_pkt from pool */
+ if( FD_UNLIKELY( !fd_snp_pkt_pool_free( snp->last_pkt_pool ) ) ) {
+ FD_SNP_LOG_DEBUG_W( "unable to find space in packet pool" );
+ return NULL;
+ }
+ fd_snp_pkt_t * last_pkt = fd_snp_pkt_pool_ele_acquire( snp->last_pkt_pool );
+ if( FD_UNLIKELY( last_pkt==NULL ) ) {
+ FD_SNP_LOG_DEBUG_W( "unable to acquire element from packet pool" );
+ goto err;
+ }
+
+ /* insert conn in map by peer_addr. ignore failure.
+ if this fails, there's already a conn for peer_addr. */
+ entry = fd_snp_conn_map_insert( snp->conn_map, peer_addr );
+ if( FD_LIKELY( entry ) ) {
+ entry->val = conn;
+ }
+
+ /* insert conn in map by session_id. do NOT ignore failure.
+ session_id is randomly generated, in case of failure we
+ retry FD_SNP_MAX_SESSION_ID_RETRIES times, then fail. */
+ for( i=0, entry=NULL; i<FD_SNP_MAX_SESSION_ID_RETRIES && entry==NULL; i++ ) {
+ session_id = fd_rng_ulong( snp->config._rng );
+ entry = fd_snp_conn_map_insert( snp->conn_map, session_id );
+ }
+ if( FD_LIKELY( entry ) ) {
+ entry->val = conn;
+ } else {
+ /* fd_snp_conn_map_insert(..., session_id) failed n times */
+ FD_SNP_LOG_DEBUG_W( "unable to generate a unique session_id" );
+ goto err;
+ }
+ FD_SNP_LOG_DEBUG_N( "fd_snp_conn_create is_server=%u %s", is_server, FD_SNP_LOG_CONN( conn ) );
+
+ /* init conn */
+ fd_snp_conn_zeroize( conn );
+ conn->peer_addr = peer_addr;
+ conn->session_id = session_id;
+ conn->state = FD_SNP_TYPE_INVALID;
+ conn->last_pkt = last_pkt;
+ conn->_pubkey = snp->config.identity;
+ conn->is_server = is_server;
+ /* Currently, every connection is allocated the same amount
+ of credits. In the future, however, it may be possible
+ to allocate more credits to specific connections. */
+ snp->flow_cred_taken += snp->flow_cred_alloc;
+ conn->flow_rx_alloc = snp->flow_cred_alloc;
+ conn->flow_rx_level = 0L;
+ conn->flow_rx_wmark = snp->flow_cred_alloc;
+ conn->flow_rx_wmark_tstamp = fd_snp_timestamp_ms();
+ conn->flow_tx_level = 0L;
+ conn->flow_tx_wmark = LONG_MAX; /* This prevents any kind of deadlocks on startup.*/
+
+ conn->is_multicast = 0;
+
+ conn->snp_enabled = 0;
+ conn->snp_enforced = 0;
+ ulong dest_meta_map_key = fd_snp_dest_meta_map_key_from_conn( conn );
+ fd_snp_dest_meta_map_t sentinel = { 0 };
+ fd_snp_dest_meta_map_t * dest_meta = fd_snp_dest_meta_map_query( snp->dest_meta_map, dest_meta_map_key, &sentinel );
+ if( !!dest_meta->key ) {
+ conn->snp_enabled = dest_meta->val.snp_enabled;
+ conn->snp_enforced = dest_meta->val.snp_enforced;
+ }
+
+ conn->last_sent_ts = fd_snp_timestamp_ms();
+
+ /* init last_pkt */
+ last_pkt->data_sz = 0;
+
+ /* metrics */
+ snp->metrics_all->conn_acc_total += 1UL;
+ if( !!conn->snp_enforced ) {
+ snp->metrics_enf->conn_cur_total += 1UL;
+ snp->metrics_enf->conn_acc_total += 1UL;
+ }
+
+ return conn;
+
+err:
+ if( last_pkt ) {
+ fd_snp_pkt_pool_ele_release( snp->last_pkt_pool, last_pkt );
+ }
+ if( conn ) {
+ fd_snp_conn_pool_ele_release( snp->conn_pool, conn );
+ }
+ return NULL;
+}
+
+int
+fd_snp_conn_delete( fd_snp_t * snp,
+ fd_snp_conn_t * conn ) {
+ /* return taken flow credits to the pool. */
+ snp->flow_cred_taken -= conn->flow_rx_alloc;
+
+ /* metrics */
+ snp->metrics_all->conn_cur_established -= fd_ulong_if( conn->state==FD_SNP_TYPE_HS_DONE, 1UL, 0UL );
+ snp->metrics_all->conn_acc_dropped += 1UL;
+ if( !!conn->snp_enforced ) {
+ snp->metrics_enf->conn_cur_total -= 1UL;
+ snp->metrics_enf->conn_cur_established -= fd_ulong_if( conn->state==FD_SNP_TYPE_HS_DONE, 1UL, 0UL );
+ snp->metrics_enf->conn_acc_dropped += 1UL;
+ }
+
+ if( snp->last_pkt_pool ) {
+ fd_snp_pkt_pool_ele_release( snp->last_pkt_pool, conn->last_pkt );
+ }
+
+ fd_snp_conn_map_t sentinel = { 0 };
+ fd_snp_conn_map_t * entry0 = fd_snp_conn_map_query( snp->conn_map, conn->peer_addr, &sentinel );
+ if( entry0->val && entry0->val->session_id==conn->session_id ) {
+ fd_snp_conn_map_remove( snp->conn_map, entry0 );
+ }
+ fd_snp_conn_map_t * entry1 = fd_snp_conn_map_query( snp->conn_map, conn->session_id, &sentinel );
+ if( entry1->val ) {
+ fd_snp_conn_map_remove( snp->conn_map, entry1 );
+ }
+
+ fd_snp_conn_zeroize( conn );
+ fd_snp_conn_pool_ele_release( snp->conn_pool, conn );
+ return 0;
+}
+
+static inline fd_snp_conn_t *
+fd_snp_conn_query( fd_snp_t * snp,
+ ulong session_id ) {
+ if( FD_UNLIKELY( !session_id ) ) {
+ return NULL;
+ }
+ fd_snp_conn_map_t sentinel = { 0 };
+ fd_snp_conn_map_t * entry = fd_snp_conn_map_query( snp->conn_map, session_id, &sentinel );
+ return entry->val;
+}
+
+static inline fd_snp_conn_t *
+fd_snp_conn_query_by_peer( fd_snp_t * snp,
+ ulong peer_addr ) {
+ if( FD_UNLIKELY( !peer_addr ) ) {
+ return NULL;
+ }
+ fd_snp_conn_map_t sentinel = { 0 };
+ fd_snp_conn_map_t * entry = fd_snp_conn_map_query( snp->conn_map, peer_addr, &sentinel );
+ return entry->val;
+}
+
+static inline int
+fd_snp_has_enough_flow_tx_credit( fd_snp_t * snp,
+ fd_snp_conn_t * conn ) {
+ (void)snp;
+ /* Returns true if there are enough flow tx credits to send a
+ packet. It does not take responses into account (e.g.
+ ACKs), in which case one should also check flow_rx_level.
+ The receiver guarantees that there are FD_SNP_MTU bytes
+ available beyond the watermark, which may be crossed only
+ once per watermark value. This minimizes the calculations
+ around the crossing boundary and avoids weird edge cases. */
+ int has_enough_tx_credit = conn->flow_tx_level < conn->flow_tx_wmark;
+ return has_enough_tx_credit;
+}
+
+static inline int
+fd_snp_has_enough_flow_rx_credit( fd_snp_t * snp FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn ) {
+ /* Returns true if there are enough flow rx credits to receive
+ a packet. It does not take responses into account (e.g.
+ ACKs), in which case one should also check flow_tx_level.
+ The receiver guarantees that there are FD_SNP_MTU bytes
+ available beyond the watermark, which may be crossed only
+ once per watermark value. This minimizes the calculations
+ around the crossing boundary and avoids weird edge cases. */
+ int has_enough_rx_credit = conn->flow_rx_level < conn->flow_rx_wmark;
+ return has_enough_rx_credit;
+}
+
+static inline void
+fd_snp_incr_flow_tx_level( fd_snp_t * snp FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn,
+ ulong incr ) {
+ conn->flow_tx_level += (long)incr;
+}
+
+static inline void
+fd_snp_incr_flow_rx_level( fd_snp_t * snp FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn,
+ ulong incr ) {
+ conn->flow_rx_level += (long)incr;
+}
+
+static inline int
+fd_snp_finalize_udp_and_invoke_tx_cb(
+ fd_snp_t * snp,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta,
+ fd_snp_conn_t * opt_conn
+) {
+ if( FD_UNLIKELY( packet_sz==0 ) ) {
+ return 0;
+ }
+
+ uchar snp_app_id;
+ ushort dst_port;
+ uint dst_ip;
+ fd_snp_meta_into_parts( NULL, &snp_app_id, &dst_ip, &dst_port, meta );
+
+ fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)packet;
+ *hdr = *( snp->apps[ snp_app_id ].net_hdr );
+ fd_ip4_hdr_t * ip4 = hdr->ip4;
+ ip4->daddr = dst_ip;
+ ip4->net_id = fd_ushort_bswap( snp->apps[ snp_app_id ].net_id++ );
+ ip4->net_tot_len = fd_ushort_bswap( (ushort)(packet_sz - sizeof(fd_eth_hdr_t)) );
+ ip4->check = fd_ip4_hdr_check_fast( ip4 );
+ hdr->udp->net_dport = fd_ushort_bswap( dst_port );
+ hdr->udp->net_len = fd_ushort_bswap( (ushort)( packet_sz - sizeof(fd_ip4_udp_hdrs_t) + sizeof(fd_udp_hdr_t) ) );
+
+ if( !!opt_conn ) {
+ opt_conn->last_sent_ts = fd_snp_timestamp_ms();
+ }
+
+ /* metrics */
+ if( !!opt_conn ) {
+ snp->metrics_all->tx_bytes_via_snp_cnt += packet_sz;
+ snp->metrics_all->tx_pkts_via_snp_cnt += 1UL;
+ if( !!opt_conn->snp_enforced ) {
+ snp->metrics_enf->tx_bytes_via_snp_cnt += packet_sz;
+ snp->metrics_enf->tx_pkts_via_snp_cnt += 1UL;
+ }
+ } else {
+ snp->metrics_all->tx_bytes_via_udp_cnt += packet_sz;
+ snp->metrics_all->tx_pkts_via_udp_cnt += 1UL;
+ }
+ /* metrics multicast */
+ if( fd_snp_ip_is_multicast( packet ) ) {
+ snp->metrics_all->mcast_tx_pkts_cnt += 1UL;
+ snp->metrics_all->mcast_tx_bytes_cnt += packet_sz;
+ }
+
+ return snp->cb.tx ? snp->cb.tx( snp->cb.ctx, packet, packet_sz, meta ) : (int)packet_sz;
+}
+
+static inline int
+fd_snp_finalize_snp_and_invoke_tx_cb(
+ fd_snp_t * snp,
+ fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta,
+ int flow_tx_credit_bypass
+) {
+ if( FD_UNLIKELY( packet_sz==0 ) ) {
+ return 0;
+ }
+ if( FD_UNLIKELY( !flow_tx_credit_bypass && !fd_snp_has_enough_flow_tx_credit( snp, conn ) ) ) {
+ /* metrics */
+ snp->metrics_all->tx_pkts_dropped_no_credits_cnt += 1UL;
+ if( !!conn->snp_enforced ) {
+ snp->metrics_enf->tx_pkts_dropped_no_credits_cnt += 1UL;
+ }
+ FD_SNP_LOG_DEBUG_W( "[snp-finalize] unable to send snp pkt due to insufficient flow tx credits %s", FD_SNP_LOG_CONN( conn ) );
+ return -1;
+ }
+ fd_snp_incr_flow_tx_level( snp, conn, packet_sz );
+ fd_snp_v1_finalize_packet( conn, packet+sizeof(fd_ip4_udp_hdrs_t), packet_sz-sizeof(fd_ip4_udp_hdrs_t) );
+
+ return fd_snp_finalize_udp_and_invoke_tx_cb( snp, packet, packet_sz, meta & (~FD_SNP_META_OPT_HANDSHAKE), conn );
+}
+
+static inline void
+fd_snp_update_rx_metrics( fd_snp_t * snp,
+ ulong packet_sz,
+ fd_snp_meta_t meta,
+ fd_snp_conn_t * opt_conn,
+ fd_snp_dest_meta_map_t * dest_meta ) {
+ /* in this function, meta is set with FD_SNP_META_PROTO_UDP or SNP.
+ in the UDP case, opt_conn is always NULL.
+ in the SNP case, opt_conn can be NULL.
+ in both cases dest_meta was already queried,
+ but dest_meta->key could be NULL. */
+ if( ( meta & FD_SNP_META_PROTO_MASK ) == FD_SNP_META_PROTO_UDP ) {
+ /* UDP */
+ snp->metrics_all->rx_bytes_via_udp_cnt += packet_sz;
+ snp->metrics_all->rx_pkts_via_udp_cnt += 1UL;
+
+ if( FD_UNLIKELY( dest_meta->key && dest_meta->val.snp_enforced ) ) {
+ /* This should never happen, but we increase the metric to track it. */
+ snp->metrics_enf->rx_bytes_via_udp_cnt += packet_sz;
+ snp->metrics_enf->rx_pkts_via_udp_cnt += 1UL;
+ }
+ } else {
+ /* SNP */
+ snp->metrics_all->rx_bytes_via_snp_cnt += packet_sz;
+ snp->metrics_all->rx_pkts_via_snp_cnt += 1UL;
+
+ /* enforced can come either from conn (when not NULL),
+ or from dest_meta (when key is not NULL). */
+ int is_enforced = ( opt_conn && opt_conn->snp_enforced )
+ || ( dest_meta->key && dest_meta->val.snp_enforced );
+
+ if( is_enforced ) {
+ snp->metrics_enf->rx_bytes_via_snp_cnt += packet_sz;
+ snp->metrics_enf->rx_pkts_via_snp_cnt += 1UL;
+ }
+ }
+}
+
+static inline int
+fd_snp_process_flow_tx_wmark_packet(
+ fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz
+) {
+ uchar * udp_load = packet + sizeof(fd_ip4_udp_hdrs_t);
+ uchar * snp_load = udp_load + sizeof(snp_hdr_t);
+ /* This assumes that every wmark update is sent in a separate packet. */
+ uchar type = fd_snp_tlv_extract_type( snp_load );
+ if( FD_UNLIKELY( type!=FD_SNP_FRAME_MAX_DATA ) ) {
+ return 1; /* positive value, this is not necessarily an error. */
+ }
+ ulong udp_load_sz = packet_sz - sizeof(fd_ip4_udp_hdrs_t);
+ ulong snp_load_sz = udp_load_sz - sizeof(snp_hdr_t);
+ /* first validate the packet */
+ int res = fd_snp_v1_validate_packet( conn, udp_load, udp_load_sz );
+ if( FD_UNLIKELY( res < 0 ) ) {
+ FD_SNP_LOG_DEBUG_W( "[snp-pkt] tlv type %u fd_snp_v1_validate_packet failed with res %d %s", type, res, FD_SNP_LOG_CONN( conn ) );
+ return -1;
+ }
+ /* then iterate over all tlvs */
+ for( fd_snp_tlv_iter_t iter = fd_snp_tlv_iter_init( snp_load_sz );
+ !fd_snp_tlv_iter_done( iter, snp_load );
+ iter = fd_snp_tlv_iter_next( iter, snp_load ) ) {
+ fd_snp_tlv_t tlv = fd_snp_tlv_iter_tlv( iter, snp_load );
+ if( tlv.type!=FD_SNP_FRAME_MAX_DATA ) continue;
+ if( tlv.len==8U ) {
+ long wmark = (long)fd_ulong_load_8( tlv.ptr + 0UL );
+ FD_SNP_LOG_TRACE( "[snp-pkt] tlv type %u wmark prev %ld new %ld %s", tlv.type, conn->flow_tx_wmark, wmark, FD_SNP_LOG_CONN( conn ) );
+ conn->flow_tx_wmark = wmark;
+ continue;
+ }
+ if( tlv.len==16U ) {
+ long wmark = (long)fd_ulong_load_8( tlv.ptr + 0UL );
+ long level = (long)fd_ulong_load_8( tlv.ptr + 8UL );
+ FD_SNP_LOG_TRACE( "[snp-pkt] tlv type %u wmark prev %ld new %ld level prev %ld new %ld %s", tlv.type, conn->flow_tx_wmark, wmark, conn->flow_tx_level, level, FD_SNP_LOG_CONN( conn ) );
+ conn->flow_tx_wmark = wmark;
+ /* This is not 100% accurate, since pkts may still be in flight when the
+ current level was sampled by the receiver, but this is acceptable. */
+ conn->flow_tx_level = level; /* It does its best to resync if pkts have been lost. */
+ continue;
+ }
+ FD_SNP_LOG_DEBUG_W( "[snp-pkt] tlv type %u len %u mismatch! %s", tlv.type, tlv.len, FD_SNP_LOG_CONN( conn ) );
+ return -1;
+ }
+ conn->last_recv_ts = fd_snp_timestamp_ms();
+ return 0;
+}
+
+static inline int
+fd_snp_verify_snp_and_invoke_rx_cb(
+ fd_snp_t * snp,
+ fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta
+) {
+ /* Process wmark updates first. */
+ int wmark_res = fd_snp_process_flow_tx_wmark_packet( conn, packet, packet_sz );
+ if( FD_UNLIKELY( wmark_res==0 ) ) { /* the packet was processed */
+ return 0;
+ }
+ if( FD_UNLIKELY( wmark_res<0 ) ) { /* there was an error condition */
+ return -1;
+ }
+
+ /* Process any other packet. */
+ if( FD_UNLIKELY( !fd_snp_has_enough_flow_rx_credit( snp, conn ) ) ) {
+ /* metrics */
+ snp->metrics_all->rx_pkts_dropped_no_credits_cnt += 1UL;
+ if( !!conn->snp_enforced ) {
+ snp->metrics_enf->rx_pkts_dropped_no_credits_cnt += 1UL;
+ }
+ FD_SNP_LOG_DEBUG_W( "[snp-verify] unable to verify snp pkt due to insufficient flow rx credits %s", FD_SNP_LOG_CONN( conn ) );
+ return -1;
+ }
+ fd_snp_incr_flow_rx_level( snp, conn, packet_sz );
+ int res = fd_snp_v1_validate_packet( conn, packet+sizeof(fd_ip4_udp_hdrs_t), packet_sz-sizeof(fd_ip4_udp_hdrs_t) );
+ if( FD_UNLIKELY( res < 0 ) ) {
+ FD_SNP_LOG_DEBUG_W( "[snp-verify] validate packet failed with res=%d %s", res, FD_SNP_LOG_CONN( conn ) );
+ return res;
+ }
+
+ conn->last_recv_ts = fd_snp_timestamp_ms();
+ ulong data_offset = sizeof(fd_ip4_udp_hdrs_t) + 12;
+ if( FD_LIKELY( packet[data_offset]==FD_SNP_FRAME_DATAGRAM ) ) {
+ return snp->cb.rx( snp->cb.ctx, packet, packet_sz, meta );
+ } else if( FD_LIKELY( packet[data_offset]==FD_SNP_FRAME_PING ) ) {
+ FD_SNP_LOG_DEBUG_N( "[snp] received PING %s", FD_SNP_LOG_CONN( conn ) );
+ return 0;
+ }
+ FD_SNP_LOG_DEBUG_W( "[snp-verify] nothing to do!? %s", FD_SNP_LOG_CONN( conn ) );
+ return 0;
+}
+
+static inline int
+fd_snp_finalize_multicast_and_invoke_tx_cb(
+ fd_snp_t * snp,
+ fd_snp_conn_t * conn FD_PARAM_UNUSED,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta
+) {
+ if( FD_UNLIKELY( packet_sz==0 ) ) {
+ return 0;
+ }
+
+ /* no mac auth */
+ packet_sz -= 19UL;
+
+ /* snp header */
+ snp_hdr_t * udp_payload = (snp_hdr_t *)( packet + sizeof(fd_ip4_udp_hdrs_t) );
+ udp_payload->version_type = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_PAYLOAD );
+ udp_payload->session_id = 0UL;
+
+ /* ip header */
+ uchar snp_app_id;
+ fd_snp_meta_into_parts( NULL, &snp_app_id, NULL, NULL, meta );
+
+ fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)packet;
+ memcpy( hdr, snp->apps[ snp_app_id ].multicast_net_hdr, sizeof(fd_ip4_udp_hdrs_t) );
+ fd_ip4_hdr_t * ip4 = hdr->ip4;
+ ip4->net_id = fd_ushort_bswap( snp->apps[ snp_app_id ].net_id++ );
+ ip4->net_tot_len = fd_ushort_bswap( (ushort)(packet_sz - sizeof(fd_eth_hdr_t)) );
+ ip4->check = fd_ip4_hdr_check_fast( ip4 );
+ hdr->udp->net_len = fd_ushort_bswap( (ushort)( packet_sz - sizeof(fd_ip4_udp_hdrs_t) + sizeof(fd_udp_hdr_t) ) );
+
+ return snp->cb.tx ? snp->cb.tx( snp->cb.ctx, packet, packet_sz, meta ) : (int)packet_sz;
+}
+
+static inline int
+fd_snp_cache_packet_and_invoke_sign_cb(
+ fd_snp_t * snp,
+ fd_snp_conn_t * conn,
+ uchar * packet,
+ int packet_snp_sz, /* without headers */
+ uchar * to_sign
+) {
+ if( FD_LIKELY( packet_snp_sz > 0 ) ) {
+ conn->last_pkt->data_sz = (ushort)((ulong)packet_snp_sz+sizeof(fd_ip4_udp_hdrs_t));
+ memcpy( conn->last_pkt->data, packet, conn->last_pkt->data_sz );
+ return snp->cb.sign( snp->cb.ctx, conn->session_id, to_sign );
+ }
+ return packet_snp_sz;
+}
+
+int
+fd_snp_cache_packet_for_retry( fd_snp_conn_t * conn,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ if( FD_UNLIKELY( conn==NULL || conn->last_pkt==NULL ) ) {
+ return -1;
+ }
+ conn->retry_cnt = 0;
+ memcpy( conn->last_pkt->data, packet, packet_sz );
+ conn->last_pkt->data_sz = (ushort)packet_sz;
+ conn->last_pkt->meta = meta;
+ return 0;
+}
+
+int
+fd_snp_retry_cached_packet( fd_snp_t * snp,
+ fd_snp_conn_t * conn ) {
+ uchar * packet = conn->last_pkt->data;
+ ulong packet_sz = conn->last_pkt->data_sz;
+ fd_snp_meta_t meta = conn->last_pkt->meta;
+ return fd_snp_finalize_udp_and_invoke_tx_cb( snp, packet, packet_sz, meta | FD_SNP_META_OPT_BUFFERED, conn );
+}
+
+int
+fd_snp_send_ping( fd_snp_t * snp,
+ fd_snp_conn_t * conn ) {
+ uchar packet[ FD_SNP_MTU ] = { 0 };
+
+ /* PING */
+ ulong data_offset = sizeof(fd_ip4_udp_hdrs_t) + 12;
+ if( FD_LIKELY( packet!=NULL ) ) {
+ packet[data_offset] = FD_SNP_FRAME_PING;
+ ushort data_sz_h = (ushort)0;
+ memcpy( packet+data_offset+1, &data_sz_h, 2 );
+ }
+ data_offset += 3;
+
+ ulong packet_sz = 0 + data_offset + 19;
+ fd_snp_meta_t meta = conn->peer_addr | FD_SNP_META_PROTO_V1;
+ return fd_snp_finalize_snp_and_invoke_tx_cb( snp, conn, packet, packet_sz, meta | FD_SNP_META_OPT_BUFFERED, 0/*flow_tx_credit_bypass*/ );
+}
+
+static inline int
+fd_snp_pkt_pool_store( fd_snp_t * snp,
+ fd_snp_conn_t const * conn,
+ uchar const * packet,
+ ulong packet_sz,
+ uchar send ) {
+ if( FD_UNLIKELY( !fd_snp_pkt_pool_free( snp->pkt_pool ) ) ) {
+ return -1;
+ }
+ fd_snp_pkt_t * pkt = fd_snp_pkt_pool_ele_acquire( snp->pkt_pool );
+ if( FD_LIKELY( pkt ) ) {
+ pkt->session_id = conn->session_id;
+ memcpy( pkt->data, packet, packet_sz );
+ pkt->data_sz = (ushort)packet_sz;
+ pkt->send = send;
+ }
+ return 0;
+}
+
+static inline void
+fd_snp_pkt_pool_process(
+ fd_snp_t * snp,
+ fd_snp_conn_t * conn,
+ fd_snp_meta_t meta
+) {
+ ulong meta_buffered = ( meta | FD_SNP_META_OPT_BUFFERED );
+ ulong max = fd_snp_pkt_pool_max( snp->pkt_pool );
+ ulong used = fd_snp_pkt_pool_used( snp->pkt_pool );
+ ulong idx = 0;
+ ulong used_ele = 0;
+ fd_snp_pkt_t * ele = snp->pkt_pool;
+ for( ; idx<max; idx++, ele++ ) {
+ if( ele->session_id == 0 ) continue;
+ if( ele->session_id == conn->session_id ) {
+ uchar * buf = ele->data;
+ ulong buf_sz = (ulong)ele->data_sz;
+
+ /* ignore return from callbacks for cached packets */
+ FD_PARAM_UNUSED int res = 0;
+ if( ele->send==1 ) {
+ res = fd_snp_finalize_snp_and_invoke_tx_cb( snp, conn, buf, buf_sz, meta_buffered, 0/*flow_tx_credit_bypass*/ );
+ } else {
+ res = fd_snp_verify_snp_and_invoke_rx_cb( snp, conn, buf, buf_sz, meta_buffered );
+ }
+
+ /* delete cached packet */
+ ele->session_id = 0;
+ fd_snp_pkt_pool_idx_release( snp->pkt_pool, idx );
+
+ if( res<0 ) { FD_SNP_LOG_DEBUG_W( "[snp-pool] unable to process cached packet ele->send=%u %s", ele->send, FD_SNP_LOG_CONN( conn ) ); }
+ }
+ if( ++used_ele>=used ) break;
+ }
+}
+
+static inline int
+fd_snp_send_flow_rx_wmark_packet( fd_snp_t * snp,
+ fd_snp_conn_t * conn ) {
+ uchar packet[1514];
+ const ulong off = sizeof(fd_ip4_udp_hdrs_t) + sizeof(snp_hdr_t);
+ const ulong packet_sz = off + (1UL+2UL+8UL)/*tlv with wmark */ + (1UL+2UL+8UL+8UL)/*tlv with wmark and level */ + (1UL+2UL+16UL)/*hmac*/;
+ fd_snp_meta_t meta = conn->peer_addr | FD_SNP_META_PROTO_V1;
+ /* backward compatible format */
+ packet [off + 0UL ] = FD_SNP_FRAME_MAX_DATA;
+ FD_STORE( ushort, packet + off + 1UL, 8U );
+ FD_STORE( long, packet + off + 3UL, conn->flow_rx_wmark );
+ /* new format */
+ packet [off + 11UL ] = FD_SNP_FRAME_MAX_DATA;
+ FD_STORE( ushort, packet + off + 12UL, 16U );
+ FD_STORE( long, packet + off + 14UL, conn->flow_rx_wmark );
+ FD_STORE( long, packet + off + 22UL, conn->flow_rx_level );
+ conn->flow_rx_wmark_tstamp = fd_snp_timestamp_ms();
+ return fd_snp_finalize_snp_and_invoke_tx_cb( snp, conn, packet, packet_sz, meta | FD_SNP_META_OPT_BUFFERED, 1/*flow_tx_credit_bypass*/ );
+}
+
+static inline void
+fd_snp_dest_meta_map_update_on_handshake( fd_snp_t * snp,
+ fd_snp_conn_t * conn ) {
+ FD_TEST( snp );
+ uint ip4_addr = 0;
+ ushort udp_port = 0;
+ fd_snp_peer_addr_into_parts( &ip4_addr, &udp_port, conn->peer_addr );
+ ulong key = fd_snp_dest_meta_map_key_from_conn( conn );
+ fd_snp_dest_meta_map_t sentinel = { 0 };
+ fd_snp_dest_meta_map_t * entry = fd_snp_dest_meta_map_query( snp->dest_meta_map, key, &sentinel );
+ int is_new = 0;
+ if( FD_UNLIKELY( !entry->key ) ) {
+ entry = fd_snp_dest_meta_map_insert( snp->dest_meta_map, key );
+ is_new = 1;
+ }
+ if( FD_UNLIKELY( is_new ) ) {
+ entry->val.ip4_addr = ip4_addr;
+ entry->val.udp_port = udp_port;
+ }
+ entry->val.update_idx = snp->dest_meta_update_idx;
+ entry->val.snp_available = 1;
+ entry->val.snp_enabled = 1;
+
+ FD_SNP_LOG_DEBUG_N( "%u.%u.%u.%u:%u snp_available %x snp_enabled %x %s",
+ (entry->val.ip4_addr>>0)&0xff, (entry->val.ip4_addr>>8)&0xff, (entry->val.ip4_addr>>16)&0xff, (entry->val.ip4_addr>>24)&0xff, entry->val.udp_port,
+ entry->val.snp_available, entry->val.snp_enabled, is_new ? "(auto-detected!)" : "(detected!)" );
+}
+
+/* fd_snp_send sends a packet to a peer.
+
+   Workflow:
+   1. Validate input
+   2. If proto==UDP, send packet as UDP
+   3. Query connection by peer (meta)
+   4. (likely case) If we have an established connection, send packet and return
+   5. If we don't have a connection, create a new connection
+   6. If packet_sz > 0, cache current packet
+   7. If we did have a connection, return
+   8. Prepare client_initial, overwrite packet
+   9. Send client_initial */
+int
+fd_snp_send( fd_snp_t * snp,
+             uchar * packet,
+             ulong packet_sz,
+             fd_snp_meta_t meta ) {
+
+  /* 1. Validate input */
+  if( packet_sz > FD_SNP_MTU ) {
+    return -1;
+  }
+
+  /* 2. If proto==UDP, send packet as UDP */
+  ulong proto = meta & FD_SNP_META_PROTO_MASK;
+  if( FD_LIKELY( proto==FD_SNP_META_PROTO_UDP ) ) {
+    FD_SNP_LOG_TRACE( "[snp-send] UDP send" );
+    /* metrics */
+    ulong dest_meta_map_key = fd_snp_peer_addr_from_meta( meta );
+    fd_snp_dest_meta_map_t sentinel = { 0 };
+    fd_snp_dest_meta_map_t * dest_meta = fd_snp_dest_meta_map_query( snp->dest_meta_map, dest_meta_map_key, &sentinel );
+    if( !!dest_meta->key && packet_sz>0UL ) {
+      if( !!dest_meta->val.snp_enforced ) {
+        /* This should never happen. It would indicate an error. */
+        snp->metrics_enf->tx_bytes_via_udp_cnt += packet_sz;
+        snp->metrics_enf->tx_pkts_via_udp_cnt += 1UL;
+      }
+      if( !!dest_meta->val.snp_available ) {
+        snp->metrics_all->tx_bytes_via_udp_to_snp_avail_cnt += packet_sz;
+        snp->metrics_all->tx_pkts_via_udp_to_snp_avail_cnt += 1UL;
+        if( !!dest_meta->val.snp_enforced ) {
+          /* This should never happen. It would indicate an error. */
+          snp->metrics_enf->tx_bytes_via_udp_to_snp_avail_cnt += packet_sz;
+          snp->metrics_enf->tx_pkts_via_udp_to_snp_avail_cnt += 1UL;
+        }
+      }
+    }
+    return fd_snp_finalize_udp_and_invoke_tx_cb( snp, packet, packet_sz, meta, NULL );
+  }
+
+  /* 3. Query connection by peer (meta) */
+  ulong peer_addr = meta & FD_SNP_META_PEER_MASK;
+  fd_snp_conn_t * conn = fd_snp_conn_query_by_peer( snp, peer_addr );
+
+  /* 4. (likely case) If we have an established connection, send packet and return */
+  if( FD_LIKELY( conn!=NULL && conn->state==FD_SNP_TYPE_HS_DONE ) ) {
+    if( FD_UNLIKELY( conn->is_multicast ) ) {
+      if( meta & FD_SNP_META_OPT_BROADCAST ) {
+        return 0;
+      }
+      return fd_snp_finalize_multicast_and_invoke_tx_cb( snp, conn, packet, packet_sz, meta );
+    }
+    FD_SNP_LOG_TRACE( "[snp-send] SNP send %s", FD_SNP_LOG_CONN( conn ) );
+    return fd_snp_finalize_snp_and_invoke_tx_cb( snp, conn, packet, packet_sz, meta, 0/*flow_tx_credit_bypass*/ );
+  } /* else is implicit */
+
+  /* 5. If we don't have a connection, create a new connection.
+     Note: a single NULL check after create suffices; past this point
+     conn is guaranteed non-NULL (the previous duplicated check was
+     dead code and would have passed NULL to FD_SNP_LOG_CONN). */
+  if( conn==NULL ) {
+    conn = fd_snp_conn_create( snp, peer_addr, /* is_server */ 0 );
+    if( FD_UNLIKELY( conn==NULL ) ) {
+      FD_SNP_LOG_DEBUG_W( "[snp-send] fd_snp_conn_create returned NULL" );
+      return -1;
+    }
+    conn->is_server = 0;
+  }
+
+  /* 6. If packet_sz > 0, cache current packet */
+  if( packet_sz>0 ) {
+    FD_SNP_LOG_TRACE( "[snp-send] cache packet" );
+    if( FD_UNLIKELY( fd_snp_pkt_pool_store( snp, conn, packet, packet_sz, /* send */ 1 ) < 0 ) ) {
+      FD_SNP_LOG_DEBUG_W( "unable to cache packet in pool due to insufficient space %s", FD_SNP_LOG_CONN( conn ) );
+      return -1;
+    }
+  }
+
+  /* 7. If we did have a connection (handshake already in flight), return */
+  if( FD_UNLIKELY( conn->state != 0 ) ) {
+    return 0; /* success */
+  } /* else is implicit */
+
+  /* 8. Prepare client_initial, overwrite packet */
+  int sz = fd_snp_v1_client_init( &snp->config, conn, NULL, 0UL, packet + sizeof(fd_ip4_udp_hdrs_t), NULL );
+  if( FD_UNLIKELY( sz<=0 ) ) {
+    FD_SNP_LOG_DEBUG_W( "[snp-send] fd_snp_v1_client_init failed %s", FD_SNP_LOG_CONN( conn ) );
+    return -1;
+  }
+
+  /* 9. Send client_initial */
+  FD_SNP_LOG_DEBUG_N( "[snp-send] SNP send hs1 %s", FD_SNP_LOG_CONN( conn ) );
+  packet_sz = (ulong)sz + sizeof(fd_ip4_udp_hdrs_t);
+  fd_snp_cache_packet_for_retry( conn, packet, packet_sz, meta | FD_SNP_META_OPT_HANDSHAKE );
+  return fd_snp_finalize_udp_and_invoke_tx_cb( snp, packet, packet_sz, meta | FD_SNP_META_OPT_HANDSHAKE, conn );
+}
+
+/* Workflow:
+   1. Parse UDP: derive which app to send the packet to
+   2. Parse SNP: derive proto and meta
+   3. If proto==UDP, recv packet as UDP
+   4. Query connection by session_id
+
+   5. (likely case) Recv state machine
+
+      R1. If multicast, accept
+      R2. Validate conn, or drop
+      R3. (likely case) conn established + validate integrity, accept
+      R4. state==4, cache packet
+
+   6. Handshake state machine
+      ...
+
+   7. Send handshake packet (if any)
+   8. If connection is established, send/recv cached packets */
+int
+fd_snp_process_packet( fd_snp_t * snp,
+                       uchar * packet,
+                       ulong packet_sz ) {
+  /* 1. Parse UDP: derive which app to send the packet to */
+  if( packet_sz <= sizeof(fd_ip4_udp_hdrs_t) ) {
+    return -1;
+  }
+
+  fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)packet;
+  uint src_ip = hdr->ip4->saddr;
+  ushort src_port = fd_ushort_bswap( hdr->udp->net_sport );
+  ushort dst_port = fd_ushort_bswap( hdr->udp->net_dport );
+
+  /* metrics */
+  snp->metrics_all->rx_bytes_cnt += packet_sz;
+  snp->metrics_all->rx_pkts_cnt += 1UL;
+  fd_snp_dest_meta_map_t sentinel = { 0 };
+  fd_snp_dest_meta_map_t * dest_meta = fd_snp_dest_meta_map_query( snp->dest_meta_map,
+      fd_snp_peer_addr_from_meta( fd_snp_meta_from_parts( 0, 0, src_ip, src_port ) ), &sentinel );
+  int snp_enforced = dest_meta->key && dest_meta->val.snp_enforced;
+  if( snp_enforced ) {
+    snp->metrics_enf->rx_bytes_cnt += packet_sz;
+    snp->metrics_enf->rx_pkts_cnt += 1UL;
+  }
+
+  /* metrics multicast */
+  if( fd_snp_ip_is_multicast( packet ) ) {
+    snp->metrics_all->mcast_rx_pkts_cnt += 1UL;
+    snp->metrics_all->mcast_rx_bytes_cnt += packet_sz;
+  }
+
+  /* Linear scan of registered apps by destination UDP port. */
+  uchar snp_app_id;
+  for( snp_app_id=0U; snp_app_id<snp->apps_cnt; snp_app_id++ ) {
+    if( snp->apps[ snp_app_id ].port == dst_port ) {
+      break;
+    }
+  }
+  if( FD_UNLIKELY( snp_app_id>=snp->apps_cnt ) ) {
+    /* The packet is not for SNP, ignore */
+    FD_SNP_LOG_TRACE( "[snp-pkt] app not found for dst_port=%u, fallback to UDP", dst_port );
+    fd_snp_meta_t meta = fd_snp_meta_from_parts( FD_SNP_META_PROTO_UDP, snp_app_id, src_ip, src_port );
+    /* metrics */
+    fd_snp_update_rx_metrics( snp, packet_sz, meta, NULL, dest_meta );
+    return snp->cb.rx( snp->cb.ctx, packet, packet_sz, meta );
+  }
+
+  /* 2. Parse SNP: derive proto and meta */
+  ulong proto = FD_SNP_META_PROTO_UDP;
+
+  if( FD_LIKELY( packet_sz >= sizeof(fd_ip4_udp_hdrs_t) + 4 ) ) {
+    uchar const * magic = packet + sizeof(fd_ip4_udp_hdrs_t);
+    if( (*magic)=='S' && (*(magic+1))=='N' && (*(magic+2))=='P' ) {
+      proto = FD_SNP_META_PROTO_V1;
+    }
+  }
+
+  fd_snp_meta_t meta = fd_snp_meta_from_parts( proto, snp_app_id, src_ip, src_port );
+  ulong peer_addr = meta & FD_SNP_META_PEER_MASK;
+
+  /* 3. If proto==UDP, recv packet as UDP */
+  if( proto==FD_SNP_META_PROTO_UDP ) {
+    /* metrics */
+    fd_snp_update_rx_metrics( snp, packet_sz, meta, NULL, dest_meta );
+    return snp->cb.rx( snp->cb.ctx, packet, packet_sz, meta );
+  } /* else is implicit */
+
+  /* 4. Query connection by session_id */
+  snp_hdr_t * head = (snp_hdr_t *)(packet + sizeof(fd_ip4_udp_hdrs_t));
+  ulong session_id = head->session_id;
+  fd_snp_conn_t * conn = fd_snp_conn_query( snp, session_id );
+
+  /* metrics */
+  fd_snp_update_rx_metrics( snp, packet_sz, meta, conn, dest_meta );
+
+  /* 5. (likely case) Recv state machine */
+  int type = snp_hdr_type( head );
+  if( FD_LIKELY( type==FD_SNP_TYPE_PAYLOAD ) ) {
+    /* R1. If multicast, accept */
+    if( FD_UNLIKELY( fd_snp_ip_is_multicast( packet ) ) ) {
+      return snp->cb.rx( snp->cb.ctx, packet, packet_sz, meta );
+    }
+
+    /* R2. Validate conn, or drop */
+    if( FD_UNLIKELY( conn==NULL || conn->peer_addr != peer_addr ) ) {
+      FD_SNP_LOG_DEBUG_W( "[snp-pkt] invalid conn or IP" );
+      return -1;
+    }
+
+    /* R3. (likely case) conn established + validate integrity, accept */
+    if( FD_LIKELY( conn->state==FD_SNP_TYPE_HS_DONE ) ) {
+      return fd_snp_verify_snp_and_invoke_rx_cb( snp, conn, packet, packet_sz, meta );
+    }
+
+    /* R4. state==4 or 5, cache packet */
+    if( FD_LIKELY( conn->state==FD_SNP_TYPE_HS_SERVER_FINI || conn->state==FD_SNP_TYPE_HS_CLIENT_FINI ) ) {
+      if( FD_UNLIKELY( fd_snp_pkt_pool_store( snp, conn, packet, packet_sz, /* recv */ 0 ) < 0 ) ) {
+        FD_SNP_LOG_DEBUG_W( "unable to cache packet in pool %s", FD_SNP_LOG_CONN( conn ) );
+        return -1;
+      }
+      return 0;
+    }
+
+    return -1;
+  }
+
+  /* 6. Handshake state machine */
+
+  uchar * pkt = packet + sizeof(fd_ip4_udp_hdrs_t);
+  ulong pkt_sz = packet_sz - sizeof(fd_ip4_udp_hdrs_t);
+  uchar to_sign[32];
+  int sz = 0;
+  fd_snp_conn_t conn_empty[1] = { 0 };
+  conn_empty->peer_addr = peer_addr;
+  conn_empty->snp_enforced = (uchar)snp_enforced; /* only used for accurate metrics */
+  switch( type ) {
+
+    /* HS1. Server receives client_init and sends server_init */
+    case FD_SNP_TYPE_HS_CLIENT_INIT: {
+      /* Whether there was or not an existing connection, we allow to create a new one */
+      conn = conn_empty; /* As a side effect, conn is not NULL */
+      sz = fd_snp_v1_server_init( &snp->config, conn, pkt, pkt_sz, pkt, NULL );
+      FD_SNP_LOG_DEBUG_N( "[snp-hsk] fd_snp_v1_server_init sz=%d %s", sz, FD_SNP_LOG_CONN( conn ) );
+    } break;
+
+    /* HS2. Client receives server_init and sends client_cont */
+    case FD_SNP_TYPE_HS_SERVER_INIT: {
+      if( conn==NULL ) {
+        return -1;
+      }
+      sz = fd_snp_v1_client_cont( &snp->config, conn, pkt, pkt_sz, pkt, NULL );
+      FD_SNP_LOG_DEBUG_N( "[snp-hsk] fd_snp_v1_client_cont sz=%d %s", sz, FD_SNP_LOG_CONN( conn ) );
+      if( sz > 0 ) {
+        fd_snp_dest_meta_map_update_on_handshake( snp, conn );
+      }
+    } break;
+
+    /* HS3. Server receives client_cont and sends server_fini */
+    case FD_SNP_TYPE_HS_CLIENT_CONT: {
+      sz = fd_snp_v1_server_fini_precheck( &snp->config, conn_empty, pkt, pkt_sz, pkt, to_sign );
+      FD_SNP_LOG_DEBUG_N( "[snp-hsk] fd_snp_v1_server_fini_precheck sz=%d %s", sz, FD_SNP_LOG_CONN( conn_empty ) );
+      if( FD_UNLIKELY( sz < 0 ) ) {
+        return -1;
+      }
+      conn = fd_snp_conn_query_by_peer( snp, peer_addr );
+      /* The likely case is that conn==NULL, ie. there's no existing conn to the peer,
+         and the handshake proceeds as expected. */
+      if( FD_LIKELY( conn==NULL || conn->state==FD_SNP_TYPE_HS_DONE ) ) {
+        conn = fd_snp_conn_create( snp, peer_addr, /* is_server */ 1 );
+      }
+      if( conn==NULL ) {
+        return -1;
+      }
+      if( conn->state==FD_SNP_TYPE_HS_SERVER_FINI ) {
+        /* This immediate retry is not necessary, but it accelerates the handshake. */
+        return fd_snp_retry_cached_packet( snp, conn );
+      }
+      sz = fd_snp_v1_server_fini( &snp->config, conn, pkt, pkt_sz, pkt, to_sign );
+      FD_SNP_LOG_DEBUG_N( "[snp-hsk] fd_snp_v1_server_fini sz=%d %s", sz, FD_SNP_LOG_CONN( conn ) );
+      if( FD_UNLIKELY( sz < 0 ) ) {
+        return -1;
+      }
+      if( sz > 0 ) {
+        fd_snp_dest_meta_map_update_on_handshake( snp, conn );
+      }
+      return fd_snp_cache_packet_and_invoke_sign_cb( snp, conn, packet, sz, to_sign );
+    } break;
+
+    /* HS4. Client receives server_fini and sends client_fini */
+    case FD_SNP_TYPE_HS_SERVER_FINI: {
+      if( conn==NULL ) {
+        return -1;
+      }
+      if( FD_LIKELY( conn->state == FD_SNP_TYPE_HS_CLIENT_CONT ) ) {
+        sz = fd_snp_v1_client_fini( &snp->config, conn, pkt, pkt_sz, pkt, to_sign );
+        FD_SNP_LOG_DEBUG_N( "[snp-hsk] fd_snp_v1_client_fini sz=%d %s", sz, FD_SNP_LOG_CONN( conn ) );
+        if( FD_UNLIKELY( sz < 0 ) ) {
+          return -1;
+        }
+        conn->last_recv_ts = fd_snp_timestamp_ms();
+        return fd_snp_cache_packet_and_invoke_sign_cb( snp, conn, packet, sz, to_sign );
+      } else if( conn->state==FD_SNP_TYPE_HS_DONE ) {
+        /* This immediate retry is necessary, because from the client perspective
+           the handshake is completed, and thus housekeeping wouldn't be retrying.
+           But if the server re-sends server_fini, it means it didn't receive
+           client_fini, and so we have to retry. */
+        conn->last_recv_ts = fd_snp_timestamp_ms();
+        return fd_snp_retry_cached_packet( snp, conn );
+      }
+    } break;
+
+    /* HS5. Server receives client_fini and accepts */
+    case FD_SNP_TYPE_HS_CLIENT_FINI: {
+      if( conn==NULL ) {
+        return -1;
+      }
+      sz = fd_snp_v1_server_acpt( &snp->config, conn, pkt, pkt_sz, pkt, NULL );
+      FD_SNP_LOG_DEBUG_N( "[snp-hsk] fd_snp_v1_server_acpt sz=%d %s", sz, FD_SNP_LOG_CONN( conn ) );
+      if( FD_LIKELY( sz>=0 ) ) {
+        conn->last_recv_ts = fd_snp_timestamp_ms();
+        /* Update the default connection to peer_addr to this conn */
+        fd_snp_conn_map_t sentinel = { 0 };
+        fd_snp_conn_map_t * entry = fd_snp_conn_map_query( snp->conn_map, peer_addr, &sentinel );
+        if( entry->val!=NULL && entry->val!=conn ) {
+          entry->val = conn;
+        }
+        /* metrics */
+        snp->metrics_all->conn_cur_established += 1UL;
+        snp->metrics_all->conn_acc_established += 1UL;
+        if( !!conn->snp_enforced ) {
+          snp->metrics_enf->conn_cur_established += 1UL;
+          snp->metrics_enf->conn_acc_established += 1UL;
+        }
+      }
+    } break;
+
+    /* Drop any other packet */
+    default:
+      return -1;
+  }
+
+  /* 7. Send handshake packet (if any) */
+  if( FD_UNLIKELY( sz < 0 ) ) {
+    return -1;
+  }
+  if( FD_LIKELY( sz > 0 ) ) {
+    packet_sz = (ulong)sz + sizeof(fd_ip4_udp_hdrs_t);
+    fd_snp_cache_packet_for_retry( conn, packet, packet_sz, meta | FD_SNP_META_OPT_HANDSHAKE );
+    sz = fd_snp_finalize_udp_and_invoke_tx_cb( snp, packet, packet_sz, meta | FD_SNP_META_OPT_HANDSHAKE, conn );
+  }
+
+  /* 8. If connection is established, send/recv cached packets */
+  if( FD_UNLIKELY( conn && conn->state==FD_SNP_TYPE_HS_DONE ) ) {
+    fd_snp_pkt_pool_process( snp, conn, meta );
+  }
+
+  return sz; /* return value is from the handshake msg, not cached packets */
+}
+
+/* fd_snp_process_signature completes a pending handshake step once the
+   asynchronously requested signature for session_id becomes available,
+   patching the signature into the cached handshake packet and sending
+   it.  Returns the tx callback's result on success, -1 on error
+   (unknown session or unexpected connection state). */
+int
+fd_snp_process_signature( fd_snp_t * snp,
+                          ulong session_id,
+                          uchar const signature[ 64 ] ) {
+
+  fd_snp_conn_t * conn = fd_snp_conn_query( snp, session_id );
+  if( conn==NULL ) {
+    return -1;
+  }
+
+  /* Reconstruct the meta for the cached handshake packet. */
+  fd_snp_meta_t meta = conn->peer_addr | FD_SNP_META_PROTO_V1 | FD_SNP_META_OPT_BUFFERED | FD_SNP_META_OPT_HANDSHAKE;
+
+  int sz;
+  switch( conn->state ) {
+    /* HS3. Server receives client_cont and sends server_fini */
+    case FD_SNP_TYPE_HS_SERVER_FINI_SIG: {
+      fd_snp_v1_server_fini_add_signature( conn, conn->last_pkt->data+sizeof(fd_ip4_udp_hdrs_t), signature );
+      conn->retry_cnt = 0;
+      conn->last_pkt->meta = meta;
+      return fd_snp_finalize_udp_and_invoke_tx_cb( snp, conn->last_pkt->data, conn->last_pkt->data_sz, meta, conn );
+    } break;
+
+    /* HS4. Client receives server_fini and sends client_fini */
+    case FD_SNP_TYPE_HS_CLIENT_FINI_SIG: {
+      fd_snp_v1_client_fini_add_signature( conn, conn->last_pkt->data+sizeof(fd_ip4_udp_hdrs_t), signature );
+      sz = fd_snp_finalize_udp_and_invoke_tx_cb( snp, conn->last_pkt->data, conn->last_pkt->data_sz, meta, conn );
+
+      /* process cached packets before return */
+      fd_snp_pkt_pool_process( snp, conn, meta );
+
+      /* metrics: from the client's perspective the connection is now established */
+      snp->metrics_all->conn_cur_established += 1UL;
+      snp->metrics_all->conn_acc_established += 1UL;
+      if( !!conn->snp_enforced ) {
+        snp->metrics_enf->conn_cur_established += 1UL;
+        snp->metrics_enf->conn_acc_established += 1UL;
+      }
+
+      return sz; /* return value is from the handshake msg, not cached packets */
+    } break;
+  }
+  /* Any other state: the signature is unexpected, drop it. */
+  return -1;
+}
+
+void
+fd_snp_housekeeping( fd_snp_t * snp ) {
+  ulong max = fd_snp_conn_pool_max( snp->conn_pool );
+  ulong used = fd_snp_conn_pool_used( snp->conn_pool );
+  ulong idx = 0;
+  ulong used_ele = 0;
+  fd_snp_conn_t * conn = snp->conn_pool;
+
+#define FD_SNP_HANDSHAKE_RETRY_MAX (5U)
+#define FD_SNP_HANDSHAKE_RETRY_MS (500L)
+#define FD_SNP_KEEP_ALIVE_MS (4000L)
+#define FD_SNP_TIMEOUT_MS (FD_SNP_KEEP_ALIVE_MS * 3L + 1000L)
+#define FD_SNP_DEST_META_UPDATE_MS (12000L)
+#define FD_SNP_FLOW_RX_WMARK_MS (4000L)
+
+  long now = fd_snp_timestamp_ms();
+  /* Scan the conn pool linearly, stopping early once all used elements
+     have been visited. */
+  for( ; idx<max && used_ele<used; idx++, conn++ ) {
+    if( conn->session_id == 0 ) continue;
+    used_ele++;
+
+    if( conn->state==FD_SNP_TYPE_INVALID ) {
+      FD_SNP_LOG_DEBUG_W( "[snp-hkp] connection invalid %s", FD_SNP_LOG_CONN( conn ) );
+      fd_snp_conn_delete( snp, conn );
+      continue;
+    }
+
+    if( FD_SNP_TYPE_INVALID < conn->state && conn->state < FD_SNP_TYPE_HS_DONE ) {
+      if( conn->retry_cnt >= FD_SNP_HANDSHAKE_RETRY_MAX ) {
+        FD_SNP_LOG_DEBUG_W( "[snp-hkp] retry expired - deleting %s", FD_SNP_LOG_CONN( conn ) );
+        /* metrics */
+        snp->metrics_all->conn_acc_dropped_handshake += 1UL;
+        if( !!conn->snp_enforced ) {
+          snp->metrics_enf->conn_acc_dropped_handshake += 1UL;
+        }
+        fd_snp_conn_delete( snp, conn );
+        continue;
+      }
+      if( now > conn->last_sent_ts + FD_SNP_HANDSHAKE_RETRY_MS ) {
+        FD_SNP_LOG_DEBUG_N( "[snp-hkp] retry %d %s", conn->retry_cnt, FD_SNP_LOG_CONN( conn ) );
+        fd_snp_retry_cached_packet( snp, conn );
+        conn->retry_cnt++;
+        continue;
+      }
+    }
+
+    if( FD_LIKELY( conn->state==FD_SNP_TYPE_HS_DONE ) ) {
+      if( now > conn->last_recv_ts + FD_SNP_TIMEOUT_MS ) {
+        FD_SNP_LOG_DEBUG_W( "[snp-hkp] timeout - deleting %s", FD_SNP_LOG_CONN( conn ) );
+        /* metrics */
+        snp->metrics_all->conn_acc_dropped_established += 1UL;
+        if( !!conn->snp_enforced ) {
+          snp->metrics_enf->conn_acc_dropped_established += 1UL;
+        }
+
+        /* Capture peer info and update dest_meta BEFORE deleting conn:
+           conn must not be dereferenced after fd_snp_conn_delete
+           (use-after-free). */
+        uint ip4_addr = 0;
+        ushort udp_port = 0;
+        fd_snp_peer_addr_into_parts( &ip4_addr, &udp_port, conn->peer_addr );
+        ulong dest_meta_map_key = fd_snp_dest_meta_map_key_from_conn( conn );
+        fd_snp_dest_meta_map_t sentinel = { 0 };
+        fd_snp_dest_meta_map_t * entry = fd_snp_dest_meta_map_query( snp->dest_meta_map, dest_meta_map_key, &sentinel );
+        int entry_found = !!entry->key;
+        if( !entry_found ) {
+          FD_SNP_LOG_DEBUG_W( "[snp-hkp] dest_meta_map unable to delete %s", FD_SNP_LOG_CONN( conn ) );
+        } else {
+          entry->val.snp_enabled = 0;
+          FD_SNP_LOG_DEBUG_N( "[snp-hkp] %s snp_available %x snp_enabled %x (** disabled **)", FD_SNP_LOG_CONN( conn ), entry->val.snp_available, entry->val.snp_enabled );
+        }
+
+        /* Delete first so the pool slot is free for re-establishment. */
+        fd_snp_conn_delete( snp, conn );
+
+        if( entry_found ) {
+          /* Try to re-establish the connection one more time. */
+          fd_snp_meta_t meta = fd_snp_meta_from_parts( FD_SNP_META_PROTO_V1, 0/*app_id*/, ip4_addr, udp_port );
+          uchar packet[ FD_SNP_MTU ] = { 0 };
+          fd_snp_send( snp, packet, 0/*packet_sz*/, meta );
+        }
+
+        continue;
+      }
+      /* Flow rx watermark is updated when flow rx level nears the
+         watermark by half the flow rx allocation. This is arbitrary,
+         and it tries to minimize credit starvation and the number of
+         watermark updates. Note that conn->flow_rx_alloc should be
+         at least larger than the amount of bytes considered for the
+         margin update and the bytes reserved beyond the next watermark
+         (see below). */
+      if( FD_UNLIKELY( ( conn->flow_rx_wmark - conn->flow_rx_level ) < ( conn->flow_rx_alloc / 2 ) ) ) {
+        /* The next watermark value must take into account any unused
+           credits (in which case it references the current level)
+           and any overused credits (in which case it references the
+           current watermark). The receiver guarantees that there are
+           FD_SNP_MTU bytes available beyond the next watermark, which
+           may be crossed only once. This minimizes the calculations
+           around the crossing boundary and avoids weird edge cases. */
+        long wmark = fd_long_min( conn->flow_rx_level, conn->flow_rx_wmark ) + conn->flow_rx_alloc - (long)FD_SNP_MTU;
+        FD_SNP_LOG_TRACE( "[snp-hkp] updating flow rx wmark from %ld to %ld for %s level %ld", conn->flow_rx_wmark, wmark, FD_SNP_LOG_CONN( conn ), conn->flow_rx_level );
+        conn->flow_rx_wmark = wmark;
+        fd_snp_send_flow_rx_wmark_packet( snp, conn );
+        continue;
+      } else {
+        if( FD_UNLIKELY( now > conn->flow_rx_wmark_tstamp + FD_SNP_FLOW_RX_WMARK_MS ) ) {
+          FD_SNP_LOG_TRACE( "[snp-hkp] timed wmark update %ld level %ld at tstamp %016lx for %s", conn->flow_rx_wmark, conn->flow_rx_level, (ulong)conn->flow_rx_wmark_tstamp, FD_SNP_LOG_CONN( conn ) );
+          fd_snp_send_flow_rx_wmark_packet( snp, conn );
+          continue;
+        }
+      }
+      if( now > conn->last_sent_ts + FD_SNP_KEEP_ALIVE_MS ) {
+        FD_SNP_LOG_TRACE( "[snp-hkp] keep alive - pinging %s", FD_SNP_LOG_CONN( conn ) );
+        fd_snp_send_ping( snp, conn );
+        continue;
+      }
+    }
+  }
+
+  /* dest_meta_update and handshake retriggering. */
+  if( now > snp->dest_meta_next_update_ts + FD_SNP_DEST_META_UPDATE_MS ) {
+    ulong m_dest_meta_cnt_enf = 0UL;
+    ulong m_snp_available_cnt_all = 0UL;
+    ulong m_snp_available_cnt_enf = 0UL;
+    ulong m_snp_enabled_cnt_all = 0UL;
+    ulong m_snp_enabled_cnt_enf = 0UL;
+    fd_snp_dest_meta_map_t * curr_map = snp->dest_meta_map;
+    fd_snp_dest_meta_map_t * next_map = curr_map==snp->dest_meta_map_a ? snp->dest_meta_map_b : snp->dest_meta_map_a;
+    /* swap dest_meta_map and clone unexpired entries */
+    snp->dest_meta_map = next_map;
+    ulong slot_cnt = fd_snp_dest_meta_map_slot_cnt( curr_map );
+    ulong key_cnt = fd_snp_dest_meta_map_key_cnt( curr_map );
+    ulong key_i = 0UL;
+    for( ulong i=0; i < slot_cnt && key_i < key_cnt; i++ ) {
+      fd_snp_dest_meta_map_t * curr_entry = &curr_map[i];
+      if( !fd_snp_dest_meta_map_key_equal( curr_entry->key, fd_snp_dest_meta_map_key_null() ) ) {
+        /* clone current map's entry into next map's entry,
+           excluding older ones. Only use ==, since any
+           other operation may required both to be ulong. */
+        if( curr_entry->val.update_idx == snp->dest_meta_update_idx ) {
+          /* update next_entry */
+          fd_snp_dest_meta_map_t * next_entry = fd_snp_dest_meta_map_insert( next_map, curr_entry->key );
+          next_entry->val = curr_entry->val;
+          /* check if a handshake needs to be retriggered. */
+          if( FD_UNLIKELY( ( !!next_entry->val.snp_available ) &&
+                           ( !next_entry->val.snp_enabled ) &&
+                           ( now > next_entry->val.snp_handshake_tstamp + FD_SNP_DEST_META_UPDATE_MS ) ) ) {
+            fd_snp_meta_t meta = fd_snp_meta_from_parts( FD_SNP_META_PROTO_V1, 0/*app_id*/, next_entry->val.ip4_addr, next_entry->val.udp_port );
+            uchar packet[ FD_SNP_MTU ] = { 0 };
+            /* Note: do not log conn here; conn is a stale pointer
+               (past the pool end) after the loop above. */
+            FD_SNP_LOG_TRACE( "[snp-hsk] retry handshake at tstamp %016lx", (ulong)next_entry->val.snp_handshake_tstamp );
+            fd_snp_send( snp, packet, 0/*packet_sz*/, meta | FD_SNP_META_OPT_BUFFERED );
+            /* randomly set the handshake timestamp, to prevent all entries from
+               triggering at the same time in upcoming housekeeping(s). The rng
+               yields a ushort, in the range of [0, 65536) ms. */
+            next_entry->val.snp_handshake_tstamp = now + (long)( fd_rng_ushort( snp->rng ) );
+          }
+          m_snp_available_cnt_all += fd_ulong_if( !!next_entry->val.snp_available, 1UL, 0UL );
+          m_snp_enabled_cnt_all += fd_ulong_if( !!next_entry->val.snp_enabled, 1UL, 0UL );
+          if( !!next_entry->val.snp_enforced ) {
+            m_dest_meta_cnt_enf += 1UL;
+            m_snp_available_cnt_enf += fd_ulong_if( !!next_entry->val.snp_available, 1UL, 0UL );
+            m_snp_enabled_cnt_enf += fd_ulong_if( !!next_entry->val.snp_enabled, 1UL, 0UL );
+          }
+        }
+        /* manually reset current map's entry, avoiding fd_snp_dest_meta_map_clear() overhead. */
+        curr_entry->key = fd_snp_dest_meta_map_key_null();
+        key_i += 1UL;
+      }
+    }
+    /* manually reset current map's key_cnt, avoiding fd_snp_dest_meta_map_clear() overhead. */
+    fd_snp_dest_meta_map_private_t * hdr = fd_snp_dest_meta_map_private_from_slot( curr_map );
+    hdr->key_cnt = 0UL;
+    /* prepare for the next update */
+    snp->dest_meta_next_update_ts = now + FD_SNP_DEST_META_UPDATE_MS;
+
+    /* metrics */
+    snp->metrics_all->dest_meta_cnt = fd_snp_dest_meta_map_key_cnt( snp->dest_meta_map );
+    snp->metrics_all->dest_meta_snp_available_cnt = m_snp_available_cnt_all;
+    snp->metrics_all->dest_meta_snp_enabled_cnt = m_snp_enabled_cnt_all;
+
+    snp->metrics_enf->dest_meta_cnt = m_dest_meta_cnt_enf;
+    snp->metrics_enf->dest_meta_snp_available_cnt = m_snp_available_cnt_enf;
+    snp->metrics_enf->dest_meta_snp_enabled_cnt = m_snp_enabled_cnt_enf;
+
+    snp->metrics_all->conn_cur_total = fd_snp_conn_pool_used( snp->conn_pool );
+  }
+
+#undef FD_SNP_HANDSHAKE_RETRY_MAX
+#undef FD_SNP_HANDSHAKE_RETRY_MS
+#undef FD_SNP_KEEP_ALIVE_MS
+#undef FD_SNP_TIMEOUT_MS
+#undef FD_SNP_DEST_META_UPDATE_MS
+}
+
+/* fd_snp_set_identity installs a new 32-byte identity and drops every
+   existing connection (old-identity sessions cannot survive the key
+   change; see header for rationale). */
+void
+fd_snp_set_identity( fd_snp_t * snp,
+                     uchar const * new_identity ) {
+  fd_memcpy( snp->config.identity, new_identity, 32UL );
+
+  ulong max = fd_snp_conn_pool_max( snp->conn_pool );
+  ulong used = fd_snp_conn_pool_used( snp->conn_pool );
+  ulong idx = 0;
+  ulong used_ele = 0;
+  fd_snp_conn_t * conn = snp->conn_pool;
+
+  /* Scan the conn pool linearly, stopping early once all used elements
+     have been visited. */
+  for( ; idx<max && used_ele<used; idx++, conn++ ) {
+    if( conn->session_id == 0 ) continue;
+    used_ele++;
+    /* metrics */
+    snp->metrics_all->conn_acc_dropped_set_identity += 1UL;
+    if( !!conn->snp_enforced ) {
+      snp->metrics_enf->conn_acc_dropped_set_identity += 1UL;
+    }
+    fd_snp_conn_delete( snp, conn );
+  }
+}
diff --git a/src/waltz/snp/fd_snp.h b/src/waltz/snp/fd_snp.h
new file mode 100644
index 00000000000..792c4c3364c
--- /dev/null
+++ b/src/waltz/snp/fd_snp.h
@@ -0,0 +1,300 @@
+#ifndef HEADER_fd_src_waltz_snp_fd_snp_h
+#define HEADER_fd_src_waltz_snp_fd_snp_h
+
+#include "fd_snp_common.h"
+#include "fd_snp_proto.h"
+#include "../../util/net/fd_net_headers.h"
+
+/* fd_snp_limits_t defines the limits used to allocate memory for the
+   SNP instance.  Limits are fixed when the instance is created and
+   immutable thereafter. */
+struct __attribute__((aligned(16UL))) fd_snp_limits {
+  ulong peer_cnt; /* instance-wide, max peer count */
+};
+typedef struct fd_snp_limits fd_snp_limits_t;
+
+/* fd_snp_layout_t defines the memory layout of an fd_snp_t object.
+   All offsets are in bytes, relative to the start of the fd_snp_t
+   memory region.  Limits are immutable and valid for the lifetime of
+   an fd_snp_t (i.e. outlasts joins, until fd_snp_delete). */
+struct __attribute__((aligned(16UL))) fd_snp_layout {
+  ulong meta_sz;             /* size of this struct */
+  ulong conn_pool_off;       /* offset of connections pool mem region */
+  ulong conn_map_off;        /* offset of connections map mem region */
+  ulong pkt_pool_off;        /* offset of packets pool mem region */
+  ulong last_pkt_pool_off;   /* offset of last packets pool mem region */
+  ulong dest_meta_map_off_a; /* offset of destinations meta map A mem region */
+  ulong dest_meta_map_off_b; /* offset of destinations meta map B mem region */
+};
+typedef struct fd_snp_layout fd_snp_layout_t;
+
+/* Callback API *****************************************************/
+
+/* CALLBACKS */
+
+/* send/tx callback is invoked to send the packet over the wire. */
+typedef int
+( * fd_snp_cb_tx_t )( void const * ctx, /* callback context */
+ uchar const * packet, /* packet to send */
+ ulong packet_sz, /* size of packet to send */
+ fd_snp_meta_t meta ); /* connection metadata */
+
+/* recv/rx callback is invoked for packets with app payload, to
+ dispatch them to the proper instance of fd_snp_app. */
+typedef int
+( * fd_snp_cb_rx_t )( void const * ctx, /* callback context */
+ uchar const * packet, /* packet to dispatch */
+ ulong packet_sz, /* size of packet to dispatch */
+ fd_snp_meta_t meta ); /* connection metadata */
+
+/* sign callback is invoked to sign payload during handshake. */
+typedef int
+( * fd_snp_cb_sign_t )( void const * ctx, /* callback context */
+ ulong session_id, /* connection session id */
+ uchar const to_sign[ FD_SNP_TO_SIGN_SZ ] ); /* payload to sign */
+
+/* fd_snp_callbacks_t defines the callbacks used by the SNP instance.
+   ctx is passed verbatim as the first argument of each callback. */
+struct fd_snp_callbacks {
+  /* Function pointers to user callbacks */
+  void * ctx;            /* opaque user context */
+  fd_snp_cb_tx_t tx;     /* invoked to send a packet over the wire */
+  fd_snp_cb_rx_t rx;     /* invoked to dispatch an app payload */
+  fd_snp_cb_sign_t sign; /* invoked to sign handshake payloads */
+};
+typedef struct fd_snp_callbacks fd_snp_callbacks_t;
+
+/* fd_snp_applications_t defines the applications supported by the SNP
+   instance.  Incoming packets are matched to an app by destination UDP
+   port (see fd_snp_process_packet). */
+struct fd_snp_applications {
+  ushort port;                              /* UDP port this app listens on */
+  ushort net_id;                            /* network interface id -- TODO confirm semantics */
+  fd_ip4_udp_hdrs_t net_hdr[1];             /* prebuilt unicast ip4/udp header template */
+  uint multicast_ip;                        /* multicast group ip4, if any */
+  fd_ip4_udp_hdrs_t multicast_net_hdr[1];   /* prebuilt multicast ip4/udp header template */
+};
+typedef struct fd_snp_applications fd_snp_applications_t;
+
+/* fd_snp_dest_meta_t defines the metadata for a destination peer. */
+struct fd_snp_dest_meta {
+  long snp_handshake_tstamp; /* ms timestamp gating handshake retriggering (jittered) */
+  uint update_idx;           /* generation index; stale entries are dropped on map swap */
+  uint ip4_addr;             /* peer IPv4 address */
+  ushort udp_port;           /* peer UDP port */
+  uchar snp_available;       /* non-zero: peer is known to support SNP */
+  uchar snp_enabled;         /* non-zero: SNP currently enabled towards this peer */
+  uchar snp_enforced;        /* non-zero: peer counted in the enforced metrics set */
+  uchar reserved[3];         /* padding */
+};
+typedef struct fd_snp_dest_meta fd_snp_dest_meta_t;
+
+/* fd_snp_dest_meta_map_t defines the map of destination metadata,
+   keyed by the peer address (see fd_snp_dest_meta_map_key_from_parts).
+   The map type itself is generated by the fd_map_dynamic template
+   below. */
+struct __attribute__((aligned(32))) fd_snp_dest_meta_map {
+  ulong key;
+  fd_snp_dest_meta_t val;
+};
+typedef struct fd_snp_dest_meta_map fd_snp_dest_meta_map_t;
+
+#define MAP_NAME fd_snp_dest_meta_map
+#define MAP_T fd_snp_dest_meta_map_t
+#define MAP_MEMOIZE 0
+#define MAP_HASH_T ulong
+#define MAP_KEY_HASH(k) (k) /* keys are already well-distributed addresses */
+#include "../../util/tmpl/fd_map_dynamic.c"
+
+/* fd_snp_metrics_t defines the metrics for the SNP instance.
+   Counters named *_acc_* or *_cnt accumulate; *_cur_* and dest_meta_*
+   are gauges refreshed periodically.  fd_snp_t holds two instances:
+   one over all connections and one over enforced connections only. */
+struct fd_snp_metrics {
+  /* Destination metadata map stats (refreshed on map swap). */
+  ulong dest_meta_cnt;
+  ulong dest_meta_snp_available_cnt;
+  ulong dest_meta_snp_enabled_cnt;
+
+  ulong conn_cur_total;                /* pool cnt. */
+  ulong conn_cur_established;          /* current number of active connections. */
+  ulong conn_acc_total;                /* incr every time we create a connection. */
+  ulong conn_acc_established;          /* incr every time we complete a handshake. */
+  ulong conn_acc_dropped;              /* incr every time we delete a connection. */
+  ulong conn_acc_dropped_handshake;    /* incr every time we delete a handshake connection. */
+  ulong conn_acc_dropped_established;  /* incr every time we delete an established connection. */
+  ulong conn_acc_dropped_set_identity; /* incr every time we delete a connection due to set identity. */
+
+  /* Traffic sent via UDP to destinations known to support SNP. */
+  ulong tx_bytes_via_udp_to_snp_avail_cnt;
+  ulong tx_pkts_via_udp_to_snp_avail_cnt;
+
+  /* Tx counters, split by transport. */
+  ulong tx_bytes_via_udp_cnt;
+  ulong tx_bytes_via_snp_cnt;
+  ulong tx_pkts_via_udp_cnt;
+  ulong tx_pkts_via_snp_cnt;
+  ulong tx_pkts_dropped_no_credits_cnt; /* dropped by flow control */
+
+  /* Rx counters, split by transport. */
+  ulong rx_bytes_cnt;
+  ulong rx_bytes_via_udp_cnt;
+  ulong rx_bytes_via_snp_cnt;
+  ulong rx_pkts_cnt;
+  ulong rx_pkts_via_udp_cnt;
+  ulong rx_pkts_via_snp_cnt;
+  ulong rx_pkts_dropped_no_credits_cnt; /* dropped by flow control */
+
+  /* Multicast traffic counters. */
+  ulong mcast_tx_pkts_cnt;
+  ulong mcast_tx_bytes_cnt;
+  ulong mcast_rx_pkts_cnt;
+  ulong mcast_rx_bytes_cnt;
+};
+typedef struct fd_snp_metrics fd_snp_metrics_t;
+
+/* fd_snp_t defines the SNP instance. */
+struct FD_SNP_ALIGNED fd_snp {
+  ulong magic; /* ==FD_SNP_MAGIC */
+
+  fd_snp_config_t config;   /* protocol configuration (incl. 32-byte identity) */
+  fd_snp_layout_t layout;   /* memory layout offsets of the regions below */
+  fd_snp_limits_t limits;   /* allocation limits */
+  fd_snp_callbacks_t cb;    /* user callbacks (tx/rx/sign) */
+
+  fd_snp_applications_t apps[FD_SNP_APPS_CNT_MAX]; /* apps, matched by UDP port */
+  ulong apps_cnt;                                  /* number of valid apps[] entries */
+
+  struct {
+    ulong ip4;
+    ushort net_id;
+    fd_ip4_udp_hdrs_t net_hdr[1];
+  } multicast; /* multicast configuration */
+
+  fd_snp_conn_t * conn_pool;    /* connection pool */
+  fd_snp_conn_map_t * conn_map; /* peer_addr -> conn map */
+  fd_snp_pkt_t * pkt_pool;      /* packets cached while a handshake completes */
+  fd_snp_pkt_t * last_pkt_pool; /* last handshake packet per conn, for retry */
+
+  /* Flow control credits.
+     Support negative values to simplify arithmetic operations. */
+  long flow_cred_total;
+  long flow_cred_taken;
+  long flow_cred_alloc;
+
+  /* Double-buffered destination metadata maps: dest_meta_map points to
+     the active one of {a,b}; housekeeping periodically swaps and clones
+     unexpired entries into the other (see fd_snp_housekeeping). */
+  fd_snp_dest_meta_map_t * dest_meta_map_a;
+  fd_snp_dest_meta_map_t * dest_meta_map_b;
+  fd_snp_dest_meta_map_t * dest_meta_map;
+  uint dest_meta_update_idx;     /* current generation index */
+  long dest_meta_next_update_ts; /* ms timestamp of next map swap */
+
+  fd_rng_t rng_mem[ 1 ];
+  fd_rng_t * rng; /* used to jitter handshake retry timestamps */
+
+  /* Metrics.
+     We currently duplicate the metrics for all vs enforced connections.
+     We want to monitor how SNP behaves when enforced, before we enforce it
+     for everybody. */
+  fd_snp_metrics_t metrics_all[1];
+  fd_snp_metrics_t metrics_enf[1];
+};
+typedef struct fd_snp fd_snp_t;
+
+FD_PROTOTYPES_BEGIN
+
+/* construction API */
+
+/* fd_snp_align returns the required byte alignment of the memory
+   region backing an fd_snp_t. */
+static FD_FN_CONST inline ulong
+fd_snp_align( void ) {
+  return FD_SNP_ALIGN;
+}
+
+/* fd_snp_footprint returns the footprint of the fd_snp_t structure. */
+ulong
+fd_snp_footprint( fd_snp_limits_t const * limits );
+
+/* fd_snp_new formats an unused memory region for use as a SNP.
+ mem is a non-NULL pointer to this region in the local address space
+ with the required footprint and alignment. */
+void *
+fd_snp_new( void * mem,
+ fd_snp_limits_t const * limits );
+
+/* fd_snp_join joins the caller to the fd_snp. shsnp points to the
+ first byte of the memory region backing the SNP in the caller's
+ address space. */
+fd_snp_t *
+fd_snp_join( void * shsnp );
+
+/* fd_snp_init initializes the SNP such that it is ready to serve.
+ permits the calling thread exclusive access during which no other
+ thread may write to the SNP. Exclusive rights get released when
+ the thread exits or calls fd_snp_fini.
+
+ Requires valid configuration and external objects (callbacks).
+ Returns given snp on success and NULL on failure (logs details).
+ Performs various heap allocations and file system accesses such
+ reading certs. Reasons for failure include invalid config or
+ fd_tls error. */
+fd_snp_t *
+fd_snp_init( fd_snp_t * snp );
+
+/* fd_snp_fini releases exclusive access over a SNP. Zero-initializes
+ references to external objects (aio, callbacks). Frees any heap
+ allocs made by fd_snp_init. Returns snp. */
+fd_snp_t *
+fd_snp_fini( fd_snp_t * snp );
+
+/* Service API */
+
+/* fd_snp_send prepares a packet for sending and invokes the send callback.
+ It returns a negative value on error (packet dropped), and a non-negative
+ value on success (typically the packet size, but the return value is
+ controlled by the callback). */
+int
+fd_snp_send( fd_snp_t * snp,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta );
+
+/* fd_snp_process_packet processes an incoming packet received from
+ the network.
+ Depending on the packet, it invokes one of the registered callbacks:
+ - receive callback, if the packet contains a payload for an application
+ - send callback, if the packet requires sending a new packet in response
+ - sign callback, if the packet requires signing during handshake
+ It returns a negative value on error (packet dropped), and a non-negative
+ value on success (typically the packet size, but the return value is
+ controlled by the callback). */
+int
+fd_snp_process_packet( fd_snp_t * snp,
+ uchar * packet,
+ ulong packet_sz );
+
+/* fd_snp_process_signature processes a signature received asynchronously.
+ It invokes the send callback to send the signed packet to the peer.
+ It returns a negative value on error (signature dropped), and a non-negative
+ value on success (typically the packet size, but the return value is
+ controlled by the callback). */
+int
+fd_snp_process_signature( fd_snp_t * snp,
+ ulong session_id,
+ uchar const signature[ 64 ] );
+
+/* fd_snp_housekeeping performs housekeeping tasks, such as:
+ - checking for and removing expired connections
+ - implementing keep-alive logic
+ - implementing retry logic during handshakes
+ This function is expected to be called periodically by the snp tile.
+ It invokes the send callback to send any necessary packet.
+ The current implementation checks and potentially sends a packet
+ for every existing connection. */
+void
+fd_snp_housekeeping( fd_snp_t * snp );
+
+/* fd_snp_set_identity implements the logic to respond to a set identity event.
+ Currently, it deletes all connections. While this may seem a bit extreme and
+ in future we may want to gracefully close connections, this is done so that
+ we avoid any unnecessary assumptions in the SNP state machine. */
+void
+fd_snp_set_identity( fd_snp_t * snp,
+ uchar const * new_identity );
+
+
+/* fd_snp_dest_meta_map_key_from_parts derives the dest_meta map key
+   for a peer from its ip4 address and udp port (same encoding as the
+   packed peer address). */
+static inline ulong
+fd_snp_dest_meta_map_key_from_parts( uint ip4_addr,
+                                     ushort udp_port ) {
+  return fd_snp_peer_addr_from_parts( ip4_addr, udp_port );
+}
+
+/* fd_snp_dest_meta_map_key_from_conn derives the dest_meta map key for
+   the peer of conn (the packed peer address is used directly). */
+static inline ulong
+fd_snp_dest_meta_map_key_from_conn( fd_snp_conn_t * conn ) {
+  return conn->peer_addr;
+}
+
+FD_PROTOTYPES_END
+
+#endif /* HEADER_fd_src_waltz_snp_fd_snp_h */
diff --git a/src/waltz/snp/fd_snp_app.c b/src/waltz/snp/fd_snp_app.c
new file mode 100644
index 00000000000..359f95c8632
--- /dev/null
+++ b/src/waltz/snp/fd_snp_app.c
@@ -0,0 +1,107 @@
+#include "fd_snp_app.h"
+#include "../../util/net/fd_net_headers.h"
+
+ulong
+fd_snp_app_footprint( void ) {
+ return sizeof( fd_snp_app_t );
+}
+
+void *
+fd_snp_app_new( void * mem ) {
+ if( FD_UNLIKELY( !mem ) ) return NULL;
+
+ ulong align = fd_snp_app_align();
+ if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, align ) ) ) return NULL;
+
+ /* Zero the entire memory region */
+ fd_snp_app_t * snp = (fd_snp_app_t *)mem;
+ memset( snp, 0, fd_snp_app_footprint() );
+
+ /* Set magic number to indicate successful initialization */
+ FD_COMPILER_MFENCE();
+ snp->magic = FD_SNP_APP_MAGIC;
+ FD_COMPILER_MFENCE();
+
+ return snp;
+}
+
+fd_snp_app_t *
+fd_snp_app_join( void * shsnp ) {
+ return (fd_snp_app_t *)(shsnp);
+}
+
+int
+fd_snp_app_recv( fd_snp_app_t const * ctx, /* snp_app context */
+ uchar const * packet, /* input packet */
+ ulong packet_sz, /* size of input packet */
+ fd_snp_meta_t meta ) { /* connection metadata */
+ uchar const * data = NULL;
+ ulong data_sz = 0UL;
+
+ ulong proto = meta & FD_SNP_META_PROTO_MASK;
+ switch( proto ) {
+ case FD_SNP_META_PROTO_UDP:
+ data = packet + sizeof(fd_ip4_udp_hdrs_t);
+ data_sz = packet_sz - sizeof(fd_ip4_udp_hdrs_t);
+ break;
+ case FD_SNP_META_PROTO_V1:
+ data = packet + sizeof(fd_ip4_udp_hdrs_t) + 15; /* 12 is for SNP header + 3 for TL */
+ data_sz = packet_sz - sizeof(fd_ip4_udp_hdrs_t) - 15 - 19; /* 19 is for final TL-MAC */
+ if( FD_UNLIKELY( fd_snp_ip_is_multicast( packet ) ) ) {
+ data_sz += 19;
+ meta |= FD_SNP_META_OPT_BROADCAST;
+ }
+ break;
+ default:
+ return FD_SNP_FAILURE; /* Not implemented */
+ }
+
+ fd_snp_peer_t peer = 0;
+
+ return ctx && ctx->cb.rx ? ctx->cb.rx( ctx->cb.ctx, peer, data, data_sz, meta ) : (int)data_sz;
+}
+
+int
+fd_snp_app_send( fd_snp_app_t const * ctx, /* snp_app context */
+ uchar * packet, /* output packet buffer */
+ ulong packet_sz, /* (max) size of output packet buffer */
+ void const * data, /* app data to send to peer */
+ ulong data_sz, /* size of app data to send to peer */
+ fd_snp_meta_t meta ) { /* connection metadata */
+ if( FD_UNLIKELY( packet==NULL ) ) {
+ return FD_SNP_FAILURE;
+ }
+
+ ulong data_offset = 0UL;
+ ulong actual_packet_sz = 0UL;
+
+ ulong proto = meta & FD_SNP_META_PROTO_MASK;
+ switch( proto ) {
+
+ case FD_SNP_META_PROTO_UDP:
+ data_offset = sizeof(fd_ip4_udp_hdrs_t);
+ actual_packet_sz = data_sz + data_offset;
+ break;
+
+ case FD_SNP_META_PROTO_V1:
+ data_offset = sizeof(fd_ip4_udp_hdrs_t) + 12; /* 12 is for SNP header */
+
+ packet[data_offset] = FD_SNP_FRAME_DATAGRAM;
+ ushort data_sz_h = (ushort)data_sz;
+ memcpy( packet+data_offset+1, &data_sz_h, 2 );
+
+ data_offset += 3;
+
+ actual_packet_sz = data_sz + data_offset + 19; /* 19 is for final MAC */
+ break;
+
+ default:
+ return FD_SNP_FAILURE; /* Not implemented */
+ }
+ if( FD_UNLIKELY( packet_sz < actual_packet_sz ) ) {
+ return FD_SNP_FAILURE;
+ }
+
+ memcpy( packet + data_offset, data, data_sz );
+ return ctx && ctx->cb.tx ? ctx->cb.tx( ctx->cb.ctx, packet, actual_packet_sz, meta ) : (int)actual_packet_sz;
+}
diff --git a/src/waltz/snp/fd_snp_app.h b/src/waltz/snp/fd_snp_app.h
new file mode 100644
index 00000000000..89dfa3418da
--- /dev/null
+++ b/src/waltz/snp/fd_snp_app.h
@@ -0,0 +1,96 @@
+#ifndef HEADER_fd_src_waltz_snp_fd_snp_app_h
+#define HEADER_fd_src_waltz_snp_fd_snp_app_h
+
+#include "fd_snp_common.h"
+
+#define FD_SNP_APP_ALIGN (8UL)
+#define FD_SNP_APP_MAGIC (0xf17eda2ce7529a99UL)
+
+/* TYPES */
+
+/* CALLBACKS */
+
+/* send/tx callback is invoked by fd_snp_app_send* to actually send
+ the packet over the wire (or to a different process). */
+typedef int
+( * fd_snp_app_cb_tx_t )( void const * ctx, /* callback context */
+ uchar * packet, /* packet to send */
+ ulong packet_sz, /* size of packet to send */
+ fd_snp_meta_t meta ); /* connection metadata */
+
+/* recv/rx callback is invoked by fd_snp_app_recv to process data
+ received from peer. */
+typedef int
+( * fd_snp_app_cb_rx_t )( void const * ctx, /* callback context */
+ fd_snp_peer_t peer, /* source peer */
+ uchar const * data, /* app data received from peer */
+ ulong data_sz, /* size of app data received from peer */
+ fd_snp_meta_t meta ); /* connection metadata */
+
/* fd_snp_app_callbacks_t groups all the callbacks for fd_snp_app_t.
   ctx is an opaque pointer passed back as the first argument of every
   callback.  tx/rx may be NULL, in which case fd_snp_app_send/recv
   skip the callback and return the computed size instead. */
struct fd_snp_app_callbacks {
  void *             ctx; /* opaque callback context */
  fd_snp_app_cb_tx_t tx;  /* invoked by fd_snp_app_send */
  fd_snp_app_cb_rx_t rx;  /* invoked by fd_snp_app_recv */
};
typedef struct fd_snp_app_callbacks fd_snp_app_callbacks_t;
+
/* fd_snp_app_t is a type to represent a SNP app context. */
struct __attribute__((aligned(FD_SNP_APP_ALIGN))) fd_snp_app {
  ulong                  magic; /* ==FD_SNP_APP_MAGIC once initialized by fd_snp_app_new */
  fd_snp_app_callbacks_t cb;    /* tx/rx callbacks and their opaque context */
};
typedef struct fd_snp_app fd_snp_app_t;
+
+FD_PROTOTYPES_BEGIN
+
+/* ALLOC */
+
+FD_FN_CONST static inline ulong
+fd_snp_app_align( void ) {
+ return FD_SNP_APP_ALIGN;
+}
+
+/* fd_snp_app_footprint returns the footprint of the fd_snp_app_t structure. */
+ulong
+fd_snp_app_footprint( void );
+
+/* fd_snp_app_new initializes a new fd_snp_app_t structure. */
+void *
+fd_snp_app_new( void * mem );
+
+/* fd_snp_app_join joins the caller to the fd_snp_app.
+ shsnp points to the first byte of the memory region backing the SNP app in
+ the caller's address space. */
+fd_snp_app_t *
+fd_snp_app_join( void * shsnp );
+
+/* APP API */
+
+/* fd_snp_app_recv receives data from a peer.
+ Concretely, it invokes the receive callback registered in the snp_app context `ctx`,
+ passing a pointer to the payload data (the position of the payload depends on the
+ incoming packet protocol).
+ packet, packet_sz and meta are received from the fd_snp_process_packet()
+ function (via its receive callback). */
+int
+fd_snp_app_recv( fd_snp_app_t const * ctx, /* snp_app context */
+ uchar const * packet, /* input packet */
+ ulong packet_sz, /* size of input packet */
+ fd_snp_meta_t meta ); /* connection metadata */
+
+/* fd_snp_app_send sends data to a peer.
+ Concretely, it prepares packet by storing data in the right position,
+ depending on the protocol (SNP vs UDP) and invokes the send callback
+ registered in the snp_app context `ctx`. */
+int
+fd_snp_app_send( fd_snp_app_t const * ctx, /* snp_app context */
+ uchar * packet, /* output packet buffer */
+ ulong packet_sz, /* (max) size of output packet buffer */
+ void const * data, /* app data to send to peer */
+ ulong data_sz, /* size of app data to send to peer */
+ fd_snp_meta_t meta ); /* connection metadata */
+
+FD_PROTOTYPES_END
+
+#endif /* HEADER_fd_src_waltz_snp_fd_snp_app_h */
diff --git a/src/waltz/snp/fd_snp_common.h b/src/waltz/snp/fd_snp_common.h
new file mode 100644
index 00000000000..9a39abd8402
--- /dev/null
+++ b/src/waltz/snp/fd_snp_common.h
@@ -0,0 +1,250 @@
+#ifndef HEADER_fd_src_waltz_snp_fd_snp_common_h
+#define HEADER_fd_src_waltz_snp_fd_snp_common_h
+
+/* Common definitions between fd_snp.h and fd_snp_app.h. */
+
+#include "../../util/fd_util_base.h"
+#include "../../util/bits/fd_bits.h"
+
+/* Debug and trace flags, useful for debugging logs (running a
+ non-production validator) or tracing (running tests). */
+#define FD_SNP_DEBUG_ENABLED (0)
+#define FD_SNP_TRACE_ENABLED (0)
+
+#define FD_SNP_META_PROTO_UDP (0x0000000000000000UL)
+#define FD_SNP_META_PROTO_V1 (0x0100000000000000UL)
+#define FD_SNP_META_PROTO_V2 (0x0200000000000000UL)
+
+#define FD_SNP_META_OPT_BUFFERED (0x1000000000000000UL)
+#define FD_SNP_META_OPT_HANDSHAKE (0x2000000000000000UL)
+#define FD_SNP_META_OPT_BROADCAST (0x4000000000000000UL)
+
+#define FD_SNP_META_IP_MASK (0x00000000FFFFFFFFUL)
+#define FD_SNP_META_PORT_MASK (0x0000FFFF00000000UL)
+#define FD_SNP_META_PEER_MASK (0x0000FFFFFFFFFFFFUL)
+#define FD_SNP_META_APP_MASK (0x000F000000000000UL)
+#define FD_SNP_META_PROTO_MASK (0x0F00000000000000UL)
+#define FD_SNP_META_OPT_MASK (0xF000000000000000UL)
+
+#define FD_SNP_SUCCESS ( 0)
+#define FD_SNP_FAILURE (-1)
+
+#define FD_SNP_FRAME_EMPTY (0x00) /* Bytes padded to 0 are essentially empty TLVs */
+#define FD_SNP_FRAME_PING (0x01)
+#define FD_SNP_FRAME_DATAGRAM (0x31)
+#define FD_SNP_FRAME_AUTH (0x41)
+#define FD_SNP_FRAME_MAX_DATA (0x10)
+#define FD_SNP_FRAME_CONN_CLOSE (0x1D)
+#define FD_SNP_FRAME_MC_ANNOUNCE (0x51)
+#define FD_SNP_FRAME_MC_STATE (0x52)
+
+#define FD_SNP_IP_DST_ADDR_OFF (30UL)
+
+#define FD_SNP_APPS_CNT_MAX (8UL)
+
+/* TYPES */
+
+/* fd_snp_app_peer_t is a type to represent a peer identifier.
+ Currently, it encodes the peer IPv4 + port, but the application
+ should not make any assumption. */
+typedef ulong fd_snp_peer_t;
+
+/* fd_snp_app_meta_t is a type to represent connection metadata. */
+typedef ulong fd_snp_meta_t;
+
+/* fd_snp_tlv_t holds type and length of the TLV, as well as a pointer
+ to the begining of the memory location where the value is located.
+ IMPORTANT: do NOT cast a pointer to a buffer as (fd_snp_tlv_t *).
+ Instead, use either the extract or iterator methods to obtain type,
+ len and ptr. */
+struct fd_snp_tlv {
+ uchar const * ptr;
+ ushort len;
+ uchar type;
+};
+typedef struct fd_snp_tlv fd_snp_tlv_t;
+
+/* fd_snp_tlv_iter_t contains the iterator's metadata. Off indicates
+ the offset inside the given buffer, whereas rem indicates the
+ remaining amount of bytes (signed, in order to simplify checks). */
+struct fd_snp_tlv_iter {
+ ulong off;
+ long rem;
+};
+typedef struct fd_snp_tlv_iter fd_snp_tlv_iter_t;
+
+FD_PROTOTYPES_BEGIN
+
+/* ALLOC */
+
+static inline fd_snp_meta_t
+fd_snp_meta_from_parts( ulong snp_proto,
+ uchar snp_app_id,
+ uint ip4,
+ ushort port ) {
+ return ( snp_proto & FD_SNP_META_PROTO_MASK )
+ | (( (ulong) ( snp_app_id & 0x0F ) ) << 48 )
+ | (( (ulong) port ) << 32 )
+ | (( (ulong) ip4 ));
+}
+
+static inline void
+fd_snp_meta_into_parts( ulong * snp_proto,
+ uchar * snp_app_id,
+ uint * ip4,
+ ushort * port,
+ fd_snp_meta_t meta ) {
+ if( snp_proto ) *snp_proto = meta & FD_SNP_META_PROTO_MASK;
+ if( snp_app_id ) *snp_app_id = (uchar)( ( meta & FD_SNP_META_APP_MASK ) >> 48 );
+ if( ip4 ) *ip4 = (uint )( meta );
+ if( port ) *port = (ushort)( meta >> 32 );
+}
+
+static inline ulong
+fd_snp_peer_addr_from_meta( fd_snp_meta_t meta ) {
+ return meta & FD_SNP_META_PEER_MASK;
+}
+
+static inline ulong
+fd_snp_peer_addr_from_parts( uint ip4,
+ ushort port ) {
+ return (( (ulong) port ) << 32 )
+ | (( (ulong) ip4 ));
+}
+
+static inline void
+fd_snp_peer_addr_into_parts( uint * ip4,
+ ushort * port,
+ ulong peer_addr ) {
+ if( ip4 ) *ip4 = (uint )( peer_addr );
+ if( port ) *port = (ushort)( peer_addr >> 32 );
+}
+
/* fd_snp_ip_is_multicast returns non-zero if the packet's IPv4
   destination address is multicast (class D, first octet in
   [224,239]).  packet is assumed to start at the Ethernet header with
   the IPv4 destination address at byte FD_SNP_IP_DST_ADDR_OFF (30) --
   TODO confirm all callers pass Ethernet-framed packets. */
static inline int
fd_snp_ip_is_multicast( uchar const * packet ) {
  return 224 <= packet[FD_SNP_IP_DST_ADDR_OFF] && packet[FD_SNP_IP_DST_ADDR_OFF] <= 239;
}
+
+/* fd_snp_tlv_extract_{type/len/ptr/tlv} extracts the corresponding
+ values from a pointer to the beginning of a TLV set. None of these
+ methods returns the value itself, but rather a pointer to the
+ beginning of the location in memory where the value is located. */
+FD_FN_CONST static inline uchar
+fd_snp_tlv_extract_type( uchar const * tlv_ptr ) {
+ return fd_uchar_load_1( tlv_ptr );
+}
+
+FD_FN_CONST static inline ushort
+fd_snp_tlv_extract_len( uchar const * tlv_ptr ) {
+ return fd_ushort_load_2( tlv_ptr + 1UL );
+}
+
+FD_FN_CONST static inline uchar const *
+fd_snp_tlv_extract_ptr( uchar const * tlv_ptr ) {
+ return tlv_ptr + 3UL;
+}
+
+static inline fd_snp_tlv_t
+fd_snp_tlv_extract_tlv( uchar const * tlv_ptr ) {
+ fd_snp_tlv_t tlv;
+ tlv.type = fd_snp_tlv_extract_type( tlv_ptr );
+ tlv.len = fd_snp_tlv_extract_len( tlv_ptr );
+ tlv.ptr = fd_snp_tlv_extract_ptr( tlv_ptr );
+ return tlv;
+}
+
+/* fd_snp_tlv_iter_{init/done/next} are basic methods to iterate over
+ a given buffer containing a sequence of TLVs.
+ fd_snp_tlv_iter_{type/len/ptr/tlv} extract the corresponding TLV
+ parts at the current location of the iterator. None of these
+ methods returns the value itself, which needs to be deduced using
+ ptr and len (and probably type as well). The application needs to
+ verify that tlv.len holds a "reasonable" length value, since it can
+ theoretically be in the range [0, 1<<16).
+ Typical usage:
+
+ for( fd_snp_tlv_iter_t iter = fd_snp_tlv_iter_init( data_sz );
+ !fd_snp_tlv_iter_done( iter, data );
+ iter = fd_snp_tlv_iter_next( iter, data ) ) {
+ ...
+ uchar type = fd_snp_tlv_iter_type( iter, data );
+ ushort len = fd_snp_tlv_iter_len( iter, data );
+ uchar const * ptr = fd_snp_tlv_iter_ptr( iter, data );
+ fd_snp_tlv_t tlv = fd_snp_tlv_iter_tlv( iter, data );
+ ...
+ uchar u8 = fd_uchar_load_1( tlv.ptr ); // if len==1
+ ushort u16 = fd_ushort_load_2( tlv.ptr ); // if len==2
+ uint u32 = fd_uint_load_4( tlv.ptr ); // if len==4
+ ulong u64 = fd_ulong_load_8( tlv.ptr ); // if len==8
+ fd_memcpy( value_buf, tlv.ptr, tlv.len ); // otherwise
+ ...
+ } */
+FD_FN_CONST static inline fd_snp_tlv_iter_t
+fd_snp_tlv_iter_init( ulong data_sz ) {
+ fd_snp_tlv_iter_t iter;
+ iter.off = 0UL;
+ iter.rem = fd_long_if( !!(data_sz>>63), 0UL/*overflow size*/, (long)data_sz );
+ return iter;
+}
+
+FD_FN_CONST static inline int
+fd_snp_tlv_iter_done( fd_snp_tlv_iter_t iter,
+ uchar const * data FD_PARAM_UNUSED ) {
+ /* TLV "header" part (i.e. TL is 3 bytes long). */
+ return iter.rem < 3L;
+}
+
+FD_FN_CONST static inline fd_snp_tlv_iter_t
+fd_snp_tlv_iter_next( fd_snp_tlv_iter_t iter,
+ uchar const * data ) {
+ ulong tlv_sz = fd_snp_tlv_extract_len( data + iter.off ) + 3UL;
+ iter.off += tlv_sz;
+ iter.rem -= (long)tlv_sz; /* tlv_sz in range [3, (1<<16)+3). */
+ return iter;
+}
+
+FD_FN_CONST static inline uchar
+fd_snp_tlv_iter_type( fd_snp_tlv_iter_t iter,
+ uchar const * data ) {
+ return fd_snp_tlv_extract_type( data + iter.off );
+}
+
+FD_FN_CONST static inline ushort
+fd_snp_tlv_iter_len( fd_snp_tlv_iter_t iter,
+ uchar const * data ) {
+ return fd_snp_tlv_extract_len( data + iter.off );
+}
+
+FD_FN_CONST static inline uchar const *
+fd_snp_tlv_iter_ptr( fd_snp_tlv_iter_t iter,
+ uchar const * data ) {
+ return fd_snp_tlv_extract_ptr( data + iter.off );
+}
+
+FD_FN_CONST static inline fd_snp_tlv_t
+fd_snp_tlv_iter_tlv( fd_snp_tlv_iter_t iter,
+ uchar const * data ) {
+ return fd_snp_tlv_extract_tlv( data + iter.off );
+}
+
+FD_PROTOTYPES_END
+
+#if FD_SNP_TRACE_ENABLED
+#undef FD_SNP_DEBUG_ENABLED
+#define FD_SNP_DEBUG_ENABLED (1)
+#define FD_SNP_LOG_TRACE(...) FD_LOG_NOTICE(( __VA_ARGS__ ))
+#else
+#define FD_SNP_LOG_TRACE(...)
+#endif
+
+#if FD_SNP_DEBUG_ENABLED
+#define FD_SNP_LOG_DEBUG_N(...) FD_LOG_NOTICE(( __VA_ARGS__ ))
+#define FD_SNP_LOG_DEBUG_W(...) FD_LOG_WARNING(( __VA_ARGS__ ))
+#define FD_SNP_LOG_CONN( conn ) fd_snp_log_conn( conn )
+#else
+#define FD_SNP_LOG_DEBUG_N(...)
+#define FD_SNP_LOG_DEBUG_W(...)
+#define FD_SNP_LOG_CONN( conn ) ""
+#endif
+
+#endif /* HEADER_fd_src_waltz_snp_fd_snp_common_h */
diff --git a/src/waltz/snp/fd_snp_private.h b/src/waltz/snp/fd_snp_private.h
new file mode 100644
index 00000000000..e79442b161c
--- /dev/null
+++ b/src/waltz/snp/fd_snp_private.h
@@ -0,0 +1,16 @@
+#ifndef HEADER_fd_src_waltz_snp_fd_snp_private_h
+#define HEADER_fd_src_waltz_snp_fd_snp_private_h
+
+/* snp_private.h contains private functions, used e.g. in tests. */
+
+#include "fd_snp_v1.h"
+
+FD_PROTOTYPES_BEGIN
+
+int
+fd_snp_conn_delete( fd_snp_t * snp,
+ fd_snp_conn_t * conn );
+
+FD_PROTOTYPES_END
+
+#endif /* HEADER_fd_src_waltz_snp_fd_snp_private_h */
diff --git a/src/waltz/snp/fd_snp_proto.h b/src/waltz/snp/fd_snp_proto.h
new file mode 100644
index 00000000000..a8cde010acf
--- /dev/null
+++ b/src/waltz/snp/fd_snp_proto.h
@@ -0,0 +1,217 @@
+#ifndef HEADER_fd_src_waltz_snp_fd_snp_proto_h
+#define HEADER_fd_src_waltz_snp_fd_snp_proto_h
+
+/* Protocol definitions, in common between fd_snp.h and fd_snp_v1.h. */
+
+#include "../../util/fd_util_base.h"
+#include "../../util/rng/fd_rng.h"
+#include "../../ballet/aes/fd_aes_base.h"
+
+/* SNP_MTU controls the maximum supported UDP payload size. */
+
+#define FD_SNP_MTU (1536UL) /* FD_SNP_MTU is currently 24 x 64 */
+#define FD_SNP_MTU_MIN (1200UL)
+
+#define FD_SNP_ALIGN (128UL)
+#define FD_SNP_ALIGNED __attribute__((aligned(128UL)))
+
+/* SNP_V{...} identify SNP versions. */
+
+#define FD_SNP_V1 ((uchar)0x01)
+#define FD_SNP_V2 ((uchar)0x02)
+
+/* SNP_TYPE_{...} identify SNP packet types (and connection states). */
+
+#define FD_SNP_TYPE_INVALID ((uchar)0x00) /* invalid */
+
+#define FD_SNP_TYPE_HS_CLIENT_INIT ((uchar)0x01)
+#define FD_SNP_TYPE_HS_SERVER_INIT ((uchar)0x02)
+#define FD_SNP_TYPE_HS_CLIENT_CONT ((uchar)0x03)
+#define FD_SNP_TYPE_HS_SERVER_FINI ((uchar)0x04)
+#define FD_SNP_TYPE_HS_CLIENT_FINI ((uchar)0x05)
+#define FD_SNP_TYPE_PAYLOAD ((uchar)0x0F)
+
+#define FD_SNP_TYPE_HS_SERVER_FINI_SIG ((uchar)0xF4) /* invalid on wire */
+#define FD_SNP_TYPE_HS_CLIENT_FINI_SIG ((uchar)0xF5) /* invalid on wire */
+#define FD_SNP_TYPE_HS_DONE ((uchar)0xFF) /* invalid on wire */
+
+#define FD_SNP_TO_SIGN_SZ (40UL)
+#define FD_SNP_MAGIC (0xf17eda2ce7552299UL)
+
/* fd_snp_config is the configuration for the SNP instance. */
struct fd_snp_config {
  double tick_per_us;        /* tick_per_us: clock ticks per microsecond */
  long   keep_alive_ms;      /* keep-alive period (ms) */
  long   handshake_retry_ms; /* handshake retry period (ms) */

  /* identity pubkey */
  uchar identity[ 32UL ];    /* presumably the validator's Ed25519 identity public key -- verify against caller */

  /* Private members */

  fd_rng_t     _rng[1];            /* internal RNG state */
  fd_aes_key_t _state_enc_key[1];  /* key for encrypting stateless handshake state (cf. fd_snp_v1_crypto_enc_state_generate) */
  fd_aes_key_t _state_dec_key[1];  /* matching key for validating it (cf. fd_snp_v1_crypto_enc_state_validate) */
};
typedef struct fd_snp_config fd_snp_config_t;
+
+/* Packets */
+
/* fd_snp_pkt_t is a pooled packet buffer, shared by the packets cache
   and by per-connection "last sent packet" retention. */
struct FD_SNP_ALIGNED fd_snp_pkt {
  /* used both by packets cache, and last sent packets - preferably,
     data should be FD_SNP_ALIGNED, so placing it at the top of the
     struct. */
  uchar data[ FD_SNP_MTU ];

  /* fd_pool */
  ulong next;

  /* only used by last sent packets */
  ulong meta;

  /* only used by packets cache */
  ulong session_id;
  ushort data_sz;
  uchar send; // send or recv

  /* force sizeof(fd_snp_pkt_t)==2048 for feng shui (cf fd_pool.c).
     Used bytes: 1536 (data) + 8 (next) + 8 (meta) + 8 (session_id)
     + 2 (data_sz) + 1 (send) = 1563. */
  uchar _padding[ 2048 - 1563 ];
};
typedef struct fd_snp_pkt fd_snp_pkt_t;
FD_STATIC_ASSERT( sizeof(fd_snp_pkt_t)==2048UL, fd_snp_pkt_t );
+
+#define POOL_NAME fd_snp_pkt_pool
+#define POOL_T fd_snp_pkt_t
+#include "../../util/tmpl/fd_pool.c"
+
+/* Connections */
+
/* fd_snp_conn_t holds the full state of a single SNP connection, both
   during handshake and once established. */
struct FD_SNP_ALIGNED fd_snp_conn {
  ulong next; // fd_pool

  ulong session_id;      /* our session id */
  ulong peer_addr;       /* packed peer address: ip4 in bits [31:0], udp port in bits [47:32] (cf. fd_snp_peer_addr_from_parts) */
  ulong peer_session_id; /* peer's session id, learned during handshake */

  /* Flow control */
  long flow_rx_alloc;
  long flow_rx_level;
  long flow_rx_wmark;
  long flow_rx_wmark_tstamp;
  long flow_tx_level;
  long flow_tx_wmark;

  uchar state;        /* handshake state machine: one of FD_SNP_TYPE_* */
  uchar is_server;    /* non-zero if we are the server side of this connection */
  uchar is_multicast;

  uchar snp_enabled;
  uchar snp_enforced;

  fd_snp_pkt_t * last_pkt; /* last sent packet -- presumably for handshake retry; confirm against fd_snp_housekeeping */

  long last_sent_ts;  /* timestamp of last sent packet */
  long last_recv_ts;  /* timestamp of last received packet */
  uchar retry_cnt;    /* handshake retry counter */

  /* public key. Access via: fd_snp_conn_pubkey() */
  uchar * _pubkey;

  /* peer public key. Access via: fd_snp_conn_peer_pubkey().
     NOTE: during handshake this area is aliased as the Noise transcript
     hash / ephemeral public key (cf. fd_snp_conn_noise_hash in
     fd_snp_v1.c). */
  uchar _peer_pubkey[ 32 ];

  /* Memory area for key material:
     - For established connections: 2x 256-bit keys (HMAC-SHA-256-128, RFC 4868)
     - During handshake: Noise hash transcript, and symmetric encryption key
     - For client, before Noise, ephemeral DH keypair */
  uchar _sensitive_keys[ 64 ];
};
typedef struct fd_snp_conn fd_snp_conn_t;
+
+#define POOL_NAME fd_snp_conn_pool
+#define POOL_T fd_snp_conn_t
+#include "../../util/tmpl/fd_pool.c"
+
/* fd_snp_conn_map_t is an entry of the dynamic map from a ulong key to
   a connection.  The key is used verbatim as the hash (see
   MAP_KEY_HASH below) -- presumably a session id or packed peer
   address; confirm against the map's users. */
struct __attribute__((aligned(16))) fd_snp_conn_map {
  ulong           key;
  fd_snp_conn_t * val;
};
typedef struct fd_snp_conn_map fd_snp_conn_map_t;
+
+#define MAP_NAME fd_snp_conn_map
+#define MAP_T fd_snp_conn_map_t
+#define MAP_MEMOIZE 0
+#define MAP_HASH_T ulong
+#define MAP_KEY_HASH(k) (k)
+#include "../../util/tmpl/fd_map_dynamic.c"
+
/* snp_hdr_t is the common SNP header shared by all packets. */
struct __attribute__((packed)) snp_hdr {
  uint  version_type; /* 'S','N','P' magic in bytes 0-2; version (high nibble) and type (low nibble) in byte 3 (cf. fd_snp_hdr_version_type) */
  ulong session_id;   /* receiving side's session id -- confirm direction against handshake code */
};
typedef struct snp_hdr snp_hdr_t;
+
/* snp_hs_hdr_t is the SNP header shared by all handshake packets. */
struct __attribute__((packed)) snp_hs_hdr {
  snp_hdr_t base;           /* common SNP header */
  ulong     src_session_id; /* sender's own session id */
};
typedef struct snp_hs_hdr snp_hdr_hs_t; /* NOTE(review): typedef name (snp_hdr_hs_t) and struct tag (snp_hs_hdr) disagree in word order; the comment above says snp_hs_hdr_t -- confirm intended name */
+
+FD_PROTOTYPES_BEGIN
+
+static inline int
+fd_snp_rng( uchar * buf, ulong buf_sz ) {
+ if( FD_LIKELY( fd_rng_secure( buf, buf_sz )!=NULL ) ) {
+ return (int)buf_sz;
+ }
+ return -1;
+}
+
/* fd_snp_timestamp_ms returns the current wallclock time in
   milliseconds (fd_log_wallclock is in nanoseconds). */
static inline long
fd_snp_timestamp_ms( void ) {
  long ns_per_ms = 1000L*1000L;
  return fd_log_wallclock() / ns_per_ms;
}
+
+/* snp_hdr_{version,type} extract the version and type fields from
+ an snp_hdr_t. */
+
+FD_FN_PURE static inline uchar
+snp_hdr_version( snp_hdr_t const * hdr ) {
+ return (uchar)( hdr->version_type >> (24+4) );
+}
+
+FD_FN_PURE static inline uchar
+snp_hdr_type( snp_hdr_t const * hdr ) {
+ return (uchar)( (hdr->version_type >> 24) & 0x0F );
+}
+
+/* snp_hdr_version_type assembles the version_type compound field. */
+
+FD_FN_CONST static inline uint
+fd_snp_hdr_version_type( uint version,
+ uint type ) {
+ return (uint)( ( version << 4 ) | ( type & 0x0F ) ) << 24
+ | (uint)'S' << 0
+ | (uint)'N' << 8
+ | (uint)'P' << 16;
+}
+
#if FD_SNP_DEBUG_ENABLED
#include <stdio.h> /* snprintf -- FIXED: the header name was missing from this #include, which cannot compile */
/* fd_snp_log_conn formats a one-line human readable description of
   conn for debug logging.  Returns a pointer to a static buffer:
   not thread safe, debug builds only. */
static inline char *
fd_snp_log_conn( fd_snp_conn_t * conn ) {
  static char buf[256];
  buf[0] = '\0';
  if( !conn ) return buf; /* avoid returning a string literal through char * */
  uint   ip4  = (uint  )( conn->peer_addr>>0  );
  ushort port = (ushort)( conn->peer_addr>>32 );
  /* snprintf (vs sprintf) bounds the write to buf. */
  snprintf( buf, sizeof(buf), "session_id=%016lx peer=%u.%u.%u.%u:%u",
            conn->session_id, (ip4>>0)&0xff, (ip4>>8)&0xff, (ip4>>16)&0xff, (ip4>>24)&0xff, port );
  return buf;
}
#endif
+
+FD_PROTOTYPES_END
+
+#endif /* HEADER_fd_src_waltz_snp_fd_snp_proto_h */
diff --git a/src/waltz/snp/fd_snp_v1.c b/src/waltz/snp/fd_snp_v1.c
new file mode 100644
index 00000000000..2f64cbf0db0
--- /dev/null
+++ b/src/waltz/snp/fd_snp_v1.c
@@ -0,0 +1,557 @@
+#include "fd_snp_v1.h"
+#include "fd_snp_common.h"
+
+#include "../../util/bits/fd_bits.h"
+#include "../../ballet/aes/fd_aes_gcm.h"
+#include "../../ballet/sha256/fd_sha256.h"
+#include "../../ballet/hmac/fd_hmac.h"
+#include "../../ballet/ed25519/fd_ed25519.h"
+
/* Accessors into the overlapping key-material areas of fd_snp_conn_t.
   The same storage is deliberately reused across handshake phases:
     _sensitive_keys[ 0..32) : transport tx key / Noise chaining key (ck)
     _sensitive_keys[32..64) : transport rx key / Noise cipher key (k)
                               / DH shared secret / ephemeral private key
     _peer_pubkey  [ 0..32) : Noise transcript hash (h) / ephemeral
                              public key (per the accessor definitions
                              below -- confirm the peer pubkey proper is
                              only stored post-handshake)
   Callers must respect the phase ordering implied by this aliasing. */

static inline uchar *
fd_snp_conn_tx_key( fd_snp_conn_t * conn ) {
  return conn->_sensitive_keys;
}

static inline uchar *
fd_snp_conn_rx_key( fd_snp_conn_t * conn ) {
  return conn->_sensitive_keys + 32;
}

static inline uchar *
fd_snp_conn_noise_hash( fd_snp_conn_t * conn ) {
  return conn->_peer_pubkey; /* SymmetricState.h */
}

static inline uchar *
fd_snp_conn_noise_chaining_key( fd_snp_conn_t * conn ) {
  return conn->_sensitive_keys; /* SymmetricState.ck */
}

static inline uchar *
fd_snp_conn_noise_cipher_key( fd_snp_conn_t * conn ) {
  return conn->_sensitive_keys + 32; /* CipherState.k */
}

static inline uchar *
fd_snp_conn_noise_shared_secret( fd_snp_conn_t * conn ) {
  return fd_snp_conn_noise_cipher_key( conn );
}

static inline uchar *
fd_snp_conn_ephemeral_private_key( fd_snp_conn_t * conn ) {
  return fd_snp_conn_noise_cipher_key( conn );
}

static inline uchar *
fd_snp_conn_ephemeral_public_key( fd_snp_conn_t * conn ) {
  return fd_snp_conn_noise_hash( conn );
}
+
/* fd_snp_v1_noise_mix_hash implements Noise MixHash:
   h = SHA-256( h || data ), updating the transcript hash in place. */
static inline void
fd_snp_v1_noise_mix_hash( fd_snp_conn_t * conn,
                          uchar const * data,
                          ulong data_sz ) {
  fd_sha256_t sha[1];
  uchar * hash = fd_snp_conn_noise_hash( conn );
  fd_sha256_init( sha );
  fd_sha256_append( sha, hash, 32 );
  fd_sha256_append( sha, data, data_sz );
  fd_sha256_fini( sha, fd_snp_conn_noise_hash( conn ) );
}
+
+static inline void
+fd_snp_v1_noise_init( fd_snp_conn_t * conn ) {
+ // sha256( SNPv1=Noise_XXsig_25519_Ed25519_AESGCM_SHA256 )
+ uchar init[32] = {
+ 0x3e, 0xa8, 0xf4, 0xa8, 0xf9, 0xf6, 0x58, 0x04, 0x5c, 0xe9, 0x43, 0x3e, 0x39, 0xf9, 0xf0, 0x44,
+ 0x83, 0xec, 0x19, 0x92, 0xd5, 0x57, 0x1f, 0x77, 0x89, 0x31, 0x84, 0x71, 0xee, 0xb9, 0x12, 0xfc,
+ };
+ uchar * hash = fd_snp_conn_noise_hash( conn );
+ memcpy( hash, init, 32 );
+ memcpy( fd_snp_conn_noise_chaining_key( conn ), hash, 32 );
+}
+
/* fd_snp_v1_noise_hkdf implements the Noise HKDF (RFC 5869 with the
   chaining key as salt and empty info): an extract round followed by
   two expand rounds producing key0 and key1.  data/data_sz is the
   input keying material (may be NULL/0). */
void FD_FN_SENSITIVE
fd_snp_v1_noise_hkdf( fd_snp_conn_t * conn,
                      uchar const * data,
                      ulong data_sz,
                      uchar * key0,
                      uchar * key1 ) {
  uchar temp_key[32];
  uchar temp_data[33];

  /* extract: temp_key = HMAC-SHA256( key=ck, msg=data ) */
  fd_hmac_sha256( data, data_sz, fd_snp_conn_noise_chaining_key( conn ), 32, temp_key );

  /* expand round 1: key0 = HMAC( temp_key, 0x01 ) */
  temp_data[0] = 1;
  fd_hmac_sha256( temp_data, 1, temp_key, 32, key0 );

  /* expand round 2: key1 = HMAC( temp_key, key0 || 0x02 ) */
  memcpy( temp_data, key0, 32 );
  temp_data[32] = 2;
  fd_hmac_sha256( temp_data, 33, temp_key, 32, key1 );

  /* clean up sensitive variables */
  fd_memset_explicit( temp_key, 0, 32 );
  fd_memset_explicit( temp_data, 0, 33 );
}
+
/* fd_snp_v1_noise_mix_key implements Noise MixKey:
   (ck, k) = HKDF( ck, data ).  ck and k are adjacent regions of
   conn->_sensitive_keys (cf. accessors at the top of this file). */
static inline void
fd_snp_v1_noise_mix_key( fd_snp_conn_t * conn,
                         uchar const * data,
                         ulong data_sz ) {
  fd_snp_v1_noise_hkdf( conn, data, data_sz,
    fd_snp_conn_noise_chaining_key( conn ),
    fd_snp_conn_noise_cipher_key( conn )
  );
}
+
/* fd_snp_v1_noise_enc_and_hash implements Noise EncryptAndHash:
   AES-128-GCM encrypt data under the cipher key, with the transcript
   hash as AAD and nonce in the first IV byte, then MixHash over the
   ciphertext+tag.  out must have room for data_sz+16 bytes (the
   16-byte GCM tag is appended at out+data_sz). */
void FD_FN_SENSITIVE
fd_snp_v1_noise_enc_and_hash( fd_snp_conn_t * conn,
                              uchar nonce,
                              uchar const * data,
                              ulong data_sz,
                              uchar * out ) {
  fd_aes_gcm_t aes[1];
  uchar iv[12] = { 0 }; /* 96-bit IV: first byte carries the nonce, rest zero */
  uchar * hash = fd_snp_conn_noise_hash( conn );

  iv[0] = nonce;
  fd_aes_128_gcm_init( aes, fd_snp_conn_noise_cipher_key( conn ), iv );
  fd_aes_gcm_encrypt( aes, out, data, data_sz, hash, 32, out+data_sz );
  fd_snp_v1_noise_mix_hash( conn, out, data_sz+16 );
}
+
+int FD_FN_SENSITIVE
+fd_snp_v1_noise_dec_and_hash( fd_snp_conn_t * conn,
+ uchar nonce,
+ uchar const * data,
+ ulong data_sz,
+ uchar * out ) {
+ fd_aes_gcm_t aes[1];
+ uchar iv[12] = { 0 };
+ uchar * hash = fd_snp_conn_noise_hash( conn );
+
+ iv[0] = nonce;
+ fd_aes_128_gcm_init( aes, fd_snp_conn_noise_cipher_key( conn ), iv );
+ if( FD_LIKELY( fd_aes_gcm_decrypt( aes, data, out, data_sz-16, hash, 32, data+data_sz-16 )==1 ) ) {
+ fd_snp_v1_noise_mix_hash( conn, data, data_sz );
+ return 0;
+ }
+ return -1;
+}
+
+int
+fd_snp_v1_noise_sig_verify( fd_snp_conn_t * conn,
+ uchar const pubkey[32],
+ uchar const sig[64] ) {
+ fd_sha512_t sha[1];
+ uchar const * msg = fd_snp_conn_noise_hash( conn );
+ if( FD_LIKELY( fd_ed25519_verify( msg, 32, sig, pubkey, sha )==FD_ED25519_SUCCESS ) ) {
+ return 0;
+ }
+ return -1;
+}
+
+int FD_FN_SENSITIVE
+fd_snp_v1_noise_dec_sig_verify_and_hash( fd_snp_conn_t * conn,
+ uchar nonce,
+ uchar const * data,
+ ulong data_sz,
+ uchar const pubkey[32] ) {
+ fd_aes_gcm_t aes[1];
+ uchar iv[12] = { 0 };
+ uchar * hash = fd_snp_conn_noise_hash( conn );
+
+ uchar out_signature[64];
+
+ iv[0] = nonce;
+ fd_aes_128_gcm_init( aes, fd_snp_conn_noise_cipher_key( conn ), iv );
+ if( FD_LIKELY(
+ fd_aes_gcm_decrypt( aes, data, out_signature, data_sz-16, hash, 32, data+data_sz-16 )==1
+ && fd_snp_v1_noise_sig_verify( conn, pubkey, out_signature )==0
+ ) ) {
+ fd_snp_v1_noise_mix_hash( conn, data, data_sz );
+ return 0;
+ }
+ return -1;
+}
+
+void
+fd_snp_v1_noise_fini( fd_snp_conn_t * conn ) {
+ /* For server, key0 is rx key, key1 is tx key. For client it's vice versa. */
+ if( conn->is_server ) {
+ fd_snp_v1_noise_hkdf( conn, NULL, 0, fd_snp_conn_rx_key( conn ), fd_snp_conn_tx_key( conn ) );
+ } else {
+ fd_snp_v1_noise_hkdf( conn, NULL, 0, fd_snp_conn_tx_key( conn ), fd_snp_conn_rx_key( conn ) );
+ }
+}
+
+int
+fd_snp_v1_client_init( fd_snp_config_t const * client FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in FD_PARAM_UNUSED, /* no incoming data */
+ ulong pkt_in_sz FD_PARAM_UNUSED, /* no incoming data */
+ uchar * pkt_out,
+ uchar * extra FD_PARAM_UNUSED ) {
+ fd_snp_v1_pkt_hs_client_t out[1] = { 0 };
+
+ /* Validate */
+ if( FD_UNLIKELY( conn->state != 0 ) ) {
+ return -1;
+ }
+
+ /* Prepare data */
+ uchar * private_key = fd_snp_conn_ephemeral_private_key( conn );
+ uchar * public_key = fd_snp_conn_ephemeral_public_key ( conn );
+ if( FD_UNLIKELY( fd_snp_v1_crypto_key_share_generate( private_key, public_key )<0 ) ) {
+ return -1;
+ }
+
+ /* Prepare packet */
+ out->hs.version = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_HS_CLIENT_INIT );
+ out->hs.src_session_id = conn->session_id;
+ memcpy( out->e, conn->_sensitive_keys+32, 32 );
+
+ /* Finalize packet and copy to output buffer */
+ memset( pkt_out+FD_SNP_SIZEOF_CLIENT_INIT_PAYLOAD, 0, FD_SNP_SIZEOF_CLIENT_INIT );
+ memcpy( pkt_out, out, FD_SNP_SIZEOF_CLIENT_INIT_PAYLOAD );
+
+ /* Update conn state */
+ conn->state = FD_SNP_TYPE_HS_CLIENT_INIT;
+
+ return (int)FD_SNP_SIZEOF_CLIENT_INIT;
+}
+
/* fd_snp_v1_server_init builds the stateless server_init response to a
   client_init packet: echoes the client's session id and attaches an
   encrypted server state blob (challenge).  No connection state is
   modified.  Returns the packet size on success, -1 on error.
   NOTE(review): conn is marked FD_PARAM_UNUSED but IS passed to
   fd_snp_v1_crypto_enc_state_generate below -- confirm the annotation. */
int
fd_snp_v1_server_init( fd_snp_config_t const * server,
                       fd_snp_conn_t * conn FD_PARAM_UNUSED, /* stateless */
                       uchar const * pkt_in,
                       ulong pkt_in_sz,
                       uchar * pkt_out,
                       uchar * extra FD_PARAM_UNUSED ) {
  fd_snp_v1_pkt_hs_server_t out[1] = { 0 };

  /* Validate: exact client_init size expected. */
  if( FD_UNLIKELY( pkt_in_sz != FD_SNP_SIZEOF_CLIENT_INIT ) ) {
    return -1;
  }

  /* Prepare data: the client's session id, echoed back below. */
  ulong session_id = fd_ulong_load_8_fast( pkt_in+FD_SNP_PKT_SRC_SESSION_ID_OFF );

  /* Prepare packet */
  out->hs.version = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_HS_SERVER_INIT );
  out->hs.session_id = session_id;
  fd_snp_v1_crypto_enc_state_generate( server, conn, out->r );

  /* Finalize packet and copy to output buffer.
     NOTE(review): memset length is FD_SNP_SIZEOF_SERVER_INIT starting
     at offset ..._PAYLOAD, i.e. it zeroes ..._PAYLOAD bytes beyond the
     packet end -- harmless if pkt_out is MTU sized, but confirm. */
  memset( pkt_out+FD_SNP_SIZEOF_SERVER_INIT_PAYLOAD, 0, FD_SNP_SIZEOF_SERVER_INIT );
  memcpy( pkt_out, out, FD_SNP_SIZEOF_SERVER_INIT_PAYLOAD );

  /* NO conn state update: stateless */

  return (int)FD_SNP_SIZEOF_SERVER_INIT;
}
+
/* fd_snp_v1_client_cont builds the client_cont packet in response to a
   server_init: re-sends the client's ephemeral public key and echoes
   the server's challenge.  Idempotent on state (accepts a repeat while
   already in CLIENT_CONT, e.g. on retry).  Returns the packet size on
   success, -1 on error. */
int
fd_snp_v1_client_cont( fd_snp_config_t const * client FD_PARAM_UNUSED,
                       fd_snp_conn_t * conn,
                       uchar const * pkt_in,
                       ulong pkt_in_sz,
                       uchar * pkt_out,
                       uchar * extra FD_PARAM_UNUSED ) {
  fd_snp_v1_pkt_hs_client_t out[1] = { 0 };

  /* Validate: must follow our own client_init (or a previous
     client_cont retry). */
  if( FD_UNLIKELY( conn->state != FD_SNP_TYPE_HS_CLIENT_INIT
                && conn->state != FD_SNP_TYPE_HS_CLIENT_CONT ) ) {
    return -1;
  }

  if( FD_UNLIKELY( pkt_in_sz != FD_SNP_SIZEOF_SERVER_INIT ) ) {
    return -1;
  }

  /* Prepare packet: ephemeral public key + echoed server challenge. */
  out->hs.version = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_HS_CLIENT_CONT );
  out->hs.src_session_id = conn->session_id;
  memcpy( out->e, fd_snp_conn_ephemeral_public_key( conn ), 32 );
  memcpy( out->r, pkt_in+FD_SNP_PKT_SERVER_CHALLENGE_OFF, 16 );

  /* Finalize packet and copy to output buffer.
     NOTE(review): memset zeroes FD_SNP_SIZEOF_CLIENT_CONT bytes
     starting at offset ..._PAYLOAD, i.e. past the packet end --
     harmless if pkt_out is MTU sized, but confirm. */
  memset( pkt_out+FD_SNP_SIZEOF_CLIENT_CONT_PAYLOAD, 0, FD_SNP_SIZEOF_CLIENT_CONT );
  memcpy( pkt_out, out, FD_SNP_SIZEOF_CLIENT_CONT_PAYLOAD );

  /* Update conn state */
  conn->state = FD_SNP_TYPE_HS_CLIENT_CONT;

  return (int)FD_SNP_SIZEOF_CLIENT_CONT;
}
+
+int
+fd_snp_v1_server_fini_precheck( fd_snp_config_t const * server,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out FD_PARAM_UNUSED,
+ uchar * extra FD_PARAM_UNUSED ) {
+ /* Validate */
+ if( FD_UNLIKELY( pkt_in_sz != FD_SNP_SIZEOF_CLIENT_CONT ) ) {
+ return -1;
+ }
+
+ if( FD_UNLIKELY( fd_snp_v1_crypto_enc_state_validate( server, conn, pkt_in+FD_SNP_PKT_CLIENT_CHALLENGE_OFF )<0 ) ) {
+ return -1;
+ }
+
+ return 0;
+}
+
/* fd_snp_v1_server_fini processes a client_cont packet and builds the
   server_fini response: validates the echoed challenge, generates the
   server's ephemeral key, performs the ee Diffie-Hellman, runs the
   Noise transcript up to the encrypted static key, and hands the
   32-byte transcript hash to the caller (via extra) for asynchronous
   signing.  Returns the packet size on success, -1 on error.
   IMPORTANT: the MixHash call order below defines the wire transcript;
   it must match the client side exactly -- do not reorder. */
int
fd_snp_v1_server_fini( fd_snp_config_t const * server,
                       fd_snp_conn_t * conn,
                       uchar const * pkt_in,
                       ulong pkt_in_sz,
                       uchar * pkt_out,
                       uchar * extra ) {
  fd_snp_v1_pkt_hs_server_t out[1] = { 0 };

  /* Validate: exact client_cont size expected. */
  if( FD_UNLIKELY( pkt_in_sz != FD_SNP_SIZEOF_CLIENT_CONT ) ) {
    return -1;
  }

  /* Expect server state to be just initialized
     (because it wasn't modified by server_init) */
  if( FD_UNLIKELY( conn->state != 0 ) ) {
    return -1;
  }

  /* Re-validate the challenge (the server is stateless up to here). */
  if( FD_UNLIKELY( fd_snp_v1_crypto_enc_state_validate( server, conn, pkt_in+FD_SNP_PKT_CLIENT_CHALLENGE_OFF )<0 ) ) {
    return -1;
  }

  /* Prepare data: fields from the client packet, fresh server
     ephemeral key pair, and the ee shared secret. */
  ulong session_id = fd_ulong_load_8_fast( pkt_in+FD_SNP_PKT_SRC_SESSION_ID_OFF );
  uchar const * client_ephemeral = pkt_in + FD_SNP_PKT_CLIENT_EPHEMERAL_OFF;
  uchar const * challenge = pkt_in + FD_SNP_PKT_CLIENT_CHALLENGE_OFF;

  uchar * server_ephemeral_private = fd_snp_conn_ephemeral_private_key( conn );
  uchar server_ephemeral[ 32 ];
  if( FD_UNLIKELY( fd_snp_v1_crypto_key_share_generate( server_ephemeral_private, server_ephemeral )<0 ) ) {
    return -1;
  }

  /* shared_secret_ee aliases the Noise cipher key area; it is consumed
     by mix_key below. */
  uchar * shared_secret_ee = fd_snp_conn_noise_shared_secret( conn );
  fd_x25519_exchange( shared_secret_ee, server_ephemeral_private, client_ephemeral );

  /* Noise transcript: session ids, both ephemerals, the challenge,
     then key mixing and encryption of the server static pubkey. */
  fd_snp_v1_noise_init( conn );
  fd_snp_v1_noise_mix_hash( conn, (void *)&session_id, 8 ); /* client session_id */
  fd_snp_v1_noise_mix_hash( conn, client_ephemeral, 32 );
  fd_snp_v1_noise_mix_hash( conn, challenge, 16 );
  fd_snp_v1_noise_mix_hash( conn, (void *)&conn->session_id, 8 ); /* server session_id */
  fd_snp_v1_noise_mix_hash( conn, server_ephemeral, 32 );
  fd_snp_v1_noise_mix_key( conn, shared_secret_ee, 32 );
  fd_snp_v1_noise_enc_and_hash( conn, 0, conn->_pubkey, 32, out->enc_s1 );

  /* Prepare packet */
  out->hs.version = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_HS_SERVER_FINI );
  out->hs.session_id = session_id;
  out->hs.src_session_id = conn->session_id;
  memcpy( out->r, challenge, 16 );
  memcpy( out->e, server_ephemeral, 32 );

  /* Finalize packet and copy to output buffer.
     NOTE(review): memset zeroes FD_SNP_SIZEOF_SERVER_FINI bytes past
     offset ..._PAYLOAD (beyond the packet end) -- harmless for an
     MTU-sized pkt_out, but confirm. */
  memset( pkt_out+FD_SNP_SIZEOF_SERVER_FINI_PAYLOAD, 0, FD_SNP_SIZEOF_SERVER_FINI );
  memcpy( pkt_out, out, FD_SNP_SIZEOF_SERVER_FINI_PAYLOAD );

  /* Update conn state: signature is produced asynchronously. */
  conn->peer_session_id = session_id;
  conn->state = FD_SNP_TYPE_HS_SERVER_FINI_SIG;

  /* Prepare payload to sign */
  memcpy( extra, fd_snp_conn_noise_hash( conn ), 32 );

  return (int)FD_SNP_SIZEOF_SERVER_FINI;
}
+
+/* fd_snp_v1_client_fini consumes a server_fini packet (pkt_in),
+   verifies the server's encrypted static key and signature against the
+   reconstructed noise transcript, and builds the client_fini packet in
+   pkt_out.  On success it stores the 32-byte transcript hash to be
+   signed in extra, records the server session id in
+   conn->peer_session_id, advances conn->state to
+   FD_SNP_TYPE_HS_CLIENT_FINI_SIG and returns FD_SNP_SIZEOF_CLIENT_FINI.
+   Returns -1 on any validation failure. */
+int
+fd_snp_v1_client_fini( fd_snp_config_t const * client FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra ) {
+ fd_snp_v1_pkt_hs_client_t out[1] = { 0 };
+
+ /* Validate */
+ if( FD_UNLIKELY( conn->state != FD_SNP_TYPE_HS_CLIENT_CONT ) ) {
+ return -1;
+ }
+
+ if( FD_UNLIKELY( pkt_in_sz != FD_SNP_SIZEOF_SERVER_FINI ) ) {
+ return -1;
+ }
+
+ /* Prepare data */
+ ulong session_id = fd_ulong_load_8_fast( pkt_in+FD_SNP_PKT_SRC_SESSION_ID_OFF );
+ uchar const * challenge = pkt_in + FD_SNP_PKT_SERVER_CHALLENGE_OFF;
+ uchar const * server_ephemeral = pkt_in + FD_SNP_PKT_SERVER_EPHEMERAL_OFF;
+ uchar const * enc_server_pubkey = pkt_in + FD_SNP_PKT_SERVER_ENC_PUBKEY_OFF;
+ uchar const * enc_server_sig = pkt_in + FD_SNP_PKT_SERVER_ENC_SIG_OFF;
+
+ /* Client ephemeral pair was generated earlier and kept in conn. */
+ uchar * client_ephemeral_private = fd_snp_conn_ephemeral_private_key( conn );
+ uchar client_ephemeral[ 32 ];
+ memcpy( client_ephemeral, fd_snp_conn_ephemeral_public_key( conn ), 32 );
+
+ /* ECDH: ephemeral-ephemeral shared secret. */
+ uchar * shared_secret_ee = fd_snp_conn_noise_shared_secret( conn );
+ fd_x25519_exchange( shared_secret_ee, client_ephemeral_private, server_ephemeral );
+
+ /* Noise transcript: same field order as fd_snp_v1_server_fini. */
+ fd_snp_v1_noise_init( conn );
+ fd_snp_v1_noise_mix_hash( conn, (void *)&conn->session_id, 8 ); /* client session_id */
+ fd_snp_v1_noise_mix_hash( conn, client_ephemeral, 32 );
+ fd_snp_v1_noise_mix_hash( conn, challenge, 16 );
+ fd_snp_v1_noise_mix_hash( conn, (void *)&session_id, 8 ); /* server session_id */
+ fd_snp_v1_noise_mix_hash( conn, server_ephemeral, 32 );
+ fd_snp_v1_noise_mix_key( conn, shared_secret_ee, 32 );
+
+ /* Decrypt the server static key (msg 0) then verify its signature
+ over the transcript (msg 1). */
+ uchar server_pubkey[ 32 ];
+ if( FD_UNLIKELY( fd_snp_v1_noise_dec_and_hash( conn, 0, enc_server_pubkey, 32+16, server_pubkey )<0 ) ) {
+ return -1;
+ }
+ if( FD_UNLIKELY( fd_snp_v1_noise_dec_sig_verify_and_hash( conn, 1, enc_server_sig, 64+16, server_pubkey )<0 ) ) {
+ return -1;
+ }
+
+ fd_snp_v1_noise_enc_and_hash( conn, 2, conn->_pubkey, 32, out->enc_s1 );
+
+ /* Prepare packet */
+ out->hs.version = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_HS_CLIENT_FINI );
+ out->hs.session_id = session_id;
+ out->hs.src_session_id = conn->session_id;
+
+ /* Finalize packet and copy to output buffer */
+ /* NOTE(review): like server_fini, this zeroes FD_SNP_SIZEOF_CLIENT_FINI
+ bytes starting at offset FD_SNP_SIZEOF_CLIENT_FINI_PAYLOAD; presumably
+ only the tail was intended -- confirm pkt_out capacity. */
+ memset( pkt_out+FD_SNP_SIZEOF_CLIENT_FINI_PAYLOAD, 0, FD_SNP_SIZEOF_CLIENT_FINI );
+ memcpy( pkt_out, out, FD_SNP_SIZEOF_CLIENT_FINI_PAYLOAD );
+
+ /* Prepare payload to sign */
+ memcpy( extra, fd_snp_conn_noise_hash( conn ), 32 );
+
+ /* Update conn state */
+ conn->peer_session_id = session_id;
+ conn->state = FD_SNP_TYPE_HS_CLIENT_FINI_SIG;
+
+ /* We need to temp store server_pubkey, so that we finalize it after
+ the async signature. We store it at the end of the packet.
+ (Read back by fd_snp_v1_client_fini_add_signature; requires pkt_out
+ to have 32 spare bytes past FD_SNP_SIZEOF_CLIENT_FINI.) */
+ memcpy( pkt_out+FD_SNP_SIZEOF_CLIENT_FINI, server_pubkey, 32 );
+
+ return (int)FD_SNP_SIZEOF_CLIENT_FINI;
+}
+
+/* fd_snp_v1_server_acpt consumes a client_fini packet: decrypts the
+   client's static key (msg 2), verifies its signature over the noise
+   transcript (msg 3), finalizes the noise state into session keys and
+   marks the handshake done.  Produces no output packet.  Returns 0 on
+   success, -1 on failure. */
+int
+fd_snp_v1_server_acpt( fd_snp_config_t const * server FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out FD_PARAM_UNUSED,
+ uchar * extra FD_PARAM_UNUSED ) {
+ /* Validate */
+ if( FD_UNLIKELY( conn->state != FD_SNP_TYPE_HS_SERVER_FINI ) ) {
+ return -1;
+ }
+
+ if( FD_UNLIKELY( pkt_in_sz != FD_SNP_SIZEOF_CLIENT_FINI ) ) {
+ return -1;
+ }
+
+ /* Prepare data */
+ uchar const * enc_client_pubkey = pkt_in + FD_SNP_PKT_CLIENT_ENC_PUBKEY_OFF;
+ uchar const * enc_client_sig = pkt_in + FD_SNP_PKT_CLIENT_ENC_SIG_OFF;
+ uchar client_pubkey[ 32 ];
+ if( FD_UNLIKELY( fd_snp_v1_noise_dec_and_hash( conn, 2, enc_client_pubkey, 32+16, client_pubkey )<0 ) ) {
+ return -1;
+ }
+ if( FD_UNLIKELY( fd_snp_v1_noise_dec_sig_verify_and_hash( conn, 3, enc_client_sig, 64+16, client_pubkey )<0 ) ) {
+ return -1;
+ }
+
+ /* Handshake complete: derive session keys and record peer identity. */
+ fd_snp_v1_noise_fini( conn );
+ memcpy( conn->_peer_pubkey, client_pubkey, 32 );
+ conn->state = FD_SNP_TYPE_HS_DONE;
+ return 0;
+}
+
+/* fd_snp_v1_server_fini_add_signature completes a pending server_fini
+   packet once the (possibly asynchronous) signature is available:
+   encrypts the 64-byte signature into the packet as noise msg 1 and
+   advances conn->state to FD_SNP_TYPE_HS_SERVER_FINI.  Always returns 0. */
+int
+fd_snp_v1_server_fini_add_signature( fd_snp_conn_t * conn,
+ uchar pkt_out[ FD_SNP_MTU-42 ],
+ uchar const sig[ 64 ] ) {
+ fd_snp_v1_noise_enc_and_hash( conn, 1, sig, 64, pkt_out+FD_SNP_PKT_SERVER_ENC_SIG_OFF );
+ conn->state = FD_SNP_TYPE_HS_SERVER_FINI;
+ return 0;
+}
+
+/* fd_snp_v1_client_fini_add_signature completes a pending client_fini
+   packet: encrypts the signature as noise msg 3, finalizes the noise
+   state into session keys, restores the server pubkey that
+   fd_snp_v1_client_fini stashed past the end of the packet, and marks
+   the handshake done.  Always returns 0. */
+int
+fd_snp_v1_client_fini_add_signature( fd_snp_conn_t * conn,
+ uchar pkt_out[ FD_SNP_MTU-42 ],
+ uchar const sig[ 64 ] ) {
+ fd_snp_v1_noise_enc_and_hash( conn, 3, sig, 64, pkt_out+FD_SNP_PKT_CLIENT_ENC_SIG_OFF );
+ fd_snp_v1_noise_fini( conn );
+
+ /* Set _peer_pubkey as server_pubkey, that was temp stored at the end of the packet */
+ memcpy( conn->_peer_pubkey, pkt_out+FD_SNP_SIZEOF_CLIENT_FINI, 32 );
+
+ conn->state = FD_SNP_TYPE_HS_DONE;
+ return 0;
+}
+
+/* fd_snp_v1_finalize_packet stamps the SNPv1 header (version/type and
+   peer session id) at the start of packet and appends an auth TLV
+   trailer in the last 19 bytes: 1-byte type (FD_SNP_FRAME_AUTH),
+   2-byte length (16, little part then 0), then the HMAC-SHA256 of
+   everything before the MAC, truncated to 16 bytes. */
+void
+fd_snp_v1_finalize_packet( fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz ) {
+ /* SNP header */
+ snp_hdr_t * udp_payload = (snp_hdr_t *)packet;
+ udp_payload->version_type = fd_snp_hdr_version_type( FD_SNP_V1, FD_SNP_TYPE_PAYLOAD );
+ udp_payload->session_id = conn->peer_session_id;
+
+ /* Data is already set by fd_snp_app_send */
+
+ /* Compute MAC */
+ packet[packet_sz-19] = FD_SNP_FRAME_AUTH;
+ packet[packet_sz-18] = 16;
+ packet[packet_sz-17] = 0;
+ uchar * hmac_out = packet+packet_sz-16;
+ /* this assumes that packet has extra 16 bytes (hmac_out is 32 bytes, truncated to 16) */
+ fd_hmac_sha256( packet, packet_sz-16, fd_snp_conn_tx_key( conn ), 32, hmac_out );
+}
+
+/* fd_snp_v1_validate_packet walks the TLV frames after the SNP header,
+   locates the FD_SNP_FRAME_AUTH frame and checks its 16-byte truncated
+   HMAC-SHA256 against the rx key.
+   Returns 0 on success, -1 if the TLV chain is malformed (bad auth
+   length or chain/packet length mismatch), -2 if no auth frame is
+   present, -3 if the MAC does not verify. */
+int
+fd_snp_v1_validate_packet( fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz ) {
+ uchar * snp_load = packet + sizeof(snp_hdr_t);
+ ulong snp_load_sz = packet_sz - sizeof(snp_hdr_t);
+ uchar const * hmac_ptr = NULL;
+ /* then iterate over all tlvs */
+ fd_snp_tlv_iter_t iter = fd_snp_tlv_iter_init( snp_load_sz );
+ for( ; !fd_snp_tlv_iter_done( iter, snp_load );
+ iter = fd_snp_tlv_iter_next( iter, snp_load ) ) {
+ /* not the proper type - continue */
+ if( fd_snp_tlv_iter_type( iter, snp_load )!=FD_SNP_FRAME_AUTH ) continue;
+ /* incorrect length */
+ if( fd_snp_tlv_iter_len( iter, snp_load )!=16U ) return -1;
+ /* verify hmac (done outside the loop to minimize compute) */
+ hmac_ptr = fd_snp_tlv_iter_ptr( iter, snp_load );
+ }
+ if( FD_UNLIKELY( iter.rem!=0L ) ) {
+ return -1; /* tlv chain vs packet length mismatch */
+ }
+ /* verify hmac */
+ if( FD_UNLIKELY( hmac_ptr==NULL ) ) {
+ return -2;
+ }
+ /* MAC covers everything from the start of packet up to hmac_ptr;
+ compare only the first 16 (truncated) bytes. */
+ uchar hmac_out[ 32 ];
+ if( FD_LIKELY( fd_hmac_sha256( packet, ((ulong)hmac_ptr)-((ulong)packet), fd_snp_conn_rx_key( conn ), 32, hmac_out )==hmac_out
+ && fd_memeq( hmac_out, hmac_ptr, 16 ) ) ) {
+ return 0; /* hmac validation pass. */
+ }
+ return -3; /* hmac validation failed. */
+}
diff --git a/src/waltz/snp/fd_snp_v1.h b/src/waltz/snp/fd_snp_v1.h
new file mode 100644
index 00000000000..ea9ebe55e7a
--- /dev/null
+++ b/src/waltz/snp/fd_snp_v1.h
@@ -0,0 +1,275 @@
+#ifndef HEADER_fd_src_waltz_snp_fd_snp_v1_h
+#define HEADER_fd_src_waltz_snp_fd_snp_v1_h
+
+#include "fd_snp_proto.h"
+#include "../../ballet/ed25519/fd_x25519.h"
+
+/* Maximum age of a server challenge before it is rejected by
+   fd_snp_v1_crypto_enc_state_validate.
+   NOTE(review): "TIMOUT" is a typo for "TIMEOUT" in the macro name;
+   renaming would touch all users, so it is left as-is here. */
+#define FD_SNP_HS_SERVER_CHALLENGE_TIMOUT_MS (60000L)
+
+/* Size of payload and packet for each handshake message.
+ All handshake packets are 1200 bytes long. */
+#define FD_SNP_SIZEOF_CLIENT_INIT_PAYLOAD (52UL)
+#define FD_SNP_SIZEOF_CLIENT_INIT FD_SNP_MTU_MIN
+#define FD_SNP_SIZEOF_SERVER_INIT_PAYLOAD (36UL)
+#define FD_SNP_SIZEOF_SERVER_INIT FD_SNP_MTU_MIN
+#define FD_SNP_SIZEOF_CLIENT_CONT_PAYLOAD (68UL)
+#define FD_SNP_SIZEOF_CLIENT_CONT FD_SNP_MTU_MIN
+#define FD_SNP_SIZEOF_SERVER_FINI_PAYLOAD (196UL)
+#define FD_SNP_SIZEOF_SERVER_FINI FD_SNP_MTU_MIN
+#define FD_SNP_SIZEOF_CLIENT_FINI_PAYLOAD (148UL)
+#define FD_SNP_SIZEOF_CLIENT_FINI FD_SNP_MTU_MIN
+
+/* Offsets of fields in the handshake packet.  Client and server sets
+ overlap because they describe different packet layouts. */
+#define FD_SNP_PKT_SRC_SESSION_ID_OFF (12UL)
+#define FD_SNP_PKT_CLIENT_EPHEMERAL_OFF (20UL)
+#define FD_SNP_PKT_CLIENT_CHALLENGE_OFF (52UL)
+#define FD_SNP_PKT_CLIENT_ENC_PUBKEY_OFF (20UL)
+#define FD_SNP_PKT_CLIENT_ENC_SIG_OFF (68UL)
+#define FD_SNP_PKT_SERVER_CHALLENGE_OFF (20UL)
+#define FD_SNP_PKT_SERVER_EPHEMERAL_OFF (36UL)
+#define FD_SNP_PKT_SERVER_ENC_PUBKEY_OFF (68UL)
+#define FD_SNP_PKT_SERVER_ENC_SIG_OFF (116UL)
+
+/* Handshake packet header.
+   It contains the version, session ID, and source session ID.
+   Comparable to fd_snp_hdr_t; during the handshake we have
+   the extra src_session_id field. */
+struct __attribute__((packed)) fd_snp_v1_pkt_hs {
+  uint version;
+  ulong session_id;
+  ulong src_session_id;
+};
+typedef struct fd_snp_v1_pkt_hs fd_snp_v1_pkt_hs_t;
+
+/* Client handshake packet.
+   It contains the hs header, and the client_init/client_cont or
+   client_fini data (the two layouts share storage via the union).
+   For client_init/client_cont, e is the client's ephemeral X25519
+   public key share and r the 16-byte server challenge.
+   For client_fini, enc_s1 is the encrypted static public key and
+   enc_sig1 the encrypted signature (each with a 16-byte auth tag). */
+struct __attribute__((packed)) fd_snp_v1_pkt_hs_client {
+  fd_snp_v1_pkt_hs_t hs;
+  union {
+    struct {
+      uchar e[ 32 ]; /* client_init */
+      uchar r[ 16 ]; /* client_cont */
+    };
+    struct {
+      uchar enc_s1 [ 32+16 ]; /* client_fini */
+      uchar enc_sig1[ 64+16 ];
+    };
+  };
+};
+typedef struct fd_snp_v1_pkt_hs_client fd_snp_v1_pkt_hs_client_t;
+
+/* Server handshake packet.
+   It contains the hs header, and the server_init or server_fini data.
+   r is the 16-byte encrypted challenge (server_init), e the server's
+   ephemeral X25519 public key share, enc_s1 the encrypted static
+   public key and enc_sig1 the encrypted signature (each with a
+   16-byte auth tag; server_fini). */
+struct __attribute__((packed)) fd_snp_v1_pkt_hs_server {
+  fd_snp_v1_pkt_hs_t hs;
+  uchar r[ 16 ]; /* server_init */
+  uchar e[ 32 ]; /* server_fini */
+  uchar enc_s1 [ 32+16 ];
+  uchar enc_sig1[ 64+16 ];
+};
+typedef struct fd_snp_v1_pkt_hs_server fd_snp_v1_pkt_hs_server_t;
+
+/* Server challenge.
+   It contains the timestamp and the peer address.
+   Used to validate the challenge in the server_init packet.
+   This struct is exactly 16 bytes long, so we can encrypt it
+   with a single AES block (see fd_snp_v1_crypto_enc_state_generate /
+   _validate below). */
+struct __attribute__((packed)) fd_snp_v1_pkt_hs_server_r {
+  long timestamp_ms;
+  ulong peer_addr;
+};
+typedef struct fd_snp_v1_pkt_hs_server_r fd_snp_v1_pkt_hs_server_r_t;
+FD_STATIC_ASSERT( sizeof(fd_snp_v1_pkt_hs_server_r_t)==16UL, fd_snp_v1_pkt_hs_server_r_t );
+
+FD_PROTOTYPES_BEGIN
+
+/* Send/recv functions used by fd_snp.c */
+
+/* fd_snp_v1_finalize_packet finalizes the packet by adding the header,
+ data, and MAC in SNPv1 format.
+ conn is a valid, established connection (required, not checked).
+ packet is a valid buffer of at least packet_sz bytes (required, not checked).
+ packet_sz is the size of the packet buffer. */
+void
+fd_snp_v1_finalize_packet( fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz );
+
+/* fd_snp_v1_validate_packet validates the packet by checking the MAC
+ in SNPv1 format.
+ conn is a valid, established connection (required, not checked).
+ packet is a valid buffer of at least packet_sz bytes (required, not checked).
+ packet_sz is the size of the packet buffer.
+ Returns 0 on success, negative on failure (-1 malformed TLV chain,
+ -2 no auth frame, -3 MAC mismatch). */
+int
+fd_snp_v1_validate_packet( fd_snp_conn_t * conn,
+ uchar * packet,
+ ulong packet_sz );
+
+/* Handshake functions used by fd_snp.c
+
+ For consistency, all handshake functions have the same signature.
+ They accept in input a config (either client or server), a connection
+ `conn` and an input packet `pkt_in` of size `pkt_in_sz`.
+ They write the output packet in `pkt_out`, which is assumed to be at least
+ 1200 bytes long, and optionally they store a hash to be signed in the
+ `extra` buffer.
+ They return the size of the output packet on success, -1 on failure.
+
+ Client and server are assumed to be initialized and not checked.
+ Conn is assumed to be a valid pointer, the state and other properties are checked.
+ In each function, unnecessary parameters are simply ignored.
+
+ The handshake messages/functions are:
+
+ Client Server
+ client_init
+ ------------------->
+ server_init
+ <-------------------
+ client_cont
+ ------------------->
+ server_fini
+ <-------------------
+ client_fini
+ -------------------> --+
+ | server_acpt
+ <-+
+*/
+
+/* fd_snp_v1_client_init generates the client_init packet.
+ (see also common docs above) */
+int
+fd_snp_v1_client_init( fd_snp_config_t const * client FD_PARAM_UNUSED,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+
+/* fd_snp_v1_server_init validates the client_init packet and
+ generates the server_init packet. (see also common docs above) */
+int
+fd_snp_v1_server_init( fd_snp_config_t const * server,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+
+/* fd_snp_v1_client_cont validates the server_init packet and
+ generates the client_cont packet. (see also common docs above) */
+int
+fd_snp_v1_client_cont( fd_snp_config_t const * client,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+
+/* fd_snp_v1_server_fini validates the client_cont packet and
+ generates the server_fini packet and the metadata to be signed.
+ (see also common docs above) */
+int
+fd_snp_v1_server_fini( fd_snp_config_t const * server,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+
+/* fd_snp_v1_client_fini validates the server_fini packet and
+ generates the client_fini packet and the metadata to be signed.
+ (see also common docs above) */
+ int
+fd_snp_v1_client_fini( fd_snp_config_t const * client,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+
+/* fd_snp_v1_server_acpt validates the client_fini.
+ (see also common docs above) */
+int
+fd_snp_v1_server_acpt( fd_snp_config_t const * server,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+
+/* fd_snp_v1_server_fini_add_signature adds a signature to a pending
+ server_fini packet. */
+int
+fd_snp_v1_server_fini_add_signature( fd_snp_conn_t * conn,
+ uchar out[ FD_SNP_MTU-42 ],
+ uchar const sig[ 64 ] );
+
+/* fd_snp_v1_client_fini_add_signature adds a signature to a pending
+ client_fini packet. */
+int
+fd_snp_v1_client_fini_add_signature( fd_snp_conn_t * conn,
+ uchar out[ FD_SNP_MTU-42 ],
+ uchar const sig[ 64 ] );
+
+/* fd_snp_v1_server_fini_precheck validates the client_cont packet
+ challenge. This is a convenient function to quickly exclude invalid
+ packets, before performing more expensive operations (such as
+ allocating connection state and generating cryptographic keys). */
+int
+fd_snp_v1_server_fini_precheck( fd_snp_config_t const * server,
+ fd_snp_conn_t * conn,
+ uchar const * pkt_in,
+ ulong pkt_in_sz,
+ uchar * pkt_out,
+ uchar * extra );
+/* Private functions, defined here so we can use them in tests. */
+
+/* fd_snp_v1_crypto_key_share_generate fills private_key with 32 random
+   bytes from fd_snp_rng and derives the matching X25519 public key.
+   Returns 0 on success, -1 if the RNG fails. */
+static inline int
+fd_snp_v1_crypto_key_share_generate( uchar private_key[32], uchar public_key[32] ) {
+  int res = fd_snp_rng( private_key, 32 );
+  if( FD_UNLIKELY( res < 0 ) ) {
+    return -1;
+  }
+  fd_x25519_public( public_key, private_key );
+  return 0;
+}
+
+/* fd_snp_v1_crypto_enc_state_generate builds a stateless 16-byte
+   challenge (current timestamp + peer address) and encrypts it in one
+   AES block with the server's state encryption key.  Always returns 0. */
+static inline int
+fd_snp_v1_crypto_enc_state_generate( fd_snp_config_t const * server,
+ fd_snp_conn_t const * conn,
+ uchar out_challenge[ 16 ] ) {
+ fd_snp_v1_pkt_hs_server_r_t challenge[1] = { 0 };
+ challenge->timestamp_ms = fd_snp_timestamp_ms();
+ challenge->peer_addr = conn->peer_addr;
+ fd_aes_encrypt( (uchar const *)challenge, out_challenge, server->_state_enc_key );
+ return 0;
+}
+
+/* fd_snp_v1_crypto_enc_state_validate decrypts a challenge produced by
+   fd_snp_v1_crypto_enc_state_generate and accepts it only if the
+   timestamp is within FD_SNP_HS_SERVER_CHALLENGE_TIMOUT_MS of now (and
+   not in the future) and the peer address matches the connection.
+   Returns 0 if valid, -1 otherwise. */
+static inline int
+fd_snp_v1_crypto_enc_state_validate( fd_snp_config_t const * server,
+ fd_snp_conn_t const * conn,
+ uchar const in_challenge[ 16 ] ) {
+ fd_snp_v1_pkt_hs_server_r_t decrypted[1] = { 0 };
+ fd_aes_decrypt( in_challenge, (uchar *)decrypted, server->_state_dec_key );
+
+ long now_ms = fd_snp_timestamp_ms();
+ long min_ms = now_ms - FD_SNP_HS_SERVER_CHALLENGE_TIMOUT_MS;
+ if( FD_LIKELY(
+ ( min_ms <= decrypted->timestamp_ms && decrypted->timestamp_ms <= now_ms )
+ && ( decrypted->peer_addr == conn->peer_addr )
+ ) ) {
+ return 0;
+ }
+ return -1;
+}
+
+FD_PROTOTYPES_END
+
+#endif /* HEADER_fd_src_waltz_snp_fd_snp_v1_h */
diff --git a/src/waltz/snp/test_snp_app.c b/src/waltz/snp/test_snp_app.c
new file mode 100644
index 00000000000..005d7787aaa
--- /dev/null
+++ b/src/waltz/snp/test_snp_app.c
@@ -0,0 +1,684 @@
+#define _POSIX_C_SOURCE 199309L
+
+#include "fd_snp_app.h"
+#include "fd_snp.h"
+#include "fd_snp_private.h"
+#include "../../ballet/sha512/fd_sha512.h"
+#include "../../ballet/ed25519/fd_ed25519.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* external_generate_keypair makes a test Ed25519 key pair: 32 random
+   private bytes from fd_rng_secure, public key derived via
+   fd_ed25519_public_from_private with a locally joined sha512. */
+static void
+external_generate_keypair( uchar private_key[32], uchar public_key[32] ) {
+  fd_sha512_t sha512[1];
+  FD_TEST( fd_sha512_join( fd_sha512_new( sha512 ) ) );
+  FD_TEST( fd_rng_secure( private_key, 32 )!=NULL );
+  fd_ed25519_public_from_private( public_key, private_key, sha512 );
+}
+
+/* clear_state deletes every in-use connection in snp's connection pool
+   (used between test scenarios).
+   NOTE(review): the for-loop line below appears corrupted by extraction
+   (reads "idxsession_id == 0" -- likely a mangled "idx<max; ...
+   conn[idx].session_id == 0" with the loop body braces lost); it does
+   not parse as written.  Recover the original from version control. */
+static void
+clear_state( fd_snp_t * snp ) {
+  ulong max = fd_snp_conn_pool_max( snp->conn_pool );
+  ulong used = fd_snp_conn_pool_used( snp->conn_pool );
+  ulong idx = 0;
+  ulong used_ele = 0;
+  fd_snp_conn_t * conn = snp->conn_pool;
+
+  for( ; idxsession_id == 0 ) continue;
+    fd_snp_conn_delete( snp, conn );
+    if( ++used_ele>=used ) break;
+  }
+}
+
+/* test_cb_ctx carries per-endpoint test state shared by the snp/app
+   callbacks below: expected values for assertions (assert_*), storage
+   for packets buffered during the handshake, the test key pair used by
+   the sign callback, and back-pointers used by the "detailed" tests. */
+struct test_cb_ctx {
+  uchar * out_packet;
+  uchar * assert_packet;
+  ulong assert_packet_sz;
+  uchar * assert_data;
+  ulong assert_data_sz;
+  ulong assert_peer;
+  ulong assert_meta;
+
+  /* buffering */
+  uchar assert_buffered;
+  fd_snp_pkt_t buf_packet[2];
+  ulong buf_cnt;
+
+  /* signature */
+  ulong sign_cnt;
+  uchar signature [ 64 ];
+  uchar public_key [ 32 ];
+  uchar private_key [ 32 ];
+  fd_snp_t * snp; // to invoke fd_snp_process_signature
+
+  /* test_v1_detailed */
+  fd_snp_app_t * snp_app;
+  uchar success;
+  uint ip;
+};
+typedef struct test_cb_ctx test_cb_ctx_t;
+
+/* test_cb_snp_tx: snp tx callback.  Patches the source IP into the
+   outgoing packet (normally done by the net tile) and, if the packet
+   is flagged buffered, copies it into ctx->buf_packet for the test to
+   replay later.  Returns the packet size. */
+static int
+test_cb_snp_tx( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+  test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+
+  /* set src ip - snp doesn't set it because in fd the net tile takes care of it */
+  fd_ip4_udp_hdrs_t * hdr = (fd_ip4_udp_hdrs_t *)packet;
+  hdr->ip4->saddr = ctx->ip;
+
+  if( meta & FD_SNP_META_OPT_BUFFERED ) {
+    memcpy( ctx->buf_packet[ ctx->buf_cnt ].data, packet, packet_sz );
+    ctx->buf_packet[ ctx->buf_cnt ].data_sz = (ushort)packet_sz;
+    ctx->buf_cnt++;
+  }
+  return (int)packet_sz;
+}
+
+/* test_cb_snp_rx: snp rx callback.  Asserts the delivered packet,
+   size, meta and buffered flag match the expectations pre-loaded in
+   ctx->assert_*; buffered deliveries are compared by content, direct
+   deliveries by pointer identity. */
+static int
+test_cb_snp_rx( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+  test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+  uchar buffered = (meta & FD_SNP_META_OPT_BUFFERED)==FD_SNP_META_OPT_BUFFERED;
+  FD_TEST( buffered == ctx->assert_buffered );
+  FD_TEST( packet_sz == ctx->assert_packet_sz );
+  FD_TEST( meta == ctx->assert_meta );
+  if( buffered ) {
+    ctx->buf_cnt++;
+    FD_TEST( fd_memeq( packet, ctx->assert_packet, packet_sz ) );
+  } else {
+    FD_TEST( packet == ctx->assert_packet );
+  }
+  return 1;
+}
+
+/* test_cb_snp_sign: synchronous sign callback.  Signs the 32-byte
+   to_sign hash with the ctx test key pair and immediately feeds the
+   signature back via fd_snp_process_signature.
+   NOTE(review): sha512 is not initialized via
+   fd_sha512_join(fd_sha512_new(...)) as in external_generate_keypair --
+   confirm fd_ed25519_sign initializes the object itself. */
+static int
+test_cb_snp_sign( void const * _ctx,
+ ulong session_id,
+ uchar const to_sign[ FD_SNP_TO_SIGN_SZ ] ) {
+  test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+  fd_sha512_t sha512[1];
+  fd_ed25519_sign( ctx->signature, to_sign, 32, ctx->public_key, ctx->private_key, sha512 );
+  ctx->sign_cnt++;
+  return fd_snp_process_signature( ctx->snp, session_id, ctx->signature );
+}
+
+/* test_cb_app_rx: app rx callback.  Asserts the delivered peer,
+   payload size, meta and (if set) payload content match the
+   expectations pre-loaded in ctx->assert_*. */
+int
+test_cb_app_rx( void const * _ctx,
+ fd_snp_peer_t peer,
+ uchar const * data,
+ ulong data_sz,
+ fd_snp_meta_t meta ) {
+  test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+  FD_TEST( peer == ctx->assert_peer );
+  FD_TEST( data_sz == ctx->assert_data_sz );
+  FD_TEST( meta == ctx->assert_meta );
+  if( ctx->assert_data ) FD_TEST( fd_memeq( data, ctx->assert_data, data_sz ) );
+  return 1;
+}
+
+/* test_snp_app_send_recv_udp exercises a plain-UDP round trip (no
+   handshake): client app-send -> snp-send -> "network" memcpy ->
+   server process/recv, then the same in reverse.
+   NOTE(review): uses assert() for checks; these compile out under
+   NDEBUG -- consider FD_TEST as used elsewhere in this file. */
+static void
+test_snp_app_send_recv_udp( fd_wksp_t * wksp ) {
+  ulong proto = FD_SNP_META_PROTO_UDP;
+  fd_snp_limits_t limits = { .peer_cnt = 256 };
+
+  /* Client */
+  ushort client_port = 1234;
+  test_cb_ctx_t client_cb_test[1] = { 0 };
+  fd_snp_app_t client_app[1] = { 0 };
+  uchar client_packet[FD_SNP_MTU] = { 0 };
+  uint client_ip4 = 0UL;
+  int client_sz = 0;
+  fd_snp_meta_t client_meta = 0UL;
+  int client_cb_res = 0;
+  ulong client_msg_sz = 5UL;
+  uchar * client_msg = (uchar *)"hello";
+
+  void * client_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
+  fd_snp_t * client = fd_snp_join( fd_snp_new( client_mem, &limits ) );
+
+  client->cb.rx = test_cb_snp_rx;
+  client->cb.tx = test_cb_snp_tx;
+  client->cb.ctx = client_cb_test;
+  client_cb_test->out_packet = client_packet;
+  client->apps_cnt = 1;
+  client->apps[0].port = client_port;
+  client->flow_cred_total = 1L;
+  client->flow_cred_alloc = 1L;
+  FD_TEST( fd_snp_init( client ) );
+
+  client_app->cb.rx = test_cb_app_rx;
+  client_app->cb.ctx = client_cb_test;
+
+  /* Server */
+  ushort server_port = 4567;
+  test_cb_ctx_t server_cb_test[1] = { 0 };
+  fd_snp_app_t server_app[1] = { 0 };
+  uchar server_packet[FD_SNP_MTU] = { 0 };
+  uint server_ip4 = 0UL;
+  int server_sz = 0UL;
+  fd_snp_meta_t server_meta = 0UL;
+  int server_cb_res = 0;
+  ulong server_msg_sz = 6UL;
+  uchar * server_msg = (uchar *)"world!";
+
+  void * server_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
+  fd_snp_t * server = fd_snp_join( fd_snp_new( server_mem, &limits ) );
+
+  server->cb.tx = test_cb_snp_tx;
+  server->cb.rx = test_cb_snp_rx;
+  server->cb.ctx = server_cb_test;
+  server_cb_test->out_packet = server_packet;
+  server->apps_cnt = 1;
+  server->apps[0].port = server_port;
+  server->flow_cred_total = 1L;
+  server->flow_cred_alloc = 1L;
+  FD_TEST( fd_snp_init( server ) );
+
+  server_app->cb.rx = test_cb_app_rx;
+  server_app->cb.ctx = server_cb_test;
+
+  /* Test protocol */
+
+  /* Client sends */
+  client_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, server_ip4, server_port );
+  client_sz = fd_snp_app_send( client_app, client_packet, FD_SNP_MTU, client_msg, client_msg_sz, client_meta );
+  assert( client_sz>0 );
+  client_sz = fd_snp_send( client, client_packet, (ulong)client_sz, client_meta );
+  assert( client_sz>0 );
+
+  /* simulate network */ server_sz = client_sz; memcpy( server_packet, client_packet, (ulong)client_sz );
+  FD_LOG_HEXDUMP_WARNING(( "packet", server_packet, (ulong)server_sz ));
+
+  /* Server receives */
+  server_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, client_ip4, client_port );
+
+  server_cb_test->assert_packet = server_packet;
+  server_cb_test->assert_packet_sz = (ulong)server_sz;
+  server_cb_test->assert_meta = server_meta;
+  server_cb_res = fd_snp_process_packet( server, server_packet, (ulong)server_sz );
+  assert( server_cb_res==1 );
+
+  server_cb_test->assert_peer = 0UL;
+  server_cb_test->assert_data = client_msg;
+  server_cb_test->assert_data_sz = client_msg_sz;
+  server_cb_test->assert_meta = server_meta;
+  server_cb_res = fd_snp_app_recv( server_app, server_packet, (ulong)server_sz, server_meta );
+  assert( server_cb_res==1 );
+
+  /* Server sends */
+  server_sz = fd_snp_app_send( server_app, server_packet, FD_SNP_MTU, server_msg, server_msg_sz, server_meta );
+  assert( server_sz>0 );
+  server_sz = fd_snp_send( server, server_packet, (ulong)server_sz, server_meta );
+  assert( server_sz>0 );
+
+  /* simulate network */ client_sz = server_sz; memcpy( client_packet, server_packet, (ulong)server_sz );
+  FD_LOG_HEXDUMP_WARNING(( "packet", client_packet, (ulong)client_sz ));
+
+  /* Client receives */
+  client_cb_test->assert_packet = client_packet;
+  client_cb_test->assert_packet_sz = (ulong)client_sz;
+  client_cb_test->assert_meta = client_meta;
+  client_cb_res = fd_snp_process_packet( client, client_packet, (ulong)client_sz );
+  assert( client_cb_res==1 );
+
+  client_cb_test->assert_peer = 0UL;
+  client_cb_test->assert_data = server_msg;
+  client_cb_test->assert_data_sz = server_msg_sz;
+  client_cb_test->assert_meta = client_meta;
+  client_cb_res = fd_snp_app_recv( client_app, client_packet, (ulong)client_sz, client_meta );
+  assert( client_cb_res==1 );
+
+  FD_LOG_NOTICE(( "Test snp_app proto=udp: ok" ));
+}
+
+/* test_snp_app_send_recv_v1 exercises a full SNPv1 flow: client app
+   send triggers the 5-message handshake (client_init .. server_acpt,
+   driven by replaying buffered packets), the buffered application
+   payload is then delivered, and a post-handshake server->client round
+   trip confirms the session keys work without a second handshake.
+   The #if 0 branch covers the out-of-order delivery (cache) path.
+   NOTE(review): mixes assert() (compiled out under NDEBUG) with
+   FD_TEST -- consider FD_TEST throughout. */
+static void
+test_snp_app_send_recv_v1( fd_wksp_t * wksp ) {
+  ulong proto = FD_SNP_META_PROTO_V1;
+  fd_snp_limits_t limits = { .peer_cnt = 256 };
+
+  /* Client */
+  ushort client_port = 1234;
+  test_cb_ctx_t client_cb_test[1] = { 0 };
+  fd_snp_app_t client_app[1] = { 0 };
+  uchar client_packet[FD_SNP_MTU] = { 0 };
+  uint client_ip4 = 0UL;
+  int client_sz = 0;
+  fd_snp_meta_t client_meta = 0UL;
+  int client_cb_res = 0;
+  ulong client_msg_sz = 5UL;
+  uchar * client_msg = (uchar *)"hello";
+
+  void * client_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
+  fd_snp_t * client = fd_snp_join( fd_snp_new( client_mem, &limits ) );
+
+  client->cb.rx = test_cb_snp_rx;
+  client->cb.tx = test_cb_snp_tx;
+  client->cb.sign = test_cb_snp_sign;
+  client->cb.ctx = client_cb_test;
+  client_cb_test->out_packet = client_packet;
+  external_generate_keypair( client_cb_test->private_key, client_cb_test->public_key );
+  memcpy( client->config.identity, client_cb_test->public_key, 32 );
+  client_cb_test->snp = client;
+  client->apps_cnt = 1;
+  client->apps[0].port = client_port;
+  // client->flow_cred_total = 0; /* testing auto-initialization */
+  // client->flow_cred_alloc = 0; /* testing auto-initialization */
+  FD_TEST( fd_snp_init( client ) );
+
+  client_app->cb.rx = test_cb_app_rx;
+  client_app->cb.ctx = client_cb_test;
+
+  /* Server */
+  ushort server_port = 4567;
+  test_cb_ctx_t server_cb_test[1] = { 0 };
+  fd_snp_app_t server_app[1] = { 0 };
+  uchar server_packet[FD_SNP_MTU] = { 0 };
+  uint server_ip4 = 0UL;
+  int server_sz = 0UL;
+  fd_snp_meta_t server_meta = 0UL;
+  int server_cb_res = 0;
+  ulong server_msg_sz = 6UL;
+  uchar * server_msg = (uchar *)"world!";
+
+  void * server_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
+  fd_snp_t * server = fd_snp_join( fd_snp_new( server_mem, &limits ) );
+
+  server->cb.tx = test_cb_snp_tx;
+  server->cb.rx = test_cb_snp_rx;
+  server->cb.sign = test_cb_snp_sign;
+  server->cb.ctx = server_cb_test;
+  server_cb_test->out_packet = server_packet;
+  external_generate_keypair( server_cb_test->private_key, server_cb_test->public_key );
+  memcpy( server->config.identity, server_cb_test->public_key, 32 );
+  server_cb_test->snp = server;
+  server->apps_cnt = 1;
+  server->apps[0].port = server_port;
+  // server->flow_cred_total = 0; /* testing auto-initialization */
+  // server->flow_cred_alloc = 0; /* testing auto-initialization */
+  FD_TEST( fd_snp_init( server ) );
+
+  server_app->cb.rx = test_cb_app_rx;
+  server_app->cb.ctx = server_cb_test;
+
+  /* Test protocol */
+
+  /* Client sends */
+  client_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, server_ip4, server_port );
+  client_sz = fd_snp_app_send( client_app, client_packet, FD_SNP_MTU, client_msg, client_msg_sz, client_meta );
+  assert( client_sz>0 );
+  client_sz = fd_snp_send( client, client_packet, (ulong)client_sz, client_meta ); /* client_init */
+  assert( client_sz>0 );
+
+  /* Handshake - snp_app is not involved - don't really need to memcpy packet all the times */
+  server_sz = fd_snp_process_packet( server, client_packet, (ulong)client_sz ); /* server_init */
+  assert( server_sz>0 );
+  client_sz = fd_snp_process_packet( client, client_packet, (ulong)server_sz ); /* client_cont */
+  assert( client_sz>0 );
+
+  assert( server_cb_test->buf_cnt==0 );
+  assert( server_cb_test->sign_cnt==0 );
+  server_sz = fd_snp_process_packet( server, client_packet, (ulong)client_sz ); /* server_fini */
+  assert( server_sz>0 );
+  assert( server_cb_test->buf_cnt==1 );
+  assert( server_cb_test->sign_cnt==1 );
+  assert( (ushort)server_sz==server_cb_test->buf_packet[0].data_sz );
+
+  /* send buffered packet (server_fini): server_cb_test->buf_packet[0].data */
+  assert( client_cb_test->buf_cnt==0 );
+  assert( client_cb_test->sign_cnt==0 );
+  client_sz = fd_snp_process_packet( client, server_cb_test->buf_packet[0].data, (ulong)server_sz ); /* client_fini */
+  assert( client_cb_test->buf_cnt==2 );
+  assert( client_cb_test->sign_cnt==1 );
+  assert( client_sz>0 );
+  assert( (ushort)client_sz==client_cb_test->buf_packet[0].data_sz );
+#if 1
+  /* send buffered packet (client_fini): client_cb_test->buf_packet[0].data */
+  server_sz = fd_snp_process_packet( server, client_cb_test->buf_packet[0].data, (ulong)client_sz ); /* server_acpt */
+  assert( server_sz==0 );
+
+  /* send buffered packet (client_app_payload): client_cb_test->buf_packet[1].data */
+  server_sz = (int)client_cb_test->buf_packet[1].data_sz;
+  memcpy( server_packet, client_cb_test->buf_packet[1].data, (ulong)server_sz );
+
+  /* Server receives */
+  server_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, client_ip4, client_port );
+
+  server_cb_test->assert_packet = server_packet;
+  server_cb_test->assert_packet_sz = (ulong)server_sz;
+  server_cb_test->assert_meta = server_meta;
+  FD_LOG_HEXDUMP_WARNING(( "packet", server_cb_test->assert_packet, (ulong)server_cb_test->assert_packet_sz ));
+  server_cb_res = fd_snp_process_packet( server, server_packet, (ulong)server_sz );
+  assert( server_cb_res==1 );
+#else
+  /* send buffered packet (client_app_payload): client_cb_test->buf_packet[1].data */
+  server_sz = fd_snp_process_packet( server, client_cb_test->buf_packet[1].data, (ulong)client_cb_test->buf_packet[1].data_sz ); /* server_acpt */
+  assert( server_sz==0 ); /* cache only */
+
+  /* send buffered packet (client_fini): client_cb_test->buf_packet[1].data */
+  server_sz = (int)client_cb_test->buf_packet[0].data_sz;
+  memcpy( server_packet, client_cb_test->buf_packet[0].data, (ulong)server_sz );
+
+  /* Server receives */
+  server_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, client_ip4, client_port );
+
+  /* Result is now buffered (from cache) */
+  server_cb_test->assert_packet = client_cb_test->buf_packet[1].data;
+  server_cb_test->assert_packet_sz = client_cb_test->buf_packet[1].data_sz;
+  server_cb_test->assert_meta = server_meta | FD_SNP_META_OPT_BUFFERED;
+  server_cb_test->assert_buffered = 1;
+  assert( server_cb_test->buf_cnt==1 );
+  FD_LOG_HEXDUMP_WARNING(( "packet", server_cb_test->assert_packet, (ulong)server_cb_test->assert_packet_sz ));
+  server_cb_res = fd_snp_process_packet( server, server_packet, (ulong)server_sz );
+  assert( server_cb_test->buf_cnt==2 );
+  assert( server_cb_res==0 ); /* return is from processing server_acpt */
+#endif
+
+  server_cb_test->assert_peer = 0UL;
+  server_cb_test->assert_data = client_msg;
+  server_cb_test->assert_data_sz = client_msg_sz;
+  server_cb_test->assert_meta = server_meta;
+  server_cb_res = fd_snp_app_recv( server_app, server_cb_test->assert_packet, (ulong)server_cb_test->assert_packet_sz, server_meta );
+  assert( server_cb_res==1 );
+
+  /* Server sends */
+  server_sz = fd_snp_app_send( server_app, server_packet, FD_SNP_MTU, server_msg, server_msg_sz, server_meta );
+  assert( server_sz>=0 );
+  server_sz = fd_snp_send( server, server_packet, (ulong)server_sz, server_meta );
+  assert( server_sz>0 );
+
+  /* Handshake NOT needed a second time */
+
+  /* simulate network */ client_sz = server_sz; memcpy( client_packet, server_packet, (ulong)server_sz );
+  FD_LOG_HEXDUMP_WARNING(( "packet", client_packet, (ulong)client_sz ));
+
+  /* Client receives */
+  client_cb_test->assert_packet = client_packet;
+  client_cb_test->assert_packet_sz = (ulong)client_sz;
+  client_cb_test->assert_meta = client_meta;
+  client_cb_res = fd_snp_process_packet( client, client_packet, (ulong)client_sz );
+  assert( client_cb_res==1 );
+
+  client_cb_test->assert_peer = 0UL;
+  client_cb_test->assert_data = server_msg;
+  client_cb_test->assert_data_sz = server_msg_sz;
+  client_cb_test->assert_meta = client_meta;
+  client_cb_res = fd_snp_app_recv( client_app, client_packet, (ulong)client_sz, client_meta );
+  assert( client_cb_res==1 );
+
+  FD_LOG_NOTICE(( "Test snp_app proto=v1: ok" ));
+
+}
+static void
+test_snp_app_send_recv_v2( void ) {
+ ulong proto = FD_SNP_META_PROTO_V2;
+
+ /* Client */
+ fd_snp_app_t client_app[1] = { 0 };
+ uchar client_packet[FD_SNP_MTU] = { 0 };
+ int client_sz = 0;
+ fd_snp_meta_t client_meta = 0UL;
+ ulong client_msg_sz = 5UL;
+ uchar * client_msg = (uchar *)"hello";
+
+ uint server_ip4 = 0UL;
+ ushort server_port = 0UL;
+
+ /* Test protocol */
+
+ /* Client sends */
+ client_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, server_ip4, server_port );
+ client_sz = fd_snp_app_send( client_app, client_packet, FD_SNP_MTU, client_msg, client_msg_sz, client_meta );
+ assert( client_sz==-1 ); /* Not implemented */
+
+ FD_LOG_NOTICE(( "Test snp_app proto=v2: ok (not implemented)" ));
+}
+
+int
+test_cb_app_tx_detailed( void const * _ctx,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ return fd_snp_send( ctx->snp, packet, packet_sz, meta );
+}
+
+static int
+test_cb_snp_rx_detailed( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ return fd_snp_app_recv( ctx->snp_app, packet, packet_sz, meta );
+}
+
+int
+test_cb_app_rx_detailed( void const * _ctx,
+ fd_snp_peer_t peer,
+ uchar const * data,
+ ulong data_sz,
+ fd_snp_meta_t meta ) {
+ (void)peer;
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ if( data_sz==5UL && fd_memeq( data, "hello", data_sz ) ) {
+ return fd_snp_app_send( ctx->snp_app, ctx->out_packet, FD_SNP_MTU, "world!", 6, meta );
+ }
+ if( data_sz==6UL && fd_memeq( data, "world!", data_sz ) ) {
+ ctx->success = 1;
+ }
+ return 1;
+}
+
+static int
+attacker( ulong iter, ulong attack ) {
+ /*
+ attacks:
+ 0x0001: send 0x15, 0x1f in reverse order
+ 0x0002: dupe 0x13
+ 0x0004: dupe 0x14
+ 0x0008: dupe 0x15
+ */
+ return (iter & attack)>0;
+}
+
/* Detailed v1 handshake/payload test: runs the full client/server
   exchange through a `trace` packet log that simulates the network,
   replayed under a bitmask `j` of attacker behaviors (reordering /
   duplication, see attacker()).

   NOTE(review): several lines below appear damaged by markup
   extraction (text between angle brackets dropped); they are flagged
   inline and must be restored from the upstream source. */
static void
test_snp_app_send_recv_v1_detailed( fd_wksp_t * wksp ) {
  ulong proto = FD_SNP_META_PROTO_V1;
  fd_snp_limits_t limits = { .peer_cnt = 256 };

  /* Client */
  ushort client_port = 0x5566;
  test_cb_ctx_t client_cb_test[1] = { 0 };
  fd_snp_app_t client_app[1] = { 0 };
  uchar client_packet[FD_SNP_MTU] = { 0 };
  uint client_ip4 = 0x11223344UL;
  int client_sz = 0;
  fd_snp_meta_t client_meta = 0UL;
  int client_cb_res = 0; (void)client_cb_res;
  ulong client_msg_sz = 5UL;
  uchar * client_msg = (uchar *)"hello";

  void * client_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
  fd_snp_t * client = fd_snp_join( fd_snp_new( client_mem, &limits ) );

  client->cb.rx = test_cb_snp_rx_detailed;
  client->cb.tx = test_cb_snp_tx;
  client->cb.sign = test_cb_snp_sign;
  client->cb.ctx = client_cb_test;
  client_cb_test->out_packet = client_packet;
  client_cb_test->ip = client_ip4;
  external_generate_keypair( client_cb_test->private_key, client_cb_test->public_key );
  memcpy( client->config.identity, client_cb_test->public_key, 32 );
  client_cb_test->snp = client;
  client_cb_test->snp_app = client_app;
  client->apps_cnt = 1;
  client->apps[0].port = client_port;
  // client->flow_cred_total = 0; /* testing auto-initialization */
  // client->flow_cred_alloc = 0; /* testing auto-initialization */
  FD_TEST( fd_snp_init( client ) );

  client_app->cb.rx = test_cb_app_rx_detailed;
  client_app->cb.tx = test_cb_app_tx_detailed;
  client_app->cb.ctx = client_cb_test;

  /* Server */
  ushort server_port = 0xDDEE;
  test_cb_ctx_t server_cb_test[1] = { 0 };
  fd_snp_app_t server_app[1] = { 0 };
  uchar server_packet[FD_SNP_MTU] = { 0 };
  uint server_ip4 = 0xAABBCCDDUL;
  int server_sz = 0UL;
  int server_cb_res = 0; (void)server_cb_res;

  void * server_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
  fd_snp_t * server = fd_snp_join( fd_snp_new( server_mem, &limits ) );

  server->cb.rx = test_cb_snp_rx_detailed;
  server->cb.tx = test_cb_snp_tx;
  server->cb.sign = test_cb_snp_sign;
  server->cb.ctx = server_cb_test;
  server_cb_test->out_packet = server_packet;
  server_cb_test->ip = server_ip4;
  external_generate_keypair( server_cb_test->private_key, server_cb_test->public_key );
  memcpy( server->config.identity, server_cb_test->public_key, 32 );
  server_cb_test->snp = server;
  server_cb_test->snp_app = server_app;
  server->apps_cnt = 1;
  server->apps[0].port = server_port;
  // server->flow_cred_total = 0; /* testing auto-initialization */
  // server->flow_cred_alloc = 0; /* testing auto-initialization */
  FD_TEST( fd_snp_init( server ) );

  server_app->cb.rx = test_cb_app_rx_detailed;
  server_app->cb.tx = test_cb_app_tx_detailed;
  server_app->cb.ctx = server_cb_test;

  /* Test protocol */

/* NOTE(review): truncated by extraction — presumably `#define MAX <n>`;
   confirm against upstream */
#define MAX
  uchar network_packet[2048];
  server_cb_test->out_packet = network_packet;
  client_cb_test->out_packet = network_packet;

  fd_snp_pkt_t trace[100];
  ulong trace_cnt = 0UL;

  client_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, server_ip4, server_port );

  (void)client_sz;
  (void)server_sz;

  for( ulong j=0; j<16; j++ ) {

    /* reset */
    clear_state( client );
    client_cb_test->success = 0;
    client_cb_test->buf_cnt = 0;
    clear_state( server );
    server_cb_test->buf_cnt = 0;

    /* client_init */
    int packet_sz = fd_snp_app_send( client_app, network_packet, FD_SNP_MTU, client_msg, client_msg_sz, client_meta );
    FD_TEST_CUSTOM( packet_sz>0, "client_init failed" );

    trace_cnt = 0UL;

    trace[trace_cnt].data_sz = (ushort)packet_sz;
    memcpy( trace[trace_cnt].data, network_packet, trace[trace_cnt].data_sz );
    trace[trace_cnt].meta = 0;
    trace[trace_cnt].send = 1;
    ++trace_cnt;

    for( ulong k=0; k<20; k++ ) {
      /*
        from client to server:
        trace[k].meta = 0 // from
        trace[k].send = 1 // to

        from server to client:
        trace[k].meta = 1 // from
        trace[k].send = 0 // to
      */
      uchar is_server = trace[k].send;
      fd_snp_t * snp = is_server ? server : client;
      test_cb_ctx_t * ctx = is_server ? server_cb_test : client_cb_test;

      /* NOTE(review): line damaged by extraction — `ksuccess` is not a
         declared identifier; the original presumably processed
         trace[k] here and broke out on success.  Restore from
         upstream. */
      FD_TEST_CUSTOM( ksuccess ) break;

      /* Copy result(s) */
      if( ctx->buf_cnt ) {
        /* NOTE(review): loop condition damaged by extraction — almost
           certainly `_i < ctx->buf_cnt` */
        for( ulong _i=0; _ibuf_cnt; _i++ ) {
          /* 0x0001: send 0x15, 0x1f in reverse order */
          ulong src = attacker( j, 0x0001 ) ? ctx->buf_cnt-1-_i : _i;

          trace[trace_cnt+_i].data_sz = ctx->buf_packet[ src ].data_sz;
          memcpy( trace[trace_cnt+_i].data, ctx->buf_packet[ src ].data, trace[trace_cnt+_i].data_sz );
          trace[trace_cnt+_i].meta = is_server;
          trace[trace_cnt+_i].send = !is_server;
        }
        trace_cnt += ctx->buf_cnt;

        ctx->buf_cnt = 0;
      } else if (packet_sz > 0) {
        trace[trace_cnt].data_sz = (ushort)packet_sz;
        memcpy( trace[trace_cnt].data, network_packet, trace[trace_cnt].data_sz );
        trace[trace_cnt].meta = trace[k].send;
        trace[trace_cnt].send = !trace[k].send;
        ++trace_cnt;
      }

      /* NOTE(review): `dupe_packet` is not declared in the visible
         code — presumably derived from attacker( j, 0x0002/0x0004/
         0x0008 ) and the packet type; restore from upstream. */
      if( dupe_packet ) {
        packet_sz = trace[k].data_sz;
        memcpy( network_packet, trace[k].data, trace[k].data_sz );
        packet_sz = fd_snp_process_packet( snp, network_packet, (ulong)packet_sz );
      }
    }
    FD_TEST(client_cb_test->success);
    FD_LOG_NOTICE(( "Test snp_app proto=v1 attacker_bits=%04lx: ok", j ));
  }
}
+
+int
+main( int argc,
+ char ** argv ) {
+ (void)argc;
+ (void)argv;
+
+ fd_boot( &argc, &argv );
+ fd_wksp_t * wksp = fd_wksp_new_anonymous( FD_SHMEM_NORMAL_PAGE_SZ, 1UL << 15, fd_shmem_cpu_idx( 0 ), "wksp", 0UL );
+ FD_TEST( wksp );
+
+ test_snp_app_send_recv_udp( wksp );
+ test_snp_app_send_recv_v1( wksp );
+ test_snp_app_send_recv_v2();
+
+ test_snp_app_send_recv_v1_detailed( wksp );
+
+ fd_wksp_delete_anonymous( wksp );
+
+ return 0;
+}
diff --git a/src/waltz/snp/test_snp_common.c b/src/waltz/snp/test_snp_common.c
new file mode 100644
index 00000000000..f62c443d86a
--- /dev/null
+++ b/src/waltz/snp/test_snp_common.c
@@ -0,0 +1,188 @@
+#include "fd_snp_common.h"
+#include "../../util/fd_util.h"
+
/* Randomized TLV encode/parse round-trip test: builds a buffer of
   random (type, len, value) records and checks extraction against the
   expected `exp` array.

   NOTE(review): this span is severely damaged by markup extraction —
   the tail of test_tlv_parsing and the head of (apparently) a
   test_cb_snp_tx callback have been merged together and large pieces
   of both are missing.  Restore from the upstream source; the flagged
   lines below are not compilable as shown. */
static void
test_tlv_parsing( void ) {
#define BUF_SZ ( 2048UL )
  FD_LOG_NOTICE(( "test_tlv_parsing" ));
  fd_rng_t _rng[ 1 ]; fd_rng_t * r = fd_rng_join( fd_rng_new( _rng, (uint)fd_tickcount() /*seed*/, 0UL ) );
  uchar buf[ BUF_SZ ];
  fd_snp_tlv_t exp[ 1024UL ];
  for( ulong i=0UL; i<128UL; i++ ) {
    ulong cnt = 0UL;
    ulong sz = 0UL;
    ulong buf_sz = 0UL;
    while( sz < BUF_SZ ) {
      uchar type = fd_rng_uchar( r );
      ushort len = (ushort)( fd_rng_ushort_roll( r, 16U ) + 1U );
      if( ( sz + len + 3UL ) <= BUF_SZ ) {
        exp[ cnt ].type = type;
        exp[ cnt ].len = len;
        exp[ cnt ].ptr = &buf[ sz + 3UL ];
        fd_memcpy( buf + sz + 0UL, &type, 1UL );
        fd_memcpy( buf + sz + 1UL, &len, 2UL );
        /* NOTE(review): extraction damage — the value-fill loop and one
           or more FD_TEST checks are missing here */
        for( ulong k=0; k 0 );
  /* test tlv extract */
  ulong off = 0UL;
  /* NOTE(review): extraction damage — the extract/verify loop is
     missing and the text below belongs to a different function (a tx
     callback writing into ip4->saddr) */
  for( ulong j=0UL; jip4->saddr = ctx->ip;

  if( meta & FD_SNP_META_OPT_BUFFERED ) {
    memcpy( ctx->buf_packet[ ctx->buf_cnt ].data, packet, packet_sz );
    ctx->buf_packet[ ctx->buf_cnt ].data_sz = (ushort)packet_sz;
    ctx->buf_cnt++;
  }
  return (int)packet_sz;
}
+
+static int
+test_cb_snp_rx( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ uchar buffered = (meta & FD_SNP_META_OPT_BUFFERED)==FD_SNP_META_OPT_BUFFERED;
+ FD_TEST( buffered == ctx->assert_buffered );
+ FD_TEST( packet_sz == ctx->assert_packet_sz );
+ FD_TEST( meta == ctx->assert_meta );
+ if( buffered ) {
+ ctx->buf_cnt++;
+ FD_TEST( fd_memeq( packet, ctx->assert_packet, packet_sz ) );
+ } else {
+ FD_TEST( packet == ctx->assert_packet );
+ }
+ return 1;
+}
+
/* Sign callback: ed25519-signs the handshake challenge with this
   endpoint's keypair, records the signature in the context, and feeds
   it back into the SNP state machine for the given session.
   NOTE(review): only the first 32 bytes of the FD_SNP_TO_SIGN_SZ
   buffer are signed — confirm that matches the v1 protocol's to-sign
   layout. */
static int
test_cb_snp_sign( void const * _ctx,
                  ulong session_id,
                  uchar const to_sign[ FD_SNP_TO_SIGN_SZ ] ) {
  test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
  fd_sha512_t sha512[1];
  fd_ed25519_sign( ctx->signature, to_sign, 32, ctx->public_key, ctx->private_key, sha512 );
  ctx->sign_cnt++; /* lets tests assert how many signatures were requested */
  return fd_snp_process_signature( ctx->snp, session_id, ctx->signature );
}
+
+int
+test_cb_app_rx( void const * _ctx,
+ fd_snp_peer_t peer,
+ uchar const * data,
+ ulong data_sz,
+ fd_snp_meta_t meta ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ FD_TEST( peer == ctx->assert_peer );
+ FD_TEST( data_sz == ctx->assert_data_sz );
+ FD_TEST( meta == ctx->assert_meta );
+ if( ctx->assert_data ) FD_TEST( fd_memeq( data, ctx->assert_data, data_sz ) );
+ return 1;
+}
+
/* Core of the limits tests: drives one full client->server v1
   handshake plus one payload round trip, asserting at every step.  The
   should_pass_* flags select whether a step is expected to be rejected
   because the corresponding side exhausted its peer limit; when a
   rejection is expected the test returns early at the first rejected
   step. */
static void
test_snp_limits_private( fd_snp_t * server,
                         uint server_ip4,
                         ushort server_port,
                         fd_snp_t * client,
                         uint client_ip4,
                         ushort client_port,
                         int should_pass_many_to_one,
                         int should_pass_one_to_many ) {
  ulong proto = FD_SNP_META_PROTO_V1;

  /* Client */
  int client_sz = 0;
  fd_snp_meta_t client_meta = 0UL;
  int client_cb_res = 0;
  ulong client_msg_sz = 5UL;
  uchar * client_msg = (uchar *)"hello";

  /* reuse the cb context installed by the caller on each fd_snp_t */
  test_cb_ctx_t * client_cb_test = client->cb.ctx;
  uchar * client_pkt = client_cb_test->out_packet;

  fd_snp_app_t client_app[1] = { 0 };
  client_app->cb.rx = test_cb_app_rx;
  client_app->cb.ctx = client_cb_test;

  client_cb_test->buf_cnt = 0;
  client_cb_test->sign_cnt = 0;

  /* Server */
  int server_sz = 0UL;
  fd_snp_meta_t server_meta = 0UL;
  int server_cb_res = 0;
  ulong server_msg_sz = 6UL;
  uchar * server_msg = (uchar *)"world!";

  test_cb_ctx_t * server_cb_test = server->cb.ctx;
  uchar * server_pkt = server_cb_test->out_packet;

  fd_snp_app_t server_app[1] = { 0 };
  server_app->cb.rx = test_cb_app_rx;
  server_app->cb.ctx = server_cb_test;

  server_cb_test->buf_cnt = 0;
  server_cb_test->sign_cnt = 0;

  /* Test protocol */

  /* Client sends */
  client_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, server_ip4, server_port );
  client_sz = fd_snp_app_send( client_app, client_pkt, FD_SNP_MTU, client_msg, client_msg_sz, client_meta );
  FD_TEST( client_sz>0 );
  client_sz = fd_snp_send( client, client_pkt, (ulong)client_sz, client_meta ); /* client_init */
  if( !should_pass_one_to_many ) {
    /* client is out of peer slots: connection attempt must be refused */
    FD_TEST( client_sz==-1 );
    TEST_SNP_LIMITS_LOG( FD_LOG_WARNING(( "test stops earlier" )) );
    return;
  } else {
    FD_TEST( client_sz>0 );
  }

  /* Handshake - snp_app is not involved - don't really need to memcpy pkt all the times */
  server_sz = fd_snp_process_packet( server, client_pkt, (ulong)client_sz ); /* server_init */
  FD_TEST( server_sz>0 );
  client_sz = fd_snp_process_packet( client, client_pkt, (ulong)server_sz ); /* client_cont */
  FD_TEST( client_sz>0 );

  FD_TEST( server_cb_test->buf_cnt ==0 );
  FD_TEST( server_cb_test->sign_cnt==0 );
  server_sz = fd_snp_process_packet( server, client_pkt, (ulong)client_sz ); /* server_fini */
  if( !should_pass_many_to_one ) {
    /* server is out of peer slots: handshake must be refused here */
    FD_TEST( server_sz==-1 );
    FD_TEST( server_cb_test->buf_cnt ==0 );
    FD_TEST( server_cb_test->sign_cnt==0 );
    TEST_SNP_LIMITS_LOG( FD_LOG_WARNING(( "test stops earlier" )) );
    return;
  } else {
    FD_TEST( server_sz>0 );
    FD_TEST( server_cb_test->buf_cnt ==1 );
    FD_TEST( server_cb_test->sign_cnt==1 );
    FD_TEST( (ushort)server_sz==server_cb_test->buf_packet[0].data_sz );
  }

  /* send buffered packet (server_fini): server_cb_test->buf_packet[0].data */
  FD_TEST( client_cb_test->buf_cnt==0 );
  FD_TEST( client_cb_test->sign_cnt==0 );
  client_sz = fd_snp_process_packet( client, server_cb_test->buf_packet[0].data, (ulong)server_sz ); /* client_fini */
  if( !should_pass_many_to_one ) {
    FD_TEST( client_cb_test->buf_cnt ==0 );
    FD_TEST( client_cb_test->sign_cnt==0 );
    FD_TEST( client_sz>0 );
  } else {
    FD_TEST( client_cb_test->buf_cnt ==2 );
    FD_TEST( client_cb_test->sign_cnt==1 );
    FD_TEST( client_sz>0 );
    FD_TEST( (ushort)client_sz==client_cb_test->buf_packet[0].data_sz );
  }

  /* send buffered packet (client_fini): client_cb_test->buf_packet[0].data */
  server_sz = fd_snp_process_packet( server, client_cb_test->buf_packet[0].data, (ulong)client_sz ); /* server_acpt */
  if( !should_pass_many_to_one ) {
    FD_TEST( server_sz==-1 );
  } else {
    FD_TEST( server_sz==0 );
  }

  /* send buffered packet (client_app_payload): client_cb_test->buf_packet[1].data */
  server_sz = (int)client_cb_test->buf_packet[1].data_sz;
  memcpy( server_pkt, client_cb_test->buf_packet[1].data, (ulong)server_sz );

  /* Server receives */
  server_meta = fd_snp_meta_from_parts( proto, /* app_id */ 0, client_ip4, client_port );

  server_cb_test->assert_packet = server_pkt;
  server_cb_test->assert_packet_sz = (ulong)server_sz;
  server_cb_test->assert_meta = server_meta;
  TEST_SNP_LIMITS_LOG( FD_LOG_HEXDUMP_NOTICE(( "packet", server_cb_test->assert_packet, (ulong)server_cb_test->assert_packet_sz )) );
  server_cb_res = fd_snp_process_packet( server, server_pkt, (ulong)server_sz );
  TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "fd_snp_process_packet server_cb_res %d", server_cb_res )) );
  if( !should_pass_many_to_one ) {
    FD_TEST( server_cb_res==-1 );
  } else {
    FD_TEST( server_cb_res==1 );
  }

  server_cb_test->assert_peer = 0UL;
  server_cb_test->assert_data = client_msg;
  server_cb_test->assert_data_sz = client_msg_sz;
  server_cb_test->assert_meta = server_meta;
  server_cb_res = fd_snp_app_recv( server_app, server_cb_test->assert_packet, (ulong)server_cb_test->assert_packet_sz, server_meta );
  TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "fd_snp_app_recv server_cb_res %d", server_cb_res )) );
  FD_TEST( server_cb_res==1 );

  /* Server sends */
  server_sz = fd_snp_app_send( server_app, server_pkt, FD_SNP_MTU, server_msg, server_msg_sz, server_meta );
  if( !should_pass_many_to_one ) {
    FD_TEST( server_sz>0 );
  } else {
    FD_TEST( server_sz>=0 );
  }
  server_sz = fd_snp_send( server, server_pkt, (ulong)server_sz, server_meta );
  if( !should_pass_many_to_one ) {
    FD_TEST( server_sz==-1 );
  } else {
    FD_TEST( server_sz>0 );
  }

  /* Handshake NOT needed a second time */

  /* simulate network */ client_sz = server_sz; memcpy( client_pkt, server_pkt, (ulong)server_sz );
  TEST_SNP_LIMITS_LOG( FD_LOG_HEXDUMP_NOTICE(( "packet", client_pkt, (ulong)client_sz )) );

  /* Client receives */
  client_cb_test->assert_packet = client_pkt;
  client_cb_test->assert_packet_sz = (ulong)client_sz;
  client_cb_test->assert_meta = client_meta;
  client_cb_res = fd_snp_process_packet( client, client_pkt, (ulong)client_sz );
  TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "fd_snp_process_packet client_cb_res %d", client_cb_res )) );
  FD_TEST( client_cb_res==1 );

  client_cb_test->assert_peer = 0UL;
  client_cb_test->assert_data = server_msg;
  client_cb_test->assert_data_sz = server_msg_sz;
  client_cb_test->assert_meta = client_meta;
  client_cb_res = fd_snp_app_recv( client_app, client_pkt, (ulong)client_sz, client_meta );
  TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "fd_snp_app_recv client_cb_res %d", client_cb_res )) );
  FD_TEST( client_cb_res==1 );
}
+
/* Many-to-one limits test: one server, ITERATIONS_N clients; any
   client beyond limits->peer_cnt must be rejected by the server.

   NOTE(review): the client-setup loop header below was damaged by
   markup extraction — the loop bound and the per-client declarations
   (client_cb_test, client_pkt, client_ip4, client_port, and the
   closing of the loop body) are missing; restore from upstream. */
static void
test_snp_limits_many_to_one( fd_wksp_t * wksp,
                             fd_snp_limits_t * limits ) {

  FD_LOG_NOTICE(( "test_snp_limits_many_to_one" ));

  void * server_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( limits ), 1 );
  fd_snp_t * server = fd_snp_join( fd_snp_new( server_mem, limits ) );

  void * client_mem[ ITERATIONS_N ];
  fd_snp_t * client[ ITERATIONS_N ];
  for( ulong i=0; i< ITERATIONS_N; i++ ) {
    client_mem[i] = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( limits ), 1+i );
    client[i] = fd_snp_join( fd_snp_new( client_mem[ i ], limits ) );
  }

  /* Server */
  uint server_ip4 = 0x010000a4;
  ushort server_port = 8001;
  test_cb_ctx_t server_cb_test[1] = { 0 };
  uchar server_pkt[FD_SNP_MTU] = { 0 };

  server->cb.tx = test_cb_snp_tx;
  server->cb.rx = test_cb_snp_rx;
  server->cb.sign = test_cb_snp_sign;
  server->cb.ctx = server_cb_test;
  server_cb_test->out_packet = server_pkt;
  external_generate_keypair( server_cb_test->private_key, server_cb_test->public_key );
  memcpy( server->config.identity, server_cb_test->public_key, 32 );
  server_cb_test->snp = server;
  server_cb_test->ip = server_ip4;
  server->apps_cnt = 1;
  server->apps[0].port = server_port;
  server->flow_cred_total = LONG_MAX;
  server->flow_cred_alloc = 32;
  FD_TEST( fd_snp_init( server ) );

  /* NOTE(review): damaged loop header — likely
     `for( ulong i=0; i<ITERATIONS_N; i++ ) {` followed by per-client
     declarations that were lost */
  for( ulong i=0; icb.rx = test_cb_snp_rx;
    client[i]->cb.tx = test_cb_snp_tx;
    client[i]->cb.sign = test_cb_snp_sign;
    client[i]->cb.ctx = client_cb_test;
    client_cb_test->out_packet = client_pkt;
    external_generate_keypair( client_cb_test->private_key, client_cb_test->public_key );
    memcpy( client[i]->config.identity, client_cb_test->public_key, 32 );
    client_cb_test->snp = client[i];
    client_cb_test->ip = client_ip4;
    client[i]->apps_cnt = 1;
    client[i]->apps[0].port = client_port;
    client[i]->flow_cred_total = LONG_MAX;
    client[i]->flow_cred_alloc = 32;
    FD_TEST( fd_snp_init( client[i] ) );

    /* only the first limits->peer_cnt clients may connect */
    int should_pass = (i < limits->peer_cnt) ? 1 : 0;

    TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "................" )) );
    TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "[ A ] many_to_one %02lu should_pass %d", i, should_pass )) );
    test_snp_limits_private( server, server_ip4, server_port,
                             client[i], client_ip4, client_port,
                             should_pass /*should_pass_many_to_one*/,
                             1 /*should_pass_one_to_many*/ );
    TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "fd_snp_connmap_key_cnt( server->conn_map ) %lu", fd_snp_conn_map_key_cnt( server->conn_map ) )) );
  }
  FD_LOG_NOTICE(( "... pass" ));
}
+
/* One-to-many limits test: one client, ITERATIONS_N servers; the
   client must refuse to open connections beyond limits->peer_cnt.

   NOTE(review): the server-setup loop header below was damaged by
   markup extraction — the loop bound and the per-server declarations
   (server_cb_test, server_pkt, server_ip4, server_port) are missing;
   restore from upstream. */
static void
test_snp_limits_one_to_many( fd_wksp_t * wksp,
                             fd_snp_limits_t * limits ) {

  FD_LOG_NOTICE(( "test_snp_limits_one_to_many" ));

  void * client_mem = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( limits ), 1 );
  fd_snp_t * client = fd_snp_join( fd_snp_new( client_mem, limits ) );

  void * server_mem[ ITERATIONS_N ];
  fd_snp_t * server[ ITERATIONS_N ];
  for( ulong i=0; i< ITERATIONS_N; i++ ) {
    server_mem[i] = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( limits ), 1+i );
    server[i] = fd_snp_join( fd_snp_new( server_mem[ i ], limits ) );
  }

  /* Client */
  uint client_ip4 = 0x010000a4;
  ushort client_port = 8001;
  test_cb_ctx_t client_cb_test[1] = { 0 };
  uchar client_pkt[FD_SNP_MTU] = { 0 };

  client->cb.rx = test_cb_snp_rx;
  client->cb.tx = test_cb_snp_tx;
  client->cb.sign = test_cb_snp_sign;
  client->cb.ctx = client_cb_test;
  client_cb_test->out_packet = client_pkt;
  external_generate_keypair( client_cb_test->private_key, client_cb_test->public_key );
  memcpy( client->config.identity, client_cb_test->public_key, 32 );
  client_cb_test->snp = client;
  client_cb_test->ip = client_ip4;
  client->apps_cnt = 1;
  client->apps[0].port = client_port;
  client->flow_cred_total = LONG_MAX;
  client->flow_cred_alloc = 32;
  FD_TEST( fd_snp_init( client ) );

  /* NOTE(review): damaged loop header — likely
     `for( ulong i=0; i<ITERATIONS_N; i++ ) {` followed by per-server
     declarations that were lost */
  for( ulong i=0; icb.tx = test_cb_snp_tx;
    server[i]->cb.rx = test_cb_snp_rx;
    server[i]->cb.sign = test_cb_snp_sign;
    server[i]->cb.ctx = server_cb_test;
    server_cb_test->out_packet = server_pkt;
    external_generate_keypair( server_cb_test->private_key, server_cb_test->public_key );
    memcpy( server[i]->config.identity, server_cb_test->public_key, 32 );
    server_cb_test->snp = server[i];
    server_cb_test->ip = server_ip4;
    server[i]->apps_cnt = 1;
    server[i]->apps[0].port = server_port;
    server[i]->flow_cred_total = LONG_MAX;
    server[i]->flow_cred_alloc = 32;
    FD_TEST( fd_snp_init( server[i] ) );

    /* only the first limits->peer_cnt outbound connections may pass */
    int should_pass = (i < limits->peer_cnt) ? 1 : 0;

    TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "................" )) );
    TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "[ B ] one_to_many %02lu should_pass %d", i, should_pass )) );
    test_snp_limits_private( server[i], server_ip4, server_port,
                             client, client_ip4, client_port,
                             1 /*should_pass_many_to_one*/,
                             should_pass /*should_pass_one_to_many*/ );
    TEST_SNP_LIMITS_LOG( FD_LOG_NOTICE(( "fd_snp_connmap_key_cnt( client->conn_map ) %lu", fd_snp_conn_map_key_cnt( client->conn_map ) )) );
  }
  FD_LOG_NOTICE(( "... pass" ));
}
+
+int
+main( int argc,
+ char ** argv ) {
+ fd_boot( &argc, &argv );
+
+ fd_wksp_t * wksp = fd_wksp_new_anonymous( fd_cstr_to_shmem_page_sz( "huge" ), ITERATIONS_N/*page_cnt*/, fd_log_cpu_id() /*near_cpu*/, "wksp", 0UL );
+ FD_TEST( wksp );
+
+ fd_snp_limits_t limits = { .peer_cnt = ITERATIONS_N / 2 };
+
+ test_snp_limits_many_to_one( wksp, &limits );
+
+ test_snp_limits_one_to_many( wksp, &limits );
+
+ FD_LOG_NOTICE(( "pass" ));
+ fd_halt();
+ return 0;
+}
diff --git a/src/waltz/snp/test_snp_live.c b/src/waltz/snp/test_snp_live.c
new file mode 100644
index 00000000000..558bace5842
--- /dev/null
+++ b/src/waltz/snp/test_snp_live.c
@@ -0,0 +1,324 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "fd_snp_app.h"
+#include "fd_snp.h"
+#include "../../ballet/ed25519/fd_ed25519.h"
+
+#define BUFFER_SIZE 2048
+
+static void
+external_generate_keypair( uchar private_key[32], uchar public_key[32] ) {
+ fd_sha512_t sha512[1];
+ FD_TEST( fd_sha512_join( fd_sha512_new( sha512 ) ) );
+ FD_TEST( fd_rng_secure( private_key, 32 )!=NULL );
+ fd_ed25519_public_from_private( public_key, private_key, sha512 );
+}
+
+// Create UDP socket and bind if server
+int create_udp_socket( uint ip, ushort port ) {
+ int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0) {
+ perror("error: socket create failed");
+ return -1;
+ }
+
+ // Set socket to non-blocking
+ int flags = fcntl(fd, F_GETFL, 0);
+ fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+
+ struct sockaddr_in addr = { 0 };
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons(port);
+ addr.sin_addr.s_addr = ip;
+
+ if( bind( fd, (void*)&addr, sizeof(addr) )<0 ) {
+ perror("error: socket bind failed");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
// Clean up resources: closes sock_fd when it refers to an open
// descriptor.  sock_fd is passed by value, so the previous
// `sock_fd = -1;` store had no effect on the caller and was removed
// as dead code.
static void cleanup( int sock_fd ) {
  printf("Cleanup done\n");
  if (sock_fd >= 0) {
    close(sock_fd);
  }
}
+
+/* Callbacks */
/* Shared context handed to every callback of this live test endpoint. */
struct test_cb_ctx {
  fd_snp_app_t * snp_app;     /* app layer bound to this endpoint */
  fd_snp_t * snp;             /* SNP instance bound to this endpoint */
  ulong ack;                  /* meta of a peer to ACK (0 = none); presumably set by rx path — TODO confirm */
  int sock_fd;                /* bound UDP socket */
  uint ip;                    /* local IPv4 (network byte order) */
  ushort sport;               /* NOTE(review): not referenced in the visible code */
  uchar done;                 /* set by 's' command to trigger sending */
  uchar private_key[ 32 ];    /* ed25519 identity private key */
  uchar packet[ BUFFER_SIZE ];/* scratch packet buffer */
};
typedef struct test_cb_ctx test_cb_ctx_t;
+
/* SNP tx callback: strips the synthetic eth+ip+udp headers (14+28
   bytes) and sends the SNP payload over the real UDP socket held in
   the context.  Returns packet_sz regardless of sendto outcome
   (failures are only logged). */
static int
test_cb_snp_tx( void const * _ctx,
                uchar const * packet,
                ulong packet_sz,
                fd_snp_meta_t meta ) {
  test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;

  uint ip;
  ushort port;
  fd_snp_meta_into_parts( NULL, NULL, &ip, &port, meta );

  /* NOTE(review): reading a ulong at packet+46 is an unaligned,
     type-punned access — works on x86 but is formally UB */
  if( meta & FD_SNP_META_OPT_HANDSHAKE ) {
    FD_LOG_NOTICE(( "sending handshake %x dport=%hx session_id=%016lx...", packet[45], port, *((ulong *)(packet+46)) ));
  } else {
    FD_LOG_NOTICE(( "sending data %x dport=%hx session_id=%016lx...", packet[45], port, *((ulong *)(packet+46)) ));
  }

  struct sockaddr_in dest_addr;
  dest_addr.sin_family = AF_INET;
  dest_addr.sin_port = htons(port);
  dest_addr.sin_addr.s_addr = ip;

  /* NOTE(review): patches the source IP in place through a pointer
     that casts away const — UB if the caller's buffer is genuinely
     read-only; confirm callers always pass writable memory */
  *((uint *)(packet + 14 + 12)) = ip;
  ssize_t sent = sendto( ctx->sock_fd, packet+14+28, packet_sz-14-28, 0, (void*)&dest_addr, sizeof(dest_addr) );
  if (sent < 0) {
    FD_LOG_WARNING(( "sendto failed: %x dport=%hx session_id=%016lx", packet[45], port, *((ulong *)(packet+46)) ));
  }

  return (int)packet_sz;
}
+
+static int
+test_cb_snp_rx( void const * _ctx,
+ uchar const * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ return fd_snp_app_recv( ctx->snp_app, packet, packet_sz, meta );
+}
+
+static int
+test_cb_snp_sign( void const * _ctx,
+ ulong session_id,
+ uchar const to_sign[ FD_SNP_TO_SIGN_SZ ] ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ fd_sha512_t sha512[1];
+ uchar signature[ 64 ];
+ fd_ed25519_sign( signature, to_sign, 32, ctx->snp->config.identity, ctx->private_key, sha512 );
+ FD_LOG_NOTICE(( "test_cb_snp_sign" ));
+ return fd_snp_process_signature( ctx->snp, session_id, signature );
+}
+
+int
+test_cb_app_tx( void const * _ctx,
+ uchar * packet,
+ ulong packet_sz,
+ fd_snp_meta_t meta ) {
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ printf("Sending to %016lx...\n", meta);
+ return fd_snp_send( ctx->snp, packet, packet_sz, meta );
+}
+
+int
+test_cb_app_rx( void const * _ctx,
+ fd_snp_peer_t peer,
+ uchar const * data,
+ ulong data_sz,
+ fd_snp_meta_t meta ) {
+ (void)peer;
+ test_cb_ctx_t * ctx = (test_cb_ctx_t *)_ctx;
+ uchar buf[ FD_SNP_MTU ] = { 0 };
+ memcpy( buf, data, data_sz );
+ printf( "Received from %016lx: %s\n", meta, buf );
+ if( strncmp( (char *)data, "ACK", fd_min( data_sz, 4 ) )==0 ) {
+ // ctx->done = 1;
+ } else {
+ (void)ctx;
+ }
+ return (int)data_sz;
+}
+
+
/* Interactive live-test entry point: binds a UDP socket on
   127.0.0.1:<argv[1]> and runs a poll loop bridging stdin commands
   ('q' quit, 's' send, 'h' housekeeping) and network packets into the
   SNP stack.  Received UDP payloads are re-wrapped with synthetic
   eth+ip+udp headers before being fed to fd_snp_process_packet.

   NOTE(review): at least one span below was damaged by markup
   extraction and is flagged inline. */
int main(int argc, char *argv[]) {
  fd_boot( &argc, &argv );
  fd_wksp_t * wksp = fd_wksp_new_anonymous( FD_SHMEM_NORMAL_PAGE_SZ, 1UL << 15, fd_shmem_cpu_idx( 0 ), "wksp", 0UL );
  FD_TEST( wksp );

  /* Parse command line arguments */
  if( argc < 2 ) {
    /* NOTE(review): usage string likely lost a "<port>" token to
       extraction */
    fprintf(stderr, "Usage: %s [...]\n", argv[0]);
    return 1;
  }
  const char * ip_str = "127.0.0.1";
  uint ip = inet_addr(ip_str);
  ushort port = (ushort)atoi(argv[1]);

  /* Setup SNP */
  fd_snp_limits_t limits = {
    .peer_cnt = 256,
  };
  void * _snp = fd_wksp_alloc_laddr( wksp, fd_snp_align(), fd_snp_footprint( &limits ), 1UL );
  fd_snp_t * snp = fd_snp_join( fd_snp_new( _snp, &limits ) );
  fd_snp_app_t snp_app[1] = { 0 };
  test_cb_ctx_t ctx[1] = { 0 };

  snp->apps_cnt = 1;
  snp->apps[0].port = port;
  FD_TEST( fd_snp_init( snp ) );
  external_generate_keypair( ctx->private_key, snp->config.identity );

  snp_app->cb.ctx = ctx;
  snp_app->cb.rx = test_cb_app_rx;
  snp_app->cb.tx = test_cb_app_tx;

  snp->cb.ctx = ctx;
  snp->cb.rx = test_cb_snp_rx;
  snp->cb.tx = test_cb_snp_tx;
  snp->cb.sign = test_cb_snp_sign;

  snp->flow_cred_total = 16384L; /* Arbitrary for this test - typically dcache's depth. */
  // snp->flow_cred_taken = 0L; /* Initialized inside fd_snp_init( snp ). */
  snp->flow_cred_alloc = 4 * FD_SNP_MTU; /* Arbitrary for this test */

  /* Create UDP socket */
  int sock_fd = create_udp_socket(ip, port);
  if (sock_fd < 0) {
    return 1;
  }
  ctx->snp = snp;
  ctx->snp_app = snp_app;
  ctx->sock_fd = sock_fd;
  ctx->ip = ip;
  printf("Listening on %s:%d...\n", ip_str, port);

  /* Setup poll fds */
  struct pollfd fds[2];
  fds[0].fd = STDIN_FILENO;
  fds[0].events = POLLIN;
  fds[1].fd = sock_fd;
  fds[1].events = POLLIN;

  /* Main loop */
  uchar packet[BUFFER_SIZE];
  uchar recv_buffer[BUFFER_SIZE];
  int running = 1;
  int housekeep = 0;
  int j = 0;
  while (running) {
    int ret = poll(fds, 2, 300);
    if (ret == -1) {
      perror("poll");
      break;
    }

    // Check for network data
    if (fds[1].revents & POLLIN) {
      struct sockaddr_in src_addr;
      socklen_t src_len = sizeof(src_addr);
      long recv_len = recvfrom(sock_fd, recv_buffer+14+28, BUFFER_SIZE-14-28, 0, (void*)&src_addr, &src_len);
      if (recv_len > 0) {
        // Get local socket address
        struct sockaddr_in local_addr;
        socklen_t local_len = sizeof(local_addr);
        getsockname(sock_fd, (void*)&local_addr, &local_len);

        // Fill Ethernet header (14 bytes) - keep as zeros
        memset(recv_buffer, 0, 14);

        // Fill IP header (20 bytes)
        recv_buffer[14+0] = 0x45; // Version (4) + IHL (5)
        recv_buffer[14+1] = 0x00; // DSCP + ECN
        ushort total_len = (ushort)(20 + 8 + recv_len); // IP + UDP + payload
        recv_buffer[14+2] = (uchar)(total_len >> 8);
        recv_buffer[14+3] = (uchar)(total_len & 0xFF);
        memset(recv_buffer+14+4, 0, 4); // ID + Flags + Fragment offset
        recv_buffer[14+8] = 64; // TTL
        recv_buffer[14+9] = 17; // Protocol (UDP)
        memset(recv_buffer+14+10, 0, 2); // Checksum (set to 0)
        memcpy(recv_buffer+14+12, &src_addr.sin_addr.s_addr, 4); // Source IP
        memcpy(recv_buffer+14+16, &local_addr.sin_addr.s_addr, 4); // Dest IP

        // Fill UDP header (8 bytes)
        memcpy(recv_buffer+14+20, &src_addr.sin_port, 2); // Source port
        memcpy(recv_buffer+14+22, &local_addr.sin_port, 2); // Dest port
        ushort udp_len = (ushort)(8 + recv_len); // UDP header + payload
        recv_buffer[14+24] = (uchar)(udp_len >> 8);
        recv_buffer[14+25] = (uchar)(udp_len & 0xFF);
        memset(recv_buffer+14+26, 0, 2); // Checksum (set to 0)

        /* drop 30% packets */
        /* NOTE(review): the comment says "drop 30%" but the condition
           `> -0.1` is always true, so dropping is effectively disabled
           — presumably a debug setting left in; confirm intent */
        FD_LOG_NOTICE(( "received packet %x dport=%hx session_id=%016lx...", recv_buffer[45], src_addr.sin_port, *((ulong *)(recv_buffer+46)) ));
        if( (double)rand() / (double)RAND_MAX > -0.1 || recv_buffer[45]==0x1F ) {
          fd_snp_process_packet( snp, recv_buffer, (ulong)recv_len+14+28 );
        } else {
          FD_LOG_NOTICE(( "dropped packet %x dport=%hx session_id=%016lx...", recv_buffer[45], src_addr.sin_port, *((ulong *)(recv_buffer+46)) ));
        }
      }
    }

    // Check for user input (client mode only sends on input)
    if (fds[0].revents & POLLIN) {
      char c;
      if (scanf("%c", &c) != 1) {
        printf("Error reading input\n");
        break;
      }
      while (getchar() != '\n'); // Clear input buffer

      if (c == 'q') {
        FD_LOG_NOTICE(( "cmd 'q'" ));
        running=0;
      }

      if (c == 's') {
        FD_LOG_NOTICE(( "cmd 's'" ));
        ctx->done = 1;
      }

      if (c == 'h') {
        FD_LOG_NOTICE(( "cmd 'h'" ));
        housekeep=1;
      }
    }

    /* NOTE(review): span damaged by extraction — a send loop (likely
       `for( int j=2; j<argc; j++ ) { ... }`, whose inner `j` would
       shadow the outer `j`) and an `if( ctx->ack )` guard appear
       merged into one line; restore from upstream */
    if( ctx->done==1 ) {
      ctx->done = 0;
      for( int j=2; jack ) {
        fd_snp_app_send( ctx->snp_app, packet, sizeof(packet), "ACK", 4, ctx->ack );
        ctx->ack = 0;
      }

      /* NOTE(review): `% 1` is always 0, so housekeeping runs every
         iteration — confirm the intended period */
      if( ( (++j % 1) == 0 ) || ( housekeep != 0 ) ) {
        housekeep=0;
        fd_snp_housekeeping( snp );
      }
    }

    cleanup( sock_fd );
    fd_wksp_delete_anonymous( wksp );
    return 0;
}
diff --git a/src/waltz/snp/test_snp_v1.c b/src/waltz/snp/test_snp_v1.c
new file mode 100644
index 00000000000..45fde02766e
--- /dev/null
+++ b/src/waltz/snp/test_snp_v1.c
@@ -0,0 +1,227 @@
+#define _POSIX_C_SOURCE 199309L
+
+#include "fd_snp_v1.h"
+#include "../../ballet/sha512/fd_sha512.h"
+#include "../../ballet/ed25519/fd_ed25519.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <assert.h>
+
/* wallclock returns the current CLOCK_REALTIME reading as nanoseconds
   since the Unix epoch.  Used to time the benchmark loops below. */
static inline long
wallclock( void ) {
  struct timespec now;
  clock_gettime( CLOCK_REALTIME, &now );
  long sec_ns = ((long)1e9)*((long)now.tv_sec);
  return sec_ns + (long)now.tv_nsec;
}
+
+static inline void
+bench_output( ulong iter, long dt ) {
+ double ops = ((double)iter) / ((double)dt) * 1e3;
+ double ns = ((double)dt) / ((double)iter);
+ double gbps = ((double)(8UL*(70UL+1200UL)*iter)) / ((double)dt);
+ fprintf( stderr, "\t%13.6f Gbps Ethernet equiv throughput / core\n", gbps );
+ fprintf( stderr, "\t%13.6f Mpps / core\n", ops );
+ fprintf( stderr, "\t%13.6f ns / op\n", ns );
+}
+
+void external_generate_keypair( uchar private_key[32], uchar public_key[32] ) {
+ fd_sha512_t sha512[1];
+ FD_TEST( fd_sha512_join( fd_sha512_new( sha512 ) ) );
+ FD_TEST( fd_rng_secure( private_key, 32 )!=NULL );
+ fd_ed25519_public_from_private( public_key, private_key, sha512 );
+}
+
+void external_sign( uchar signature[64], uchar to_sign[32], uchar private_key[32], uchar public_key[32] ) {
+ fd_sha512_t sha512[1];
+ fd_ed25519_sign( signature, to_sign, 32, public_key, private_key, sha512 );
+}
+
+static void
+test_v1_handshake( void ) {
+ fd_snp_config_t client[1] = { 0 };
+ fd_snp_conn_t client_conn[1] = { 0 };
+ uchar client_private_key[ 32 ];
+
+ fd_snp_config_t server[1] = { 0 };
+ fd_snp_conn_t server_conn[1] = { 0 };
+ uchar server_private_key[ 32 ];
+
+ /* client init */
+ external_generate_keypair( client_private_key, client->identity );
+ client_conn->_pubkey = client->identity;
+
+ /* server init */
+ uchar aes_key[16];
+ FD_TEST( fd_snp_rng( aes_key, 16 )==16 );
+ fd_aes_set_encrypt_key( aes_key, 128, server->_state_enc_key );
+ fd_aes_set_decrypt_key( aes_key, 128, server->_state_dec_key );
+
+ external_generate_keypair( server_private_key, server->identity );
+ server_conn->_pubkey = server->identity;
+
+ int res;
+ int pkt_sz;
+ uchar _pkt[ 1500 ]; uchar * pkt = _pkt;
+ uchar to_sign[ 32 ];
+ uchar sig[ 64 ];
+
+ pkt_sz = fd_snp_v1_client_init( client, client_conn, NULL, 0, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_client_init failed" );
+ pkt_sz = fd_snp_v1_server_init( server, server_conn, pkt, (ulong)pkt_sz, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_server_init failed" );
+ pkt_sz = fd_snp_v1_client_cont( client, client_conn, pkt, (ulong)pkt_sz, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_client_cont failed" );
+ pkt_sz = fd_snp_v1_server_fini( server, server_conn, pkt, (ulong)pkt_sz, pkt, to_sign );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_server_fini failed" );
+ external_sign( sig, to_sign, server_private_key, server->identity );
+ res = fd_snp_v1_server_fini_add_signature( server_conn, pkt, sig );
+ FD_TEST_CUSTOM( res==0, "fd_snp_v1_server_fini_add_signature failed" );
+ pkt_sz = fd_snp_v1_client_fini( client, client_conn, pkt, (ulong)pkt_sz, pkt, to_sign );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_client_fini failed" );
+ external_sign( sig, to_sign, client_private_key, client->identity );
+ res = fd_snp_v1_client_fini_add_signature( client_conn, pkt, sig );
+ FD_TEST_CUSTOM( res==0, "fd_snp_v1_client_fini_add_signature failed" );
+ pkt_sz = fd_snp_v1_server_acpt( server, server_conn, pkt, (ulong)pkt_sz, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz==0, "fd_snp_v1_server_acpt failed" );
+
+ FD_TEST( client_conn->state==FD_SNP_TYPE_HS_DONE );
+ FD_TEST( fd_memeq( client_conn->_peer_pubkey, server->identity, 32 ) );
+ FD_TEST( server_conn->state==FD_SNP_TYPE_HS_DONE );
+ FD_TEST( fd_memeq( server_conn->_peer_pubkey, client->identity, 32 ) );
+
+ FD_LOG_NOTICE(( "Test v1 handshake: ok" ));
+
+ /* Bench */
+ unsigned long iter = 1001UL;
+ long dt = -wallclock();
+ for( unsigned long rem=iter; rem; rem-- ) {
+ memset( client_conn, 0, sizeof( fd_snp_conn_t ) ); client_conn->_pubkey = client->identity;
+ memset( server_conn, 0, sizeof( fd_snp_conn_t ) ); server_conn->_pubkey = server->identity;
+ pkt_sz = fd_snp_v1_client_init( client, client_conn, NULL, 0, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_client_init failed" );
+ pkt_sz = fd_snp_v1_server_init( server, server_conn, pkt, (ulong)pkt_sz, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_server_init failed" );
+ pkt_sz = fd_snp_v1_client_cont( client, client_conn, pkt, (ulong)pkt_sz, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_client_cont failed" );
+ pkt_sz = fd_snp_v1_server_fini( server, server_conn, pkt, (ulong)pkt_sz, pkt, to_sign );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_server_fini failed" );
+ external_sign( sig, to_sign, server_private_key, server->identity );
+ res = fd_snp_v1_server_fini_add_signature( server_conn, pkt, sig );
+ FD_TEST_CUSTOM( res==0, "fd_snp_v1_server_fini_add_signature failed" );
+ pkt_sz = fd_snp_v1_client_fini( client, client_conn, pkt, (ulong)pkt_sz, pkt, to_sign );
+ FD_TEST_CUSTOM( pkt_sz>0, "fd_snp_v1_client_fini failed" );
+ external_sign( sig, to_sign, client_private_key, client->identity );
+ res = fd_snp_v1_client_fini_add_signature( client_conn, pkt, sig );
+ FD_TEST_CUSTOM( res==0, "fd_snp_v1_client_fini_add_signature failed" );
+ pkt_sz = fd_snp_v1_server_acpt( server, server_conn, pkt, (ulong)pkt_sz, pkt, NULL );
+ FD_TEST_CUSTOM( pkt_sz==0, "fd_snp_v1_server_acpt failed" );
+ }
+ dt += wallclock();
+ fprintf( stderr, "Benchmarking full handshake\n" );
+ bench_output( iter, dt );
+}
+
+static void
+bench_ephemeral_generate( void ) {
+
+ uchar public_key[32];
+ uchar private_key[32];
+
+ /* warmup */
+ for( unsigned long rem=1000UL; rem; rem-- ) {
+ fd_snp_v1_crypto_key_share_generate( private_key, public_key );
+ __asm__ __volatile__( "# Compiler Barrier" : "+r" (private_key[0]) );
+ }
+
+ /* for real */
+ unsigned long iter = 2000UL;
+ long dt = -wallclock();
+ for( unsigned long rem=iter; rem; rem-- ) {
+ fd_snp_v1_crypto_key_share_generate( private_key, public_key );
+ __asm__ __volatile__( "# Compiler Barrier" : "+r" (private_key[0]) );
+ }
+ dt += wallclock();
+ fprintf( stderr, "Benchmarking ephemeral/key share generate\n" );
+ bench_output( iter, dt );
+}
+
+static void
+bench_enc_state_generate( void ) {
+ fd_snp_config_t config[1];
+ fd_snp_conn_t conn[1];
+ uchar out[16];
+
+ uchar aes_key[16];
+ FD_TEST( fd_snp_rng( aes_key, 16 )==16 );
+ fd_aes_set_encrypt_key( aes_key, 128, config->_state_enc_key );
+ fd_aes_set_decrypt_key( aes_key, 128, config->_state_dec_key );
+ conn->peer_addr = 123UL;
+
+ /* warmup */
+ for( unsigned long rem=1000UL; rem; rem-- ) {
+ fd_snp_v1_crypto_enc_state_generate( config, conn, out );
+ __asm__ __volatile__( "# Compiler Barrier" : "+r" (out[0]) );
+ }
+
+ /* for real */
+ unsigned long iter = 20000UL;
+ long dt = -wallclock();
+ for( unsigned long rem=iter; rem; rem-- ) {
+ fd_snp_v1_crypto_enc_state_generate( config, conn, out );
+ __asm__ __volatile__( "# Compiler Barrier" : "+r" (out[0]) );
+ }
+ dt += wallclock();
+ fprintf( stderr, "Benchmarking encrypted state generate\n" );
+ bench_output( iter, dt );
+}
+
+static void
+bench_enc_state_verify( void ) {
+
+ fd_snp_config_t config[1];
+ fd_snp_conn_t conn[1];
+ uchar out[16];
+
+ uchar aes_key[16];
+ FD_TEST( fd_snp_rng( aes_key, 16 )==16 );
+ fd_aes_set_encrypt_key( aes_key, 128, config->_state_enc_key );
+ fd_aes_set_decrypt_key( aes_key, 128, config->_state_dec_key );
+ conn->peer_addr = 123UL;
+
+ fd_snp_v1_crypto_enc_state_generate( config, conn, out );
+
+ /* warmup */
+ for( unsigned long rem=1000UL; rem; rem-- ) {
+ fd_snp_v1_crypto_enc_state_validate( config, conn, out );
+ __asm__ __volatile__( "# Compiler Barrier" : "+r" (out[0]) );
+ }
+
+ /* for real */
+ unsigned long iter = 20000UL;
+ long dt = -wallclock();
+ for( unsigned long rem=iter; rem; rem-- ) {
+ fd_snp_v1_crypto_enc_state_validate( config, conn, out );
+ __asm__ __volatile__( "# Compiler Barrier" : "+r" (out[0]) );
+ }
+ dt += wallclock();
+ fprintf( stderr, "Benchmarking encrypted state verify\n" );
+ bench_output( iter, dt );
+}
+
/* Entry point: run the functional handshake test, then the
   micro-benchmarks.  Command-line arguments are ignored. */
int
main( int argc,
      char ** argv ) {
  (void)argc; (void)argv;

  test_v1_handshake();

  bench_ephemeral_generate();
  bench_enc_state_generate();
  bench_enc_state_verify();

  return 0;
}