@@ -1305,3 +1305,134 @@ fn test_htlc_localremoved_persistence() {
 	let htlc_fail_msg_after_reload = msgs.2.unwrap().update_fail_htlcs[0].clone();
 	assert_eq!(htlc_fail_msg, htlc_fail_msg_after_reload);
 }
+
+#[test]
+#[cfg(peer_storage)]
+fn test_peer_storage() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let (persister, chain_monitor);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let nodes_0_deserialized;
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
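+	// Open an announced channel and route an initial payment between the nodes.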
+	let (_, _, cid, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+	send_payment(&nodes[0], &[&nodes[1]], 1000);
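+	// Snapshot nodes[0]'s ChannelManager and ChannelMonitor now, then make two more
+	// payments so the live channel state advances past the snapshot.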
+	let nodes_0_serialized = nodes[0].node.encode();
+	let old_state_monitor = get_monitor!(nodes[0], cid).encode();
+	send_payment(&nodes[0], &[&nodes[1]], 10000);
+	send_payment(&nodes[0], &[&nodes[1]], 9999);
+
+	// Connect a block on each node so their `ChainMonitor`s queue fresh `peer_storage`
+	// messages carrying the latest commitment transaction state.
+	connect_blocks(&nodes[0], 1);
+	connect_blocks(&nodes[1], 1);
+
+	let peer_storage_msg_events_node0 =
+		nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_msg_events();
+	let peer_storage_msg_events_node1 =
+		nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_msg_events();
+	assert_ne!(peer_storage_msg_events_node0.len(), 0);
+	assert_ne!(peer_storage_msg_events_node1.len(), 0);
+
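+	// Deliver each node's `peer_storage` blobs to its counterparty so each side holds
+	// a backup of the other's latest channel state.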
+	for ps_msg in peer_storage_msg_events_node0 {
+		match ps_msg {
+			MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
+				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+				nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg.clone());
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+
+	for ps_msg in peer_storage_msg_events_node1 {
+		match ps_msg {
+			MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msg.clone());
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+
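+	// Disconnect the peers so nodes[0] can be reloaded from the stale snapshot.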
+	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+
+	// Reload nodes[0] from the snapshot taken two payments ago, simulating lost state.
+	// TODO: Handle the case where we've completely forgotten about an active channel.
+	reload_node!(
+		nodes[0],
+		test_default_channel_config(),
+		&nodes_0_serialized,
+		&[&old_state_monitor[..]],
+		persister,
+		chain_monitor,
+		nodes_0_deserialized
+	);
+
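+	// Reconnect. Alongside its `channel_reestablish`, nodes[1] should return the blob
+	// nodes[0] previously stored with it via a `peer_storage_retrieval` message.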
+	nodes[0]
+		.node
+		.peer_connected(
+			nodes[1].node.get_our_node_id(),
+			&msgs::Init {
+				features: nodes[1].node.init_features(),
+				networks: None,
+				remote_network_address: None,
+			},
+			true,
+		)
+		.unwrap();
+
+	nodes[1]
+		.node
+		.peer_connected(
+			nodes[0].node.get_our_node_id(),
+			&msgs::Init {
+				features: nodes[0].node.init_features(),
+				networks: None,
+				remote_network_address: None,
+			},
+			false,
+		)
+		.unwrap();
+
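+	// nodes[1] queues both a `peer_storage_retrieval` and a `channel_reestablish`;
+	// nodes[0] only queues its (stale) `channel_reestablish`.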
+	let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(node_1_events.len(), 2);
+
+	let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(node_0_events.len(), 1);
+
+	match node_0_events[0] {
+		MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
+			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+			// nodes[0] would send a stale `channel_reestablish`, so there's no need to
+			// handle this.
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	if let MessageSendEvent::SendPeerStorageRetrieval { node_id, msg } = &node_1_events[0] {
+		assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+		// Handling the retrieved storage must panic: it tells nodes[0] that its channel
+		// state is stale and it can no longer safely operate.
+		let res = std::panic::catch_unwind(|| {
+			nodes[0]
+				.node
+				.handle_peer_storage_retrieval(nodes[1].node.get_our_node_id(), msg.clone())
+		});
+		assert!(res.is_err());
+	} else {
+		panic!("Unexpected event {node_1_events:?}")
+	}
+
+	if let MessageSendEvent::SendChannelReestablish { .. } = &node_1_events[1] {
+		// After the `peer_storage_retrieval` message would come a `channel_reestablish`
+		// (which would also cause nodes[0] to panic), but nodes[0] already went down due
+		// to its lost state, so there's nothing to deliver.
+	} else {
+		panic!("Unexpected event {node_1_events:?}")
+	}
+	// Since nodes[0] panicked above, we expect another panic when the test harness runs
+	// its checks on `Drop`.
+	let res = std::panic::catch_unwind(|| drop(nodes));
+	assert!(res.is_err());
+}
+