From 0cdcf5421ccf92d199996fc4de9a54ed138558c7 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 3 Nov 2025 13:41:39 -0800 Subject: [PATCH 1/2] Support capturing `tx_abort` send within channel reestablish tests --- lightning/src/ln/async_signer_tests.rs | 17 +++++++++------ lightning/src/ln/functional_test_utils.rs | 26 ++++++++++++++++++++++- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 71821081094..03728e28222 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -596,7 +596,7 @@ fn do_test_async_raa_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(revoke_and_ack.is_none()); @@ -612,14 +612,14 @@ fn do_test_async_raa_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); assert!(commitment_signed.is_some()); assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); } else { // Make sure we don't double send the RAA. 
- let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_none()); assert!(commitment_signed.is_none()); @@ -746,7 +746,7 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { @@ -760,11 +760,11 @@ fn do_test_async_commitment_signature_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { // Make sure we don't double send the CS. 
- let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_none()); } } @@ -882,6 +882,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); assert!(as_resp.6.is_none()); + assert!(as_resp.7.is_none()); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -904,6 +905,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); assert!(as_resp.6.is_none()); + assert!(as_resp.7.is_none()); nodes[0].enable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((node_b_id, chan_id))); @@ -929,6 +931,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.6.is_none()); assert!(bs_resp.6.is_none()); + assert!(as_resp.7.is_none()); + assert!(bs_resp.7.is_none()); + // Now that everything is restored, get the CS + RAA and handle them. nodes[1] .node diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 45c8e072f8d..271d458bcc8 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4996,9 +4996,17 @@ macro_rules! 
handle_chan_reestablish_msgs { ($src_node: expr, $dst_node: expr) => {{ let msg_events = $src_node.node.get_and_clear_pending_msg_events(); let mut idx = 0; + + let mut tx_abort = None; + if let Some(&MessageSendEvent::SendTxAbort { ref node_id, ref msg }) = msg_events.get(idx) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + tx_abort = Some(msg.clone()); + } + let channel_ready = if let Some(&MessageSendEvent::SendChannelReady { ref node_id, ref msg }) = - msg_events.get(0) + msg_events.get(idx) { idx += 1; assert_eq!(*node_id, $dst_node.node.get_our_node_id()); @@ -5115,6 +5123,7 @@ macro_rules! handle_chan_reestablish_msgs { announcement_sigs, tx_signatures, stfu, + tx_abort, ) }}; } @@ -5127,6 +5136,7 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub send_stfu: (bool, bool), pub send_interactive_tx_commit_sig: (bool, bool), pub send_interactive_tx_sigs: (bool, bool), + pub send_tx_abort: (bool, bool), pub expect_renegotiated_funding_locked_monitor_update: (bool, bool), pub pending_responding_commitment_signed: (bool, bool), /// Indicates that the pending responding commitment signed will be a dup for the recipient, @@ -5150,6 +5160,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { send_stfu: (false, false), send_interactive_tx_commit_sig: (false, false), send_interactive_tx_sigs: (false, false), + send_tx_abort: (false, false), expect_renegotiated_funding_locked_monitor_update: (false, false), pending_responding_commitment_signed: (false, false), pending_responding_commitment_signed_dup_monitor: (false, false), @@ -5174,6 +5185,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { send_stfu, send_interactive_tx_commit_sig, send_interactive_tx_sigs, + send_tx_abort, expect_renegotiated_funding_locked_monitor_update, pending_htlc_adds, pending_htlc_claims, @@ -5305,6 +5317,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ) } + if 
send_tx_abort.0 { + let tx_abort = chan_msgs.7.take().unwrap(); + node_a.node.handle_tx_abort(node_b_id, &tx_abort); + } else { + assert!(chan_msgs.7.is_none()); + } if send_interactive_tx_sigs.0 { let tx_signatures = chan_msgs.5.take().unwrap(); node_a.node.handle_tx_signatures(node_b_id, &tx_signatures); @@ -5417,6 +5435,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ) } + if send_tx_abort.1 { + let tx_abort = chan_msgs.7.take().unwrap(); + node_b.node.handle_tx_abort(node_a_id, &tx_abort); + } else { + assert!(chan_msgs.7.is_none()); + } if send_interactive_tx_sigs.1 { let tx_signatures = chan_msgs.5.take().unwrap(); node_b.node.handle_tx_signatures(node_a_id, &tx_signatures); From d282a47fc5be6fa14eae864e4c95456fdb5f61d2 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 3 Nov 2025 13:41:42 -0800 Subject: [PATCH 2/2] Allow counterparty tx_abort before handling initial commitment signed Upon processing the counterparty's initial `commitment_signed` for a splice, we queue a monitor update with the new commitment transactions spending the new funding transaction. Once handled, funding transaction signatures can be exchanged and we must start monitoring the chain for a possible commitment transaction broadcast spending the new funding transaction. Aborting the splice negotiation then would therefore be unsafe, hence why we currently force close in such cases. However, there is no reason to force close prior to receiving their initial `commitment_signed`, as this would imply the funding transaction signatures have yet to be exchanged, thus making a commitment broadcast spending said transaction impossible, allowing us to abort the splice negotiation safely.
--- lightning/src/ln/channel.rs | 104 +++++++++++++++++++--------- lightning/src/ln/interactivetxs.rs | 34 +++++++++ lightning/src/ln/splicing_tests.rs | 106 +++++++++++++++++++++++++++-- 3 files changed, 207 insertions(+), 37 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 75905dba1cd..c5942f38da6 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1654,7 +1654,7 @@ where } chan.context.channel_state.clear_local_stfu_sent(); chan.context.channel_state.clear_remote_stfu_sent(); - if chan.should_reset_pending_splice_state() { + if chan.should_reset_pending_splice_state(false) { // If there was a pending splice negotiation that failed due to disconnecting, we // also take the opportunity to clean up our state. let splice_funding_failed = chan.reset_pending_splice_state(); @@ -1775,7 +1775,7 @@ where None }, ChannelPhase::Funded(funded_channel) => { - if funded_channel.should_reset_pending_splice_state() { + if funded_channel.should_reset_pending_splice_state(false) { funded_channel.reset_pending_splice_state() } else { debug_assert!(false, "We should never fail an interactive funding negotiation once we're exchanging tx_signatures"); @@ -1932,12 +1932,24 @@ where (had_constructor, None) }, ChannelPhase::Funded(funded_channel) => { - if funded_channel.has_pending_splice_awaiting_signatures() { + if funded_channel.has_pending_splice_awaiting_signatures() + && funded_channel + .context() + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + { + // We only force close once the counterparty tries to abort after committing to + // the splice via their initial `commitment_signed`. This is because our monitor + // state is updated with the post-splice commitment transaction upon receiving + // their `commitment_signed`, so we would need another monitor update to abandon + // it, which we don't currently support. 
return Err(ChannelError::close( "Received tx_abort while awaiting tx_signatures exchange".to_owned(), )); } - if funded_channel.should_reset_pending_splice_state() { + if funded_channel.should_reset_pending_splice_state(true) { let has_funding_negotiation = funded_channel .pending_splice .as_ref() @@ -2681,19 +2693,6 @@ impl FundingNegotiation { } impl PendingFunding { - fn can_abandon_state(&self) -> bool { - self.funding_negotiation - .as_ref() - .map(|funding_negotiation| { - !matches!(funding_negotiation, FundingNegotiation::AwaitingSignatures { .. }) - }) - .unwrap_or_else(|| { - let has_negotiated_candidates = !self.negotiated_candidates.is_empty(); - debug_assert!(has_negotiated_candidates); - !has_negotiated_candidates - }) - } - fn check_get_splice_locked( &mut self, context: &ChannelContext, confirmed_funding_index: usize, height: u32, ) -> Option @@ -6873,7 +6872,7 @@ pub struct SpliceFundingFailed { } macro_rules! maybe_create_splice_funding_failed { - ($pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ + ($funded_channel: expr, $pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ $pending_splice .and_then(|pending_splice| pending_splice.funding_negotiation.$get()) .filter(|funding_negotiation| funding_negotiation.is_initiator()) @@ -6895,10 +6894,12 @@ macro_rules! maybe_create_splice_funding_failed { interactive_tx_constructor, .. } => interactive_tx_constructor.$contributed_inputs_and_outputs(), - FundingNegotiation::AwaitingSignatures { .. } => { - debug_assert!(false); - (Vec::new(), Vec::new()) - }, + FundingNegotiation::AwaitingSignatures { .. 
} => $funded_channel + .context + .interactive_tx_signing_session + .$get() + .expect("We have a pending splice awaiting signatures") + .$contributed_inputs_and_outputs(), }; SpliceFundingFailed { @@ -6937,7 +6938,7 @@ where fn maybe_fail_splice_negotiation(&mut self) -> Option { if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { - if self.should_reset_pending_splice_state() { + if self.should_reset_pending_splice_state(false) { self.reset_pending_splice_state() } else { match self.quiescent_action.take() { @@ -7011,19 +7012,54 @@ where /// Returns a boolean indicating whether we should reset the splice's /// [`PendingFunding::funding_negotiation`]. - fn should_reset_pending_splice_state(&self) -> bool { + fn should_reset_pending_splice_state(&self, counterparty_aborted: bool) -> bool { self.pending_splice .as_ref() - .map(|pending_splice| pending_splice.can_abandon_state()) + .map(|pending_splice| { + pending_splice + .funding_negotiation + .as_ref() + .map(|funding_negotiation| { + let is_awaiting_signatures = matches!( + funding_negotiation, + FundingNegotiation::AwaitingSignatures { .. 
} + ); + if counterparty_aborted { + !is_awaiting_signatures + || !self + .context() + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + } else { + !is_awaiting_signatures + } + }) + .unwrap_or_else(|| { + let has_negotiated_candidates = + !pending_splice.negotiated_candidates.is_empty(); + debug_assert!(has_negotiated_candidates); + !has_negotiated_candidates + }) + }) .unwrap_or(false) } fn reset_pending_splice_state(&mut self) -> Option { - debug_assert!(self.should_reset_pending_splice_state()); - debug_assert!(self.context.interactive_tx_signing_session.is_none()); - self.context.channel_state.clear_quiescent(); + debug_assert!(self.should_reset_pending_splice_state(true)); + debug_assert!( + self.context.interactive_tx_signing_session.is_none() + || !self + .context + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + ); let splice_funding_failed = maybe_create_splice_funding_failed!( + self, self.pending_splice.as_mut(), take, into_contributed_inputs_and_outputs @@ -7033,15 +7069,19 @@ where self.pending_splice.take(); } + self.context.channel_state.clear_quiescent(); + self.context.interactive_tx_signing_session.take(); + splice_funding_failed } pub(super) fn maybe_splice_funding_failed(&self) -> Option { - if !self.should_reset_pending_splice_state() { + if !self.should_reset_pending_splice_state(false) { return None; } maybe_create_splice_funding_failed!( + self, self.pending_splice.as_ref(), as_ref, to_contributed_inputs_and_outputs @@ -11996,7 +12036,7 @@ where pub fn abandon_splice( &mut self, ) -> Result<(msgs::TxAbort, Option), APIError> { - if self.should_reset_pending_splice_state() { + if self.should_reset_pending_splice_state(false) { let tx_abort = msgs::TxAbort { channel_id: self.context.channel_id(), data: Vec::new() }; let splice_funding_failed = 
self.reset_pending_splice_state(); @@ -14361,7 +14401,7 @@ where } channel_state.clear_local_stfu_sent(); channel_state.clear_remote_stfu_sent(); - if self.should_reset_pending_splice_state() + if self.should_reset_pending_splice_state(false) || !self.has_pending_splice_awaiting_signatures() { // We shouldn't be quiescent anymore upon reconnecting if: @@ -14735,7 +14775,7 @@ where // We don't have to worry about resetting the pending `FundingNegotiation` because we // can only read `FundingNegotiation::AwaitingSignatures` variants anyway. let pending_splice = - self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state()); + self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state(false)); write_tlv_fields!(writer, { (0, self.context.announcement_sigs, option), diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 97047eb3a0d..3bb68442524 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -350,6 +350,36 @@ impl ConstructedTransaction { NegotiationError { reason, contributed_inputs, contributed_outputs } } + fn to_contributed_inputs_and_outputs(&self) -> (Vec, Vec) { + let contributed_inputs = self + .tx + .input + .iter() + .zip(self.input_metadata.iter()) + .enumerate() + .filter(|(_, (_, input))| input.is_local(self.holder_is_initiator)) + .filter(|(index, _)| { + self.shared_input_index + .map(|shared_index| *index != shared_index as usize) + .unwrap_or(true) + }) + .map(|(_, (txin, _))| txin.previous_output) + .collect(); + + let contributed_outputs = self + .tx + .output + .iter() + .zip(self.output_metadata.iter()) + .enumerate() + .filter(|(_, (_, output))| output.is_local(self.holder_is_initiator)) + .filter(|(index, _)| *index != self.shared_output_index as usize) + .map(|(_, (txout, _))| txout.clone()) + .collect(); + + (contributed_inputs, contributed_outputs) + } + fn into_contributed_inputs_and_outputs(self) -> (Vec, Vec) { let 
contributed_inputs = self .tx @@ -859,6 +889,10 @@ impl InteractiveTxSigningSession { self.unsigned_tx.into_negotiation_error(reason) } + pub(super) fn to_contributed_inputs_and_outputs(&self) -> (Vec, Vec) { + self.unsigned_tx.to_contributed_inputs_and_outputs() + } + pub(super) fn into_contributed_inputs_and_outputs(self) -> (Vec, Vec) { self.unsigned_tx.into_contributed_inputs_and_outputs() } diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index f95883e9434..e387ac3bfdd 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -395,17 +395,28 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { // retry later. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let (persister_0a, persister_0b, persister_0c, persister_1a, persister_1b, persister_1c); + let ( + persister_0a, + persister_0b, + persister_0c, + persister_0d, + persister_1a, + persister_1b, + persister_1c, + persister_1d, + ); let ( chain_monitor_0a, chain_monitor_0b, chain_monitor_0c, + chain_monitor_0d, chain_monitor_1a, chain_monitor_1b, chain_monitor_1c, + chain_monitor_1d, ); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let (node_0a, node_0b, node_0c, node_1a, node_1b, node_1c); + let (node_0a, node_0b, node_0c, node_0d, node_1a, node_1b, node_1c, node_1d); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); @@ -533,9 +544,56 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); - // Attempt a splice negotiation that completes, (i.e. `tx_signatures` are exchanged). Reconnecting - // should not abort the negotiation or reset the splice state. 
- let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, contribution); + nodes[0] + .node + .splice_channel( + &channel_id, + &node_id_1, + contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + + // Attempt a splice negotiation that ends before the initial `commitment_signed` messages are + // exchanged. The node missing the other's `commitment_signed` upon reconnecting should + // implicitly abort the negotiation and reset the splice state such that we're able to retry + // another splice later. + let stfu = get_event_msg!(nodes[0], MessageSendEvent::SendStfu, node_id_1); + nodes[1].node.handle_stfu(node_id_0, &stfu); + let stfu = get_event_msg!(nodes[1], MessageSendEvent::SendStfu, node_id_0); + nodes[0].node.handle_stfu(node_id_1, &stfu); + + let splice_init = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceInit, node_id_1); + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + + let tx_add_input = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddInput, node_id_1); + nodes[1].node.handle_tx_add_input(node_id_0, &tx_add_input); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let tx_add_output = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddOutput, node_id_1); + nodes[1].node.handle_tx_add_output(node_id_0, &tx_add_output); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let tx_add_output = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddOutput, node_id_1); + nodes[1].node.handle_tx_add_output(node_id_0, &tx_add_output); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + 
nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + if let MessageSendEvent::SendTxComplete { .. } = &msg_events[0] { + } else { + panic!("Unexpected event"); + } + if let MessageSendEvent::UpdateHTLCs { .. } = &msg_events[1] { + } else { + panic!("Unexpected event"); + } if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); @@ -561,6 +619,44 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[1].node.peer_disconnected(node_id_0); } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, false); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_args.send_tx_abort = (true, false); + reconnect_nodes(reconnect_args); + + let tx_abort = get_event_msg!(nodes[0], MessageSendEvent::SendTxAbort, node_id_1); + nodes[1].node.handle_tx_abort(node_id_0, &tx_abort); + let _event = get_event!(nodes[0], Event::SpliceFailed); + + // Attempt a splice negotiation that completes, (i.e. `tx_signatures` are exchanged). Reconnecting + // should not abort the negotiation or reset the splice state. + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, contribution); + + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + &nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0d, + chain_monitor_0d, + node_0d + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + &nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1d, + chain_monitor_1d, + node_1d + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args);