diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 8d470984f42..471c7562702 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -180,7 +180,7 @@ mod test { use lightning::types::payment::PaymentHash; use lightning::util::logger::Logger; - use lightning::{expect_payment_claimed, get_htlc_update_msgs}; + use lightning::expect_payment_claimed; use lightning_types::string::UntrustedString; use std::ops::Deref; @@ -416,7 +416,7 @@ mod test { nodes[0].onion_messenger.handle_onion_message(payee_id, &inv); check_added_monitors(&nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], payee_id); + let updates = get_htlc_update_msgs(&nodes[0], &payee_id); nodes[1].node.handle_update_add_htlc(payer_id, &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[1], false); @@ -450,7 +450,7 @@ mod test { } check_added_monitors(&nodes[1], 1); - let mut updates = get_htlc_update_msgs!(nodes[1], payer_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &payer_id); nodes[0].node.handle_update_fulfill_htlc(payee_id, updates.update_fulfill_htlcs.remove(0)); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); diff --git a/lightning-liquidity/tests/lsps5_integration_tests.rs b/lightning-liquidity/tests/lsps5_integration_tests.rs index 80707a60774..73bd409ea8b 100644 --- a/lightning-liquidity/tests/lsps5_integration_tests.rs +++ b/lightning-liquidity/tests/lsps5_integration_tests.rs @@ -8,12 +8,11 @@ use common::{ }; use lightning::chain::{BestBlock, Filter}; -use lightning::check_closed_event; use lightning::events::ClosureReason; use lightning::ln::channelmanager::{ChainParameters, InterceptId}; use lightning::ln::functional_test_utils::{ - close_channel, create_chan_between_nodes, create_chanmon_cfgs, create_network, - create_node_cfgs, create_node_chanmgrs, Node, + check_closed_event, close_channel, create_chan_between_nodes, create_chanmon_cfgs, + create_network, create_node_cfgs, create_node_chanmgrs, Node, }; use lightning::ln::msgs::Init; use lightning::ln::peer_handler::CustomMessageHandler; @@ -1480,9 +1479,9 @@ fn dos_protection() { close_channel(&service_node.inner, &client_node.inner, &channel_id, funding_tx, true); let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(service_node.inner, 1, node_a_reason, [client_node_id], 100000); + check_closed_event(&service_node.inner, 1, node_a_reason, false, &[client_node_id], 100000); let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(client_node.inner, 1, node_b_reason, [service_node_id], 100000); + check_closed_event(&client_node.inner, 1, node_b_reason, false, &[service_node_id], 100000); // channel is now closed again -> should reject assert_lsps5_reject(&service_node, &client_node); diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 9b15398d4d1..bff3388a5f8 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -720,7 +720,6 @@ mod tests { use lightning::chain::chainmonitor::Persist; use lightning::chain::ChannelMonitorUpdateStatus; - use lightning::check_closed_event; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::*; use lightning::ln::msgs::BaseMessageHandler; @@ -884,7 +883,7 @@ mod tests { .unwrap(); let reason = 
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); // Set the store's directory to read-only, which should result in @@ -928,7 +927,7 @@ mod tests { .unwrap(); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap(); diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 636967a6937..36c2c809662 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -1,14 +1,14 @@ use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ - connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, - create_network, create_node_cfgs, create_node_chanmgrs, send_payment, + check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, + create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, }; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; +use lightning::{check_added_monitors, check_closed_broadcast}; use std::panic::RefUnwindSafe; @@ -188,7 +188,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) .unwrap(); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); @@ -204,12 +204,13 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { ), ); check_closed_broadcast!(nodes[1], true); - check_closed_event!( - nodes[1], + check_closed_event( + &nodes[1], 1, ClosureReason::CommitmentTxConfirmed, - [nodes[0].node.get_our_node_id()], - 100000 + false, + &[nodes[0].node.get_our_node_id()], + 100000, ); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 046e285e206..16a5a125b14 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1596,11 +1596,10 @@ where mod tests { use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::{ChannelMonitorUpdateStatus, Watch}; + use crate::check_added_monitors; use crate::events::{ClosureReason, Event}; - use crate::get_htlc_update_msgs; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::{check_added_monitors, check_closed_event}; use crate::{expect_payment_path_successful, get_event_msg}; const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 
5; @@ -1691,7 +1690,7 @@ mod tests { // Now manually walk the commitment signed dance - because we claimed two payments // back-to-back it doesn't fit into the neat walk commitment_signed_dance does. - let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); @@ -1700,7 +1699,7 @@ mod tests { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); @@ -1781,7 +1780,7 @@ mod tests { .unwrap(); let closure_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(&nodes[0], 1, closure_reason, false, [node_c_id], 1000000); + check_closed_event(&nodes[0], 1, closure_reason, false, &[node_c_id], 1000000); check_closed_broadcast(&nodes[0], 1, true); let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(close_tx.len(), 1); @@ -1790,7 +1789,7 @@ mod tests { check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); let closure_reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event!(&nodes[2], 1, closure_reason, false, [node_a_id], 1000000); + check_closed_event(&nodes[2], 1, closure_reason, false, &[node_a_id], 1000000); chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear(); diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 2de00d0af49..3c96de317ab 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -582,7 +582,7 @@ fn lock_in_htlc_for_static_invoice( // The sender should lock in the held HTLC with their LSP right after receiving the static invoice. 
sender.onion_messenger.handle_onion_message(om_peer, &static_invoice_om); check_added_monitors(sender, 1); - let commitment_update = get_htlc_update_msgs!(sender, sender_lsp.node.get_our_node_id()); + let commitment_update = get_htlc_update_msgs(&sender, &sender_lsp.node.get_our_node_id()); let update_add = commitment_update.update_add_htlcs[0].clone(); let payment_hash = update_add.payment_hash; assert!(update_add.hold_htlc.is_some()); @@ -639,7 +639,7 @@ fn invalid_keysend_payment_secret() { .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); - let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); @@ -652,7 +652,7 @@ fn invalid_keysend_payment_secret() { .handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false); - let updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates_1_0 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert_eq!(updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc( nodes[1].node.get_our_node_id(), diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 03728e28222..d751ece17ca 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -950,7 +950,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { // The rest of this is boilerplate for resolving the previous state. 
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - let as_commitment_signed = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_commitment_signed = get_htlc_update_msgs(&nodes[0], &node_b_id); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed); @@ -1319,9 +1319,9 @@ fn do_test_closing_signed(extra_closing_signed: bool, reconnect: bool) { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } #[test] @@ -1358,12 +1358,12 @@ fn test_no_disconnect_while_async_revoke_and_ack_expecting_remote_commitment_sig nodes[1].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap(); check_added_monitors(&nodes[1], 1); - let update = get_htlc_update_msgs!(&nodes[0], node_b_id); + let update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &update.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &update.commitment_signed); check_added_monitors(&nodes[1], 1); - let update = get_htlc_update_msgs!(&nodes[1], node_a_id); + let update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_add_htlc(node_b_id, &update.update_add_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &update.commitment_signed); check_added_monitors(&nodes[0], 1); @@ -1420,7 +1420,7 @@ fn test_no_disconnect_while_async_commitment_signed_expecting_remote_revoke_and_ // After processing the `update_fulfill`, they'll only be able to send `revoke_and_ack` until // the `commitment_signed` is no longer pending. 
- let mut update = get_htlc_update_msgs!(&nodes[1], node_a_id); + let mut update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &update.commitment_signed); diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index cfb2878845f..51e0e7fb95e 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -126,7 +126,7 @@ pub fn fail_blinded_htlc_backwards( expect_payment_failed_conditions(&nodes[0], payment_hash, false, payment_failed_conditions); }, i if i <= intro_node_idx => { - let unblinded_node_updates = get_htlc_update_msgs!(nodes[i], nodes[i-1].node.get_our_node_id()); + let unblinded_node_updates = get_htlc_update_msgs(&nodes[i], &nodes[i-1].node.get_our_node_id()); assert_eq!(unblinded_node_updates.update_fail_htlcs.len(), 1); nodes[i-1].node.handle_update_fail_htlc( nodes[i].node.get_our_node_id(), &unblinded_node_updates.update_fail_htlcs[i-1] @@ -134,7 +134,7 @@ pub fn fail_blinded_htlc_backwards( do_commitment_signed_dance(&nodes[i-1], &nodes[i], &unblinded_node_updates.commitment_signed, false, false); }, _ => { - let blinded_node_updates = get_htlc_update_msgs!(nodes[i], nodes[i-1].node.get_our_node_id()); + let blinded_node_updates = get_htlc_update_msgs(&nodes[i], &nodes[i-1].node.get_our_node_id()); assert_eq!(blinded_node_updates.update_fail_malformed_htlcs.len(), 1); let update_malformed = &blinded_node_updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); @@ -426,7 +426,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { } } - let mut updates_0_1 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut updates_0_1 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let update_add = &mut updates_0_1.update_add_htlcs[0]; if intro_fails { @@ -441,7 +441,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[1], 1); if intro_fails { - let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); let failed_destination = match check { @@ -466,7 +466,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { return } - let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let mut updates_1_2 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; cause_error!(2, 3, update_add); @@ -486,7 +486,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { ); check_added_monitors!(nodes[2], 1); - let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 
32]); @@ -497,7 +497,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); - let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, @@ -550,7 +550,7 @@ fn failed_backwards_to_intro_node() { expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); - let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0]; // Check that the final node encodes its failure correctly. assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); @@ -562,7 +562,7 @@ fn failed_backwards_to_intro_node() { nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); - let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, @@ -658,7 +658,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, expect_and_process_pending_htlcs(&nodes[1], false); check_added_monitors!(nodes[1], 1); - let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let mut updates_1_2 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); check_added_monitors!(nodes[2], 0); @@ -667,7 +667,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id); check_added_monitors!(nodes[2], 1); - let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 32]); @@ -678,7 +678,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); - let mut updates = 
get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, @@ -1051,7 +1051,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { } } - let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); @@ -1072,7 +1072,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { _ => panic!() } }).unwrap() - } else { get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()) }; + } else { get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()) }; assert_eq!(updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); @@ -1140,7 +1140,7 @@ fn blinded_path_retries() { nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); - let updates = get_htlc_update_msgs!(nodes[3], $intro_node.node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[3], &$intro_node.node.get_our_node_id()); assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); @@ -1148,7 +1148,7 @@ fn blinded_path_retries() { $intro_node.node.handle_update_fail_malformed_htlc(nodes[3].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&$intro_node, &nodes[3], &updates.commitment_signed, true, false); - let updates = get_htlc_update_msgs!($intro_node, nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&$intro_node, &nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc($intro_node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &$intro_node, &updates.commitment_signed, false, false); @@ -1256,7 +1256,7 @@ fn min_htlc() { &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] ); check_added_monitors(&nodes[1], 1); - let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, @@ -1448,7 +1448,7 @@ fn fails_receive_tlvs_authentication() { check_added_monitors!(nodes[1], 1); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); - let mut update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut update_fail = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); 
assert!(update_fail.update_fail_htlcs.len() == 1); let fail_msg = &update_fail.update_fail_htlcs[0]; nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), fail_msg); @@ -2151,14 +2151,14 @@ fn test_trampoline_forward_payload_encoded_as_receive() { do_pass_along_path(args); { - let unblinded_node_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let unblinded_node_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fail_htlc( nodes[2].node.get_our_node_id(), &unblinded_node_updates.update_fail_htlcs[0] ); do_commitment_signed_dance(&nodes[1], &nodes[2], &unblinded_node_updates.commitment_signed, true, false); } { - let unblinded_node_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let unblinded_node_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc( nodes[1].node.get_our_node_id(), &unblinded_node_updates.update_fail_htlcs[0] ); @@ -2526,14 +2526,14 @@ fn test_trampoline_forward_rejection() { do_pass_along_path(args); { - let unblinded_node_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let unblinded_node_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fail_htlc( nodes[2].node.get_our_node_id(), &unblinded_node_updates.update_fail_htlcs[0] ); do_commitment_signed_dance(&nodes[1], &nodes[2], &unblinded_node_updates.commitment_signed, true, false); } { - let unblinded_node_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let unblinded_node_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc( nodes[1].node.get_our_node_id(), &unblinded_node_updates.update_fail_htlcs[0] ); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 27c29b1774c..b7fedb6f973 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -124,7 +124,7 @@ fn test_monitor_and_persister_update_fail() { expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); check_added_monitors!(nodes[1], 1); - let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); @@ -288,7 +288,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[test] @@ -562,7 +562,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { macro_rules! handle_bs_raa { () => { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - as_commitment_update = get_htlc_update_msgs!(nodes[0], node_b_id); + as_commitment_update = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_commitment_update.update_add_htlcs.is_empty()); assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); assert!(as_commitment_update.update_fail_htlcs.is_empty()); @@ -575,7 +575,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { macro_rules! 
handle_initial_raa { () => { nodes[1].node.handle_revoke_and_ack(node_a_id, &initial_revoke_and_ack); - bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + bs_second_commitment_update = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); @@ -944,7 +944,7 @@ fn test_monitor_update_raa_while_paused() { let as_update_raa = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_update_raa.0); check_added_monitors!(nodes[1], 1); - let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); check_added_monitors!(nodes[1], 1); @@ -996,7 +996,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { ); check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -1143,7 +1143,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); check_added_monitors!(nodes[2], 1); - let bs_cs = get_htlc_update_msgs!(nodes[2], node_b_id); + let bs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); assert!(bs_cs.update_fail_malformed_htlcs.is_empty()); @@ -1152,7 +1152,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); + as_cs = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); check_added_monitors!(nodes[1], 1); @@ -1172,7 +1172,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { _ => panic!("Unexpected event"), } - as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); + as_cs = get_htlc_update_msgs(&nodes[1], &node_c_id); match bs_revoke_and_commit[1] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { @@ -1205,7 +1205,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); check_added_monitors!(nodes[2], 1); - let bs_second_cs = get_htlc_update_msgs!(nodes[2], node_b_id); + let bs_second_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); check_added_monitors!(nodes[1], 1); @@ -1295,7 +1295,7 @@ fn test_monitor_update_fail_reestablish() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -1357,7 +1357,7 @@ fn test_monitor_update_fail_reestablish() { 
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); - updates = get_htlc_update_msgs!(nodes[1], node_a_id); + updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -1474,7 +1474,7 @@ fn raa_no_response_awaiting_raa_state() { check_added_monitors!(nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); @@ -1556,7 +1556,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); check_added_monitors!(nodes[0], 1); - let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); check_added_monitors!(nodes[1], 1); @@ -1601,9 +1601,9 @@ fn claim_while_disconnected_monitor_update_fail() { _ => panic!("Unexpected event"), } - let as_commitment = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_commitment = get_htlc_update_msgs(&nodes[0], &node_b_id); - let bs_commitment = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_commitment = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); @@ -1898,7 +1898,7 @@ fn test_monitor_update_fail_claim() { expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); - let mut bs_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_fulfill.update_fulfill_htlcs.remove(0)); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_fulfill.commitment_signed, false, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -1906,7 +1906,7 @@ fn test_monitor_update_fail_claim() { // Get the payment forwards, note that they were batched into one commitment update. 
nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); - let bs_forward_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_forward_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); let commitment = &bs_forward_update.commitment_signed; @@ -1996,7 +1996,7 @@ fn test_monitor_update_on_pending_forwards() { ); check_added_monitors!(nodes[2], 1); - let cs_fail_update = get_htlc_update_msgs!(nodes[2], node_b_id); + let cs_fail_update = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_fail_update.commitment_signed, true, true); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2026,7 +2026,7 @@ fn test_monitor_update_on_pending_forwards() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, true); @@ -2104,7 +2104,7 @@ fn monitor_update_claim_fail_no_response() { expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let mut bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_updates.update_fulfill_htlcs.remove(0)); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -2274,9 +2274,9 @@ fn do_during_funding_monitor_fail( send_payment(&nodes[0], &[&nodes[1]], 8000000); close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true); let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } #[test] @@ -2383,7 +2383,7 @@ fn test_pending_update_fee_ack_on_reconnect() { let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); - let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_initial_send_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect { @@ -2392,7 +2392,7 @@ fn test_pending_update_fee_ack_on_reconnect() { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_update_fee_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); 
nodes[1].node.handle_update_fee(node_a_id, as_update_fee_msgs.update_fee.as_ref().unwrap()); @@ -2447,13 +2447,13 @@ fn test_pending_update_fee_ack_on_reconnect() { &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); check_added_monitors!(nodes[1], 1); - let bs_second_cs = get_htlc_update_msgs!(nodes[1], node_a_id).commitment_signed; + let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id).commitment_signed; nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( node_a_id, - &get_htlc_update_msgs!(nodes[0], node_b_id).commitment_signed, + &get_htlc_update_msgs(&nodes[0], &node_b_id).commitment_signed, ); check_added_monitors!(nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); @@ -2507,14 +2507,15 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); - let mut cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); - let mut bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors!(nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); @@ -2550,7 +2551,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let update_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); + let update_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { nodes[1].node.handle_update_fee(node_a_id, update_msgs.update_fee.as_ref().unwrap()); @@ -2605,7 +2606,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_second_update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); @@ -2619,7 +2620,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - let bs_second_cs = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors!(nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); @@ -2914,7 +2915,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f &[HTLCHandlingFailureType::Receive { payment_hash }], ); 
check_added_monitors!(nodes[2], 1); - get_htlc_update_msgs!(nodes[2], node_b_id); + get_htlc_update_msgs(&nodes[2], &node_b_id); // Note that we don't populate fulfill_msg.attribution_data here, which will lead to hold times being // unavailable. } else { @@ -2922,7 +2923,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); - let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1); // Check that the message we're about to deliver matches the one generated. Ignore attribution data. @@ -2940,7 +2941,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { - bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); + bs_updates = Some(get_htlc_update_msgs(&nodes[1], &node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( node_b_id, @@ -2978,7 +2979,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f check_added_monitors!(nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); // We finally receive the second payment, but don't claim it - bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); + bs_updates = Some(get_htlc_update_msgs(&nodes[1], &node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( node_b_id, @@ -3070,9 +3071,9 @@ fn test_temporary_error_during_shutdown() { assert_eq!(txn_a.len(), 1); check_spends!(txn_a[0], funding_tx); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); } #[test] @@ -3309,7 +3310,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let node_a_ser = nodes[0].node.encode(); reload_node!(nodes[0], &node_a_ser, &[], persister, new_chain_monitor, node_a_reload); - check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false, &[node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); } @@ -3417,7 +3418,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let node_b_ser = nodes[1].node.encode(); reload_node!(nodes[1], &node_b_ser, &[], persister, new_chain_monitor, node_b_reload); - check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, &[node_a_id], 100000); assert!(nodes[1].node.list_channels().is_empty()); } @@ -3468,7 +3469,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let mut cs_htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut cs_htlc_fulfill = get_htlc_update_msgs(&nodes[2], &node_b_id); 
nodes[1] .node .handle_update_fulfill_htlc(node_c_id, cs_htlc_fulfill.update_fulfill_htlcs.remove(0)); @@ -3483,7 +3484,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); - let mut as_htlc_fulfill = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut as_htlc_fulfill = get_htlc_update_msgs(&nodes[0], &node_b_id); if completion_mode != BlockedUpdateComplMode::Sync { // We use to incorrectly handle monitor update completion in cases where we completed a // monitor update async or after reload. We test both based on the `completion_mode`. @@ -3559,7 +3560,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode // When we fetch the next update the message getter will generate the next update for nodes[2], // generating a further monitor update. - let mut bs_htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_c_id); + let mut bs_htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_c_id); check_added_monitors(&nodes[1], 1); nodes[2] @@ -4493,7 +4494,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000); + check_closed_event(&nodes[0], 1, a_reason, false, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4502,7 +4503,8 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); + let b_reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, b_reason, false, &[node_a_id], 1000000); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim // the payment on C and give B the preimage for it. 
@@ -4510,7 +4512,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); check_added_monitors!(nodes[1], 1); @@ -4569,7 +4571,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000); + check_closed_event(&nodes[0], 1, a_reason, false, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4578,7 +4580,8 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); + let b_reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, b_reason, false, &[node_a_id], 1000000); // Now that B has a pending payment with the inbound HTLC on a closed channel, claim the // payment on disk, but don't let the `ChannelMonitorUpdate` complete. This should prevent the diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 3fd546aaff7..ad49fb07500 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -1045,7 +1045,7 @@ pub fn test_user_configurable_csv_delay() { panic!(); } let reason = ClosureReason::ProcessingError { err: reason_msg }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1000000); // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new() nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap(); @@ -1705,7 +1705,7 @@ pub fn test_invalid_funding_tx() { confirm_transaction_at(&nodes[1], &tx, 1); let reason = ClosureReason::ProcessingError { err: expected_err.to_string() }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); check_added_monitors(&nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1963,7 +1963,7 @@ pub fn test_channel_close_when_not_timely_accepted() { // Since we disconnected from peer and did not connect back within time, // we should have forced-closed the channel by now. 
let reason = ClosureReason::FundingTimedOut; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); assert_eq!(nodes[0].node.list_channels().len(), 0); { @@ -2542,5 +2542,5 @@ fn test_fund_pending_channel() { let reason = ClosureReason::ProcessingError { err: "Error in transaction funding: Misuse error: Channel f7fee84016d554015f5166c0a0df6479942ef55fd70713883b0493493a38e13a with counterparty 0355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23 is not an unfunded, outbound channel ready to fund".to_owned(), }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100_000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8abb2378627..ad11fd6def7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18559,7 +18559,7 @@ mod tests { let fail = HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }; expect_htlc_failure_conditions(events, &[fail]); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -18585,7 +18585,7 @@ mod tests { expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); check_added_monitors!(nodes[1], 2); - let mut bs_1st_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut bs_1st_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed); @@ -18593,7 +18593,7 @@ mod tests { let (as_first_raa, as_first_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); check_added_monitors!(nodes[1], 1); - let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -18602,7 +18602,7 @@ mod tests { check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); - let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_second_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); check_added_monitors!(nodes[1], 1); @@ -18679,7 +18679,7 @@ mod tests { let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, 
&[fail]); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -18726,7 +18726,7 @@ mod tests { let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -18775,7 +18775,7 @@ mod tests { let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -18822,7 +18822,7 @@ mod tests { RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -18833,7 +18833,7 @@ mod tests { expect_and_process_pending_htlcs(&nodes[1], false); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); - let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let _ = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1); } @@ -18890,7 +18890,7 @@ mod tests { nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors!(nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); // Confirm that the channel_update was not sent immediately to node[1] but was cached. 
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -18954,7 +18954,7 @@ mod tests { .unwrap(); check_added_monitors!(nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1_000_000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 1_000_000); { // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been @@ -18991,8 +18991,8 @@ mod tests { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000); - check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000); + check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false, &[nodes[1].node.get_our_node_id()], 1_000_000); + check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, &[nodes[0].node.get_our_node_id()], 1_000_000); // At this point the state for the peers should have been removed. assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0); @@ -19438,7 +19438,7 @@ mod tests { check_closed_broadcast(&nodes[0], 1, false); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); { let txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -19476,7 +19476,7 @@ mod tests { let expected_close_reason = ClosureReason::ProcessingError { err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string() }; - check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, expected_close_reason, false, &[nodes[0].node.get_our_node_id()], 100000); { let txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 5fac0fd9b4f..40b523aa626 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1103,16 +1103,6 @@ pub fn get_htlc_update_msgs(node: &Node, recipient: &PublicKey) -> msgs::Commitm } } -#[macro_export] -/// Gets an UpdateHTLCs MessageSendEvent -/// -/// Don't use this, use the identically-named function instead. -macro_rules! get_htlc_update_msgs { - ($node: expr, $node_id: expr) => { - $crate::ln::functional_test_utils::get_htlc_update_msgs(&$node, &$node_id) - }; -} - /// Fetches the first `msg_event` to the passed `node_id` in the passed `msg_events` vec. /// Returns the `msg_event`. /// @@ -2384,33 +2374,6 @@ pub fn check_closed_event( check_closed_events(node, expected_close_events.as_slice()); } -/// Check that a channel's closing channel events has been issued -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules! 
check_closed_event { - ($node: expr, $events: expr, $reason: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => { - check_closed_event!( - $node, - $events, - $reason, - false, - $counterparty_node_ids, - $channel_capacity - ); - }; - ($node: expr, $events: expr, $reason: expr, $is_check_discard_funding: expr, $counterparty_node_ids: expr, $channel_capacity: expr) => { - $crate::ln::functional_test_utils::check_closed_event( - &$node, - $events, - $reason, - $is_check_discard_funding, - &$counterparty_node_ids, - $channel_capacity, - ); - }; -} - pub fn handle_bump_events(node: &Node, expected_close: bool, expected_htlc_count: usize) { let events = node.chain_monitor.chain_monitor.get_and_clear_pending_events(); let mut close = false; @@ -3442,7 +3405,7 @@ fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { // iterate from the receiving node to the origin node and handle update fail htlc. for (&node, &prev_node) in expected_path.iter().rev().zip(expected_path.iter().rev().skip(1)) { - let updates = get_htlc_update_msgs!(node, prev_node.node.get_our_node_id()); + let updates = get_htlc_update_msgs(node, &prev_node.node.get_our_node_id()); prev_node .node .handle_update_fail_htlc(node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8f3c94b821f..51ef2e706b1 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -211,27 +211,27 @@ pub fn fake_network_test() { // Close down the channels... close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, node_a_reason, false, &[node_b_id], 100000); let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, node_b_reason, false, &[node_a_id], 100000); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); + check_closed_event(&nodes[1], 1, node_b_reason, false, &[node_c_id], 100000); let node_c_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[2], 1, node_c_reason, [node_b_id], 100000); + check_closed_event(&nodes[2], 1, node_c_reason, false, &[node_b_id], 100000); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); let node_c_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); + check_closed_event(&nodes[2], 1, node_c_reason, false, &[node_d_id], 100000); let node_d_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[3], 1, node_d_reason, [node_c_id], 100000); + check_closed_event(&nodes[3], 1, node_d_reason, false, &[node_c_id], 100000); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, node_b_reason, [node_d_id], 100000); + check_closed_event(&nodes[1], 1, node_b_reason, false, &[node_d_id], 100000); let node_d_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[3], 1, node_d_reason, 
[node_b_id], 100000); + check_closed_event(&nodes[3], 1, node_d_reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -353,7 +353,8 @@ pub fn test_duplicate_htlc_different_direction_onchain() { } } check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -598,7 +599,7 @@ pub fn channel_monitor_network_test() { check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); { let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE); assert_eq!(node_txn.len(), 1); @@ -614,7 +615,8 @@ pub fn channel_monitor_network_test() { } assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); // One pending HTLC is discarded by the force-close: let (payment_preimage_1, payment_hash_1, ..) = @@ -645,8 +647,9 @@ pub fn channel_monitor_network_test() { assert_eq!(nodes[2].node.list_channels().len(), 1); let node_b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + check_closed_event(&nodes[1], 1, node_b_reason, false, &[node_c_id], 100000); + let node_c_reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[2], 1, node_c_reason, false, &[node_b_id], 100000); macro_rules! claim_funds { ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {{ @@ -699,8 +702,9 @@ pub fn channel_monitor_network_test() { assert_eq!(nodes[3].node.list_channels().len(), 1); let node_c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); + check_closed_event(&nodes[2], 1, node_c_reason, false, &[node_d_id], 100000); + let node_d_reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[3], 1, node_d_reason, false, &[node_c_id], 100000); // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. 
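(For reference while reading the remaining hunks: the removed check_closed_event! macro defaulted its fourth argument to false, so every migrated call now spells that flag out and passes the node and the counterparty list by reference. A sketch of the argument order, assuming `nodes`, `reason`, and `node_a_id` as in the surrounding tests:)

    // check_closed_event(node, expected_event_count, reason, is_check_discard_funding,
    //                    counterparty_node_ids, channel_capacity)
    // Old: check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
    check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);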
@@ -767,7 +771,7 @@ pub fn channel_monitor_network_test() { check_added_monitors(&nodes[4], 1); test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash_2) }; - check_closed_event!(nodes[4], 1, reason, [node_d_id], 100000); + check_closed_event(&nodes[4], 1, reason, false, &[node_d_id], 100000); mine_transaction(&nodes[4], &node_txn[0]); check_preimage_claim(&nodes[4], &node_txn); @@ -785,7 +789,7 @@ pub fn channel_monitor_network_test() { Ok(ChannelMonitorUpdateStatus::Completed) ); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash_2) }; - check_closed_event!(nodes[3], 1, reason, [node_id_4], 100000); + check_closed_event(&nodes[3], 1, reason, false, &[node_id_4], 100000); } #[xtest(feature = "_externalize_tests")] @@ -846,7 +850,8 @@ pub fn test_justice_tx_htlc_timeout() { assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); node_txn.clear(); } - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason.clone(), false, &[node_a_id], 100000); check_added_monitors(&nodes[1], 1); test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); @@ -860,7 +865,7 @@ pub fn test_justice_tx_htlc_timeout() { Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT, ); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); check_added_monitors(&nodes[0], 1); // Broadcast revoked HTLC-timeout on node 1 mine_transaction(&nodes[1], &node_txn[1]); @@ -928,7 +933,8 @@ pub fn test_justice_tx_htlc_success() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast(&nodes[1], 1, true); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason.clone(), false, &[node_a_id], 100000); let node_txn = test_txn_broadcast( &nodes[1], &chan_6, @@ -937,7 +943,7 @@ pub fn test_justice_tx_htlc_success() { ); check_added_monitors(&nodes[1], 1); mine_transaction(&nodes[0], &node_txn[1]); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); test_revoked_htlc_claim_txn_broadcast( &nodes[0], node_txn[1].clone(), @@ -971,7 +977,8 @@ pub fn revoked_output_claim() { // Inform nodes[1] that nodes[0] broadcast a stale tx mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason.clone(), false, &[node_a_id], 100000); check_added_monitors(&nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output @@ -982,7 +989,7 @@ pub fn revoked_output_claim() { mine_transaction(&nodes[0], &revoked_local_txn[0]); get_announce_close_broadcast_events(&nodes, 0, 1); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } 
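(Where one test asserts the same closure reason on both peers, the patch binds the reason once and clones it for the first use; this relies on the assumption, suggested by the call sites above, that the function takes the reason by value:)

    let reason = ClosureReason::CommitmentTxConfirmed;
    // The first check consumes a clone...
    check_closed_event(&nodes[1], 1, reason.clone(), false, &[node_a_id], 100000);
    // ...so the last check can move the original binding.
    check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000);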
#[xtest(feature = "_externalize_tests")] @@ -1106,11 +1113,12 @@ pub fn claim_htlc_outputs() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason.clone(), false, &[node_b_id], 100000); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -1211,14 +1219,8 @@ pub fn do_test_multiple_package_conflicts(p2a_anchor: bool) { let node2_commit_tx = &node2_commit_tx[0]; check_spends!(node2_commit_tx, funding_tx_1_2); mine_transaction(&nodes[1], node2_commit_tx); - check_closed_event( - &nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - false, - &[node_c_id], - CHAN_CAPACITY, - ); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], CHAN_CAPACITY); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); @@ -1429,7 +1431,7 @@ pub fn test_htlc_on_chain_success() { nodes[2].node.claim_funds(our_payment_preimage_2); expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000); check_added_monitors(&nodes[2], 2); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -1438,7 +1440,8 @@ pub fn test_htlc_on_chain_success() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors(&nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx) assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], commitment_tx[0]); @@ -1559,7 +1562,8 @@ pub fn test_htlc_on_chain_success() { mine_transaction(&nodes[1], &node_a_commitment_tx[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert!(node_txn.len() == 1 || node_txn.len() == 2); // HTLC-Success, RBF bump of above aggregated HTLC txn let commitment_spend = if node_txn.len() == 1 { @@ -1696,7 +1700,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors(&nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + 
check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 0); @@ -1779,7 +1784,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], commitment_tx[0]); @@ -1824,7 +1830,8 @@ pub fn test_simple_commitment_revoked_fail_backward() { let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); @@ -1927,7 +1934,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( &[HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }], ); check_added_monitors(&nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -1944,7 +1951,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( &[HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }], ); check_added_monitors(&nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -1964,7 +1971,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( &[HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }], ); check_added_monitors(&nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -2331,7 +2338,7 @@ pub fn test_htlc_ignore_latest_remote_commitment() { check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(node_txn.len(), 2); @@ -2342,7 +2349,8 @@ pub fn test_htlc_ignore_latest_remote_commitment() { connect_block(&nodes[1], &block); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, 
reason, false, &[node_a_id], 100000); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions @@ -2406,7 +2414,7 @@ pub fn test_force_close_fail_back() { check_closed_broadcast!(nodes[2], true); check_added_monitors(&nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[2], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100000); let commitment_tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -2422,7 +2430,8 @@ pub fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. { @@ -2471,7 +2480,7 @@ pub fn test_dup_events_on_peer_disconnect() { nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); check_added_monitors(&nodes[1], 1); - let mut claim_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut claim_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, claim_msgs.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -2537,8 +2546,8 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a // `DiscardFunding` event when the peers are disconnected and do not reconnect before the // funding transaction is broadcasted. 
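(The two calls just below also exercise the discard-funding flag: when the funding transaction was never broadcast, the closing node emits a DiscardFunding event in addition to ChannelClosed, so the expected event count is 2 and the flag is true. A reading of those call sites, not new behavior:)

    // Funding never broadcast: expect ChannelClosed + DiscardFunding (2 events, flag = true).
    check_closed_event(&nodes[0], 2, ClosureReason::DisconnectedPeer, true, &[node_b_id], 1000000);
    // The counterparty only sees the channel close (1 event, flag = false).
    check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, &[node_a_id], 1000000);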
- check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true, [node_b_id], 1000000); - check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, [node_a_id], 1000000); + check_closed_event(&nodes[0], 2, ClosureReason::DisconnectedPeer, true, &[node_b_id], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, &[node_a_id], 1000000); } #[xtest(feature = "_externalize_tests")] @@ -3171,7 +3180,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); - let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_second_commitment_signed = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); @@ -3180,7 +3189,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - let as_commitment_signed = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_commitment_signed = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_commitment_signed.update_add_htlcs.is_empty()); assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); assert!(as_commitment_signed.update_fail_htlcs.is_empty()); @@ -3312,7 +3321,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { ); check_added_monitors(&nodes[1], 1); - let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_timeout_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); @@ -3478,7 +3487,7 @@ pub fn test_claim_sizeable_push_msat() { check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); @@ -3517,7 +3526,7 @@ pub fn test_claim_on_remote_sizeable_push_msat() { check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -3527,7 +3536,8 @@ pub fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -3557,7 +3567,8 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() 
{ mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); mine_transaction(&nodes[1], &node_txn[0]); @@ -3611,7 +3622,8 @@ pub fn test_static_spendable_outputs_preimage_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -3657,7 +3669,8 @@ pub fn test_static_spendable_outputs_timeout_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let conditions = PaymentFailedConditions::new().from_mon_update(); expect_payment_failed_conditions(&nodes[1], our_payment_hash, false, conditions); @@ -3697,7 +3710,8 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); // If the HTLC expires in more than COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE blocks, we'll // claim both the revoked and HTLC outputs in one transaction, otherwise we'll split them as we @@ -3752,7 +3766,8 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -3774,7 +3789,8 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); // There will be 2 justice transactions: // - One on the unpinnable, revoked to_self output on the commitment transaction and on @@ -3832,7 
+3848,8 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(revoked_htlc_txn.len(), 1); @@ -3852,7 +3869,8 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); // There will be 2 justice transactions, one on the revoked HTLC output on the commitment // transaction, and one on the revoked to_self output on the HTLC-success transaction. @@ -3923,7 +3941,7 @@ pub fn test_onchain_to_onchain_claim() { nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); check_added_monitors(&nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -3932,7 +3950,8 @@ pub fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors(&nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100000); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 1); @@ -4015,7 +4034,8 @@ pub fn test_onchain_to_onchain_claim() { // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: HTLC-Success tx assert_eq!(b_txn.len(), 1); @@ -4092,7 +4112,8 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); // Confirm blocks until both HTLCs expire and get a transaction which times out one HTLC. 
connect_blocks(&nodes[1], TEST_FINAL_CLTV + config.channel_config.cltv_expiry_delta as u32); @@ -4127,9 +4148,9 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { nodes[4].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[4], dup_payment_hash, 800_000); check_added_monitors(&nodes[4], 1); - let mut updates = get_htlc_update_msgs!(nodes[4], node_c_id); + let mut updates = get_htlc_update_msgs(&nodes[4], &node_c_id); nodes[2].node.handle_update_fulfill_htlc(node_e_id, updates.update_fulfill_htlcs.remove(0)); - let _cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let _cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); expect_payment_forwarded!(nodes[2], nodes[1], nodes[4], Some(196), false, false); check_added_monitors(&nodes[2], 1); do_commitment_signed_dance(&nodes[2], &nodes[4], &updates.commitment_signed, false, false); @@ -4140,7 +4161,8 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[2], &commitment_txn[0]); check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100000); let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs) @@ -4182,7 +4204,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id; @@ -4200,7 +4222,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // provide to node A. 
mine_transaction(&nodes[1], htlc_success_tx_to_confirm); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(392), true, true); - let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -4247,7 +4269,8 @@ pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { _ => panic!("Unexepected event"), } check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let node_tx = { let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -4404,7 +4427,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[4], &failed_destinations); check_added_monitors(&nodes[4], 1); - let four_removes = get_htlc_update_msgs!(nodes[4], node_d_id); + let four_removes = get_htlc_update_msgs(&nodes[4], &node_d_id); nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[0]); nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[1]); nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[2]); @@ -4423,7 +4446,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[5], &failed_destinations_2); check_added_monitors(&nodes[5], 1); - let two_removes = get_htlc_update_msgs!(nodes[5], node_d_id); + let two_removes = get_htlc_update_msgs(&nodes[5], &node_d_id); nodes[3].node.handle_update_fail_htlc(node_f_id, &two_removes.update_fail_htlcs[0]); nodes[3].node.handle_update_fail_htlc(node_f_id, &two_removes.update_fail_htlcs[1]); do_commitment_signed_dance(&nodes[3], &nodes[5], &two_removes.commitment_signed, false, false); @@ -4441,7 +4464,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno ]; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[3], &failed_destinations_3); check_added_monitors(&nodes[3], 1); - let six_removes = get_htlc_update_msgs!(nodes[3], node_c_id); + let six_removes = get_htlc_update_msgs(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[0]); nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[1]); nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[2]); @@ -4720,7 +4743,8 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { mine_transaction(&nodes[0], &local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let htlc_timeout = { @@ -4842,7 +4866,8 @@ pub fn test_key_derivation_params() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - 
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -4891,7 +4916,7 @@ pub fn test_static_output_closing_tx() { mine_transaction(&nodes[0], &closing_tx); let reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -4900,7 +4925,7 @@ pub fn test_static_output_closing_tx() { mine_transaction(&nodes[1], &closing_tx); let reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4928,7 +4953,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); - let mut bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -4949,7 +4974,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -4969,7 +4994,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let _as_update = get_htlc_update_msgs!(nodes[0], node_b_id); + let _as_update = get_htlc_update_msgs(&nodes[0], &node_b_id); // As far as A is concerned, the HTLC is now present only in the latest remote commitment // transaction, however it is not in A's latest local commitment, so we can just broadcast that @@ -4988,7 +5013,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -5015,7 +5040,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no ); check_added_monitors(&nodes[1], 1); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, 
&bs_updates.commitment_signed); check_added_monitors(&nodes[0], 1); @@ -5044,7 +5069,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(our_payment_hash) }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } else { expect_payment_failed!(nodes[0], our_payment_hash, true); } @@ -5313,7 +5338,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, amt_1); - let mut update_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut update_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_msgs.update_fulfill_htlcs.remove(0)); do_commitment_signed_dance(&nodes[0], &nodes[1], &update_msgs.commitment_signed, false, true); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -5728,7 +5753,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { ); check_added_monitors(&nodes[1], 1); - let remove = get_htlc_update_msgs!(nodes[1], node_a_id); + let remove = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &remove.update_fail_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &remove.commitment_signed); check_added_monitors(&nodes[0], 1); @@ -5760,7 +5785,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); @@ -5834,7 +5860,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { if local { // We fail dust-HTLC 1 by broadcast of local commitment tx mine_transaction(&nodes[0], &as_commitment_tx[0]); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); @@ -5860,7 +5887,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { mine_transaction(&nodes[0], &bs_commitment_tx[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires @@ -6270,7 +6298,8 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { ); check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); + let reason = ClosureReason::CommitmentTxConfirmed; + 
check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 1000000); connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = { @@ -6585,7 +6614,7 @@ pub fn test_counterparty_raa_skip_no_crash() { check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -6623,7 +6652,8 @@ pub fn test_bump_txn_sanitize_tracking_maps() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 1000000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1000000); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 2); //ChannelMonitor: justice txn * 2 @@ -6682,7 +6712,8 @@ pub fn test_channel_conf_timeout() { connect_blocks(&nodes[1], 1); check_added_monitors(&nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [node_a_id], 1000000); + let reason = ClosureReason::FundingTimedOut; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 1000000); let close_ev = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(close_ev.len(), 1); match close_ev[0] { @@ -7287,7 +7318,7 @@ pub fn test_update_err_monitor_lockdown() { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); { @@ -7443,7 +7474,7 @@ pub fn test_concurrent_monitor_claim() { nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(node_b_id, &updates.update_add_htlcs[0]); { @@ -7501,7 +7532,7 @@ pub fn test_concurrent_monitor_claim() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); check_closed_broadcast(&nodes[0], 1, true); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash_timeout) }; - check_closed_event!(&nodes[0], 1, reason, false, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); watchtower_alice.chain_monitor.block_connected( &create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height, @@ -7561,7 +7592,7 @@ pub fn test_pre_lockin_no_chan_closed_update() { assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }; - check_closed_event!(nodes[0], 2, reason, true, [node_b_id], 100000); + check_closed_event(&nodes[0], 2, reason, true, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -7602,7 +7633,8 @@ pub fn test_htlc_no_detection() { ); 
check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); let htlc_timeout = { @@ -7685,7 +7717,14 @@ fn do_test_onchain_htlc_settlement_after_close( check_closed_broadcast!(nodes[force_closing_node], true); check_added_monitors(&nodes[force_closing_node], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[force_closing_node], 1, reason, [counterparty_node_id], 100000); + check_closed_event( + &nodes[force_closing_node], + 1, + reason, + false, + &[counterparty_node_id], + 100000, + ); if go_onchain_before_fulfill { let txn_to_broadcast = match broadcast_alice { @@ -7700,7 +7739,7 @@ fn do_test_onchain_htlc_settlement_after_close( check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } } @@ -7711,7 +7750,7 @@ fn do_test_onchain_htlc_settlement_after_close( check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - let mut carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut carol_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(carol_updates.update_add_htlcs.is_empty()); assert!(carol_updates.update_fail_htlcs.is_empty()); assert!(carol_updates.update_fail_malformed_htlcs.is_empty()); @@ -7789,7 +7828,7 @@ fn do_test_onchain_htlc_settlement_after_close( check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); if broadcast_alice { @@ -7981,7 +8020,7 @@ pub fn test_error_chans_closed() { let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); assert_eq!(nodes[0].node.list_usable_channels().len(), 2); @@ -8004,7 +8043,7 @@ pub fn test_error_chans_closed() { let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; - check_closed_event!(nodes[0], 2, reason, [node_b_id; 2], 100000); + check_closed_event(&nodes[0], 2, reason, false, &[node_b_id; 2], 100000); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -8072,7 +8111,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t check_closed_broadcast(&nodes[1], 1, false); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); check_added_monitors(&nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); 
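(The fail-back plumbing around these conversions is unchanged; a sketch of the typical flow, assuming the node ids and the commitment-dance helper used throughout these tests:)

    // Fetch the update messages nodes[1] has queued for nodes[0]...
    let updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
    assert_eq!(updates.update_fail_htlcs.len(), 1);
    // ...deliver the failure upstream and settle the new commitment state.
    nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]);
    do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);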
@@ -8116,7 +8155,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t ); check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -8223,7 +8262,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id); + let fail_updates_1 = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]); @@ -8258,7 +8297,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id); + let fail_updates_1 = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]); let commitment = &fail_updates_1.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, false); @@ -8391,7 +8430,7 @@ pub fn test_inconsistent_mpp_params() { check_added_monitors(&nodes[3], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id); + let fail_updates_1 = get_htlc_update_msgs(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); let commitment = &fail_updates_1.commitment_signed; do_commitment_signed_dance(&nodes[2], &nodes[3], commitment, false, false); @@ -8402,7 +8441,7 @@ pub fn test_inconsistent_mpp_params() { ); check_added_monitors(&nodes[2], 1); - let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); + let fail_updates_2 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); let commitment = &fail_updates_2.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[2], commitment, false, false); @@ -9018,7 +9057,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { check_added_monitors(&nodes[0], 1); // Clear the failed htlc - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -9194,7 +9233,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) check_added_monitors(&nodes[1], 1); // Clear the failed htlc - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -9362,7 +9401,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash check_added_monitors(&nodes[1], 1); - let fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates.update_fail_htlcs[0]); let commitment = &fail_updates.commitment_signed; 
do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, true); @@ -9803,7 +9842,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); for dest in &[1, 2] { - let mut htlc_fulfill = get_htlc_update_msgs!(nodes[*dest], node_a_id); + let mut htlc_fulfill = get_htlc_update_msgs(&nodes[*dest], &node_a_id); let dest_node_id = nodes[*dest].node.get_our_node_id(); nodes[0] .node diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 3a1fc876747..2b59f46edeb 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -332,7 +332,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { // the pending htlc should be promoted to committed nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack); check_added_monitors(&nodes[0], 1); - let commitment_update_2 = get_htlc_update_msgs!(nodes[0], node_b_id); + let commitment_update_2 = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); @@ -507,7 +507,7 @@ pub fn channel_reserve_in_flight_removes() { nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); check_added_monitors(&nodes[1], 1); - let mut bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_removes = get_htlc_update_msgs(&nodes[1], &node_a_id); // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not // remove the second HTLC when we send the HTLC back from B to A. @@ -530,11 +530,11 @@ pub fn channel_reserve_in_flight_removes() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors(&nodes[1], 1); - let mut bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[0], 1); - let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_cs = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); check_added_monitors(&nodes[1], 1); @@ -563,7 +563,7 @@ pub fn channel_reserve_in_flight_removes() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); - let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_cs = get_htlc_update_msgs(&nodes[0], &node_b_id); // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing. 
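(In the reserve-violation tests below, the expected ProcessingError reason is built from the error message the node already broadcast, avoiding a second copy of the full error string; a sketch of that pattern, assuming `err_msg` comes from check_closed_broadcast! as in those hunks:)

    let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
    assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
    check_added_monitors(&nodes[1], 1);
    // Reuse the broadcast error text as the expected closure reason.
    let reason = ClosureReason::ProcessingError { err: err_msg.data };
    check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);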
@@ -596,7 +596,7 @@ pub fn channel_reserve_in_flight_removes() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); - let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_cs = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); check_added_monitors(&nodes[1], 1); @@ -683,7 +683,7 @@ pub fn holding_cell_htlc_counting() { expect_htlc_failure_conditions(events, &[fail]); check_added_monitors(&nodes[1], 1); - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); let commitment = &bs_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, true); @@ -701,7 +701,7 @@ pub fn holding_cell_htlc_counting() { let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); check_added_monitors(&nodes[1], 1); - let as_updates = get_htlc_update_msgs!(nodes[1], node_c_id); + let as_updates = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); check_added_monitors(&nodes[1], 1); @@ -1086,9 +1086,9 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { assert_eq!(nodes[0].node.list_channels().len(), 0); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); + let reason = ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }; check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, - [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1276,7 +1276,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data.clone() }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1381,7 +1381,7 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[0], &node_b_id); updates.update_add_htlcs[0].amount_msat = 0; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1395,7 +1395,7 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let reason = ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string(), }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + 
check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1543,7 +1543,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[0], &node_b_id); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); @@ -1551,7 +1551,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1581,7 +1581,7 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[0], &node_b_id); // Even though channel-initiator senders are required to respect the fee_spike_reserve, // at this time channel-initiatee receivers are not required to enforce that senders @@ -1594,7 +1594,7 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1662,7 +1662,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1684,7 +1684,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[0], &node_b_id); updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2) .counterparty_max_htlc_value_in_flight_msat + 1; @@ -1697,7 +1697,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 1000000); + 
check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 1000000); } #[xtest(feature = "_externalize_tests")] @@ -1718,7 +1718,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, reason, id).unwrap(); check_added_monitors(&nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[0], &node_b_id); updates.update_add_htlcs[0].cltv_expiry = 500000000; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1727,7 +1727,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1751,7 +1751,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); //Disconnect and Reconnect @@ -1793,7 +1793,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1816,7 +1816,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFulfillHTLC { @@ -1837,7 +1837,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1860,7 +1860,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFailHTLC { @@ -1881,7 +1881,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: 
err_msg.data }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1904,7 +1904,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFailMalformedHTLC { channel_id: chan.2, @@ -1924,7 +1924,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1983,7 +1983,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -2044,7 +2044,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -2068,7 +2068,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[0], &node_b_id); updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -2115,7 +2115,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1000000); } #[xtest(feature = "_externalize_tests")] diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index db310f5add5..dca18e20bf8 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -72,12 +72,12 @@ fn chanmon_fail_from_stale_commitment() { let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment @@ -85,13 +85,13 @@ fn chanmon_fail_from_stale_commitment() { mine_transaction(&nodes[1], &bs_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[2].node.get_our_node_id()], 100000); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); - let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let fail_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &fail_updates.commitment_signed, true, true); @@ -141,7 +141,7 @@ fn revoked_output_htlc_resolution_timing() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); // Two justice transactions will be broadcast, one on the unpinnable, revoked to_self output, // and one on the pinnable revoked HTLC output. 
@@ -188,7 +188,7 @@ fn archive_fully_resolved_monitors() { check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1_000_000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 1_000_000); let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(commitment_tx.len(), 1); @@ -196,7 +196,7 @@ fn archive_fully_resolved_monitors() { mine_transaction(&nodes[0], &commitment_tx[0]); mine_transaction(&nodes[1], &commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event!(nodes[1], 1, reason, [nodes[0].node.get_our_node_id()], 1_000_000); + check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 1_000_000); check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); @@ -429,8 +429,8 @@ fn do_chanmon_claim_value_coop_close(keyed_anchors: bool, p2a_anchor: bool) { spendable_outputs_b ); - check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, false, &[nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, false, &[nodes[0].node.get_our_node_id()], 1000000); } #[test] @@ -568,7 +568,7 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 3_000_100); - let mut b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); + let mut b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); // We claim the dust payment here as well, but it won't impact our claimable balances as its // dust and thus doesn't appear on chain at all. 
nodes[1].node.claim_funds(dust_payment_preimage); @@ -588,7 +588,7 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c check_added_monitors!(nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); - let _htlc_updates = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); + let _htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); let _bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -682,11 +682,11 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 1000000); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -887,7 +887,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); @@ -899,14 +899,14 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); nodes[1].node.claim_funds(payment_preimage_2); - get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); @@ -921,7 +921,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = 
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { handle_bump_close_event(&nodes[0]); } @@ -981,7 +981,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_claim_txn.len(), 1); check_spends!(bs_htlc_claim_txn[0], commitment_tx); @@ -1211,7 +1211,7 @@ fn test_no_preimage_inbound_htlc_balances() { nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); @@ -1219,7 +1219,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[1], &as_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; let mut bs_pre_spend_claims = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { @@ -1428,7 +1428,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc nodes[1].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_100); check_added_monitors!(nodes[1], 1); - let _b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); + let _b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[0], true); @@ -1724,7 +1724,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { handle_bump_htlc_event(&nodes[1], 1); } @@ -1768,7 +1768,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, 
ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 1000000); let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1; let revoked_to_self_claim = { @@ -2021,7 +2021,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho nodes[0].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_100); check_added_monitors!(nodes[0], 1); - let _a_htlc_msgs = get_htlc_update_msgs!(&nodes[0], nodes[1].node.get_our_node_id()); + let _a_htlc_msgs = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { balance_candidates: vec![HolderCommitmentTransactionBalance { @@ -2048,7 +2048,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); check_added_monitors!(nodes[1], 1); let mut claim_txn = nodes[1].tx_broadcaster.txn_broadcast(); @@ -2090,7 +2090,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho mine_transaction(&nodes[0], &as_revoked_txn[0]); check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, [nodes[1].node.get_our_node_id()], 1_000_000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 1_000_000); handle_bump_htlc_event(&nodes[0], 1); } let htlc_success_claim = if keyed_anchors || p2a_anchor { @@ -2338,8 +2338,7 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool check_added_monitors(&nodes[0], 1); check_closed_broadcast(&nodes[0], 1, true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(&nodes[0], 1, reason, false, - [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); let commitment_tx = { let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); @@ -2422,8 +2421,8 @@ fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: b check_spends!(&commitment_txn[0], &funding_tx); mine_transaction(&nodes[0], &commitment_txn[0]); check_closed_broadcast!(&nodes[0], true); - check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, - false, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, + false, &[nodes[1].node.get_our_node_id()], 1000000); check_added_monitors(&nodes[0], 1); // Set up a helper closure we'll use throughout our test. We should only expect retries without @@ -2788,7 +2787,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { // Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to // broadcast the latest commitment transaction known to them, which in our case is the one with // the HTLCs still pending. 
- check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager, [nodes[0].node.get_our_node_id(); 2], 1000000); + check_closed_event(&nodes[1], 2, ClosureReason::OutdatedChannelManager, false, &[nodes[0].node.get_our_node_id(); 2], 1000000); check_added_monitors(&nodes[1], 2); // Bob should now receive two events to bump his revoked commitment transaction fees. @@ -2831,7 +2830,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { } check_closed_broadcast(&nodes[0], 2, true); check_added_monitors!(&nodes[0], 2); - check_closed_event!(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id(); 2], 1000000); + check_closed_event(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id(); 2], 1000000); // Alice should detect the confirmed revoked commitments, and attempt to claim all of the // revoked outputs in aggregated transactions per channel, grouped into pinnable and unpinnable @@ -3063,8 +3062,7 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c check_added_monitors(&nodes[0], 1); check_closed_broadcast(&nodes[0], 1, true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(&nodes[0], 1, reason, false, - [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); handle_bump_close_event(&nodes[0]); let commitment_tx = { @@ -3093,8 +3091,7 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c check_added_monitors(&nodes[1], 1); commitment_tx_conf_height }; - check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, - [nodes[0].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000); assert!(get_monitor!(nodes[1], chan_id).get_counterparty_payment_script().is_p2wsh()); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); @@ -3173,12 +3170,12 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor check_closed_broadcast!(closing_node, true); check_added_monitors!(closing_node, 1); let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string(); - check_closed_event!(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, [other_node.node.get_our_node_id()], 1_000_000); + check_closed_event(&closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, false, &[other_node.node.get_our_node_id()], 1_000_000); mine_transaction(other_node, &commitment_tx); check_closed_broadcast!(other_node, true); check_added_monitors!(other_node, 1); - check_closed_event!(other_node, 1, ClosureReason::CommitmentTxConfirmed, [closing_node.node.get_our_node_id()], 1_000_000); + check_closed_event(&other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000); // If we update the best block to the new height before providing the confirmed transactions, // we'll see another broadcast of the commitment transaction. 
@@ -3464,7 +3461,7 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bo .unwrap(); check_added_monitors(&nodes[2], 1); let c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[2], 1, c_reason, [node_b_id], 1_000_000); + check_closed_event(&nodes[2], 1, c_reason, false, &[node_b_id], 1_000_000); check_closed_broadcast(&nodes[2], 1, false); handle_bump_events(&nodes[2], true, 0); @@ -3478,7 +3475,7 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bo .unwrap(); check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, b_reason, [node_c_id], 1_000_000); + check_closed_event(&nodes[1], 1, b_reason, false, &[node_c_id], 1_000_000); check_closed_broadcast(&nodes[1], 1, false); handle_bump_events(&nodes[1], true, 0); @@ -3678,7 +3675,7 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b .unwrap(); check_added_monitors(&nodes[2], 1); let c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[2], 1, c_reason, [node_b_id], 1_000_000); + check_closed_event(&nodes[2], 1, c_reason, false, &[node_b_id], 1_000_000); check_closed_broadcast(&nodes[2], 1, false); handle_bump_events(&nodes[2], true, 0); @@ -3692,7 +3689,7 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b .unwrap(); check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, b_reason, [node_c_id], 1_000_000); + check_closed_event(&nodes[1], 1, b_reason, false, &[node_c_id], 1_000_000); check_closed_broadcast(&nodes[1], 1, false); handle_bump_events(&nodes[1], true, 0); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 3a6965c6646..2d274002bcf 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2424,7 +2424,7 @@ fn rejects_keysend_to_non_static_invoice_path() { .with_payment_preimage(payment_preimage) .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); - let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); @@ -2506,7 +2506,7 @@ fn no_double_pay_with_stale_channelmanager() { let monitor_1 = get_monitor!(nodes[0], chan_id_1).encode(); reload_node!(nodes[0], &alice_chan_manager_serialized, &[&monitor_0, &monitor_1], persister, chain_monitor, alice_deserialized); // The stale manager results in closing the channels. - check_closed_event!(nodes[0], 2, ClosureReason::OutdatedChannelManager, [bob_id, bob_id], 10_000_000); + check_closed_event(&nodes[0], 2, ClosureReason::OutdatedChannelManager, false, &[bob_id, bob_id], 10_000_000); check_added_monitors!(nodes[0], 2); // Alice receives a duplicate invoice, but the payment should be transitioned to Retryable by now. 
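Illustrative aside on the `check_closed_event` conversions (not part of the patch): the function form used throughout this diff adds an explicit boolean and passes the node and counterparty IDs by reference. The stub below only sketches the interface as it can be inferred from those call sites; the parameter names, in particular the boolean (which appears to map to the macro's optional discard-funding check), are assumptions rather than the crate's actual code.

    // Before (macro form) vs. after (function form), as seen throughout this patch:
    //     check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
    //     check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
    //
    // Interface sketch inferred from those call sites; the body is intentionally a stub.
    pub fn check_closed_event(
        node: &Node<'_, '_, '_>, events_count: usize, expected_reason: ClosureReason,
        is_check_discard_funding: bool, expected_counterparty_node_ids: &[PublicKey],
        expected_channel_capacity_sats: u64,
    ) {
        // Presumably asserts that `events_count` ChannelClosed events are pending with the
        // given reason, counterparty IDs and channel capacity, plus a DiscardFunding event
        // when the flag is set.
        unimplemented!("illustrative stub only")
    }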
diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 0581e4de3a6..3c764a08218 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -134,7 +134,7 @@ fn run_onion_failure_test_with_fail_intercept( .send_payment_with_route(route.clone(), *payment_hash, recipient_onion, payment_id) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); // temper update_add (0 => 1) let mut update_add_0 = update_0.update_add_htlcs[0].clone(); if test_case == 0 || test_case == 3 || test_case == 100 { @@ -154,7 +154,7 @@ fn run_onion_failure_test_with_fail_intercept( &[expected_failure_type.clone().unwrap()] ); check_added_monitors(&nodes[1], 1); - let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1_0 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); let fail_len = update_1_0.update_fail_htlcs.len(); let malformed_len = update_1_0.update_fail_malformed_htlcs.len(); assert!(fail_len + malformed_len == 1); @@ -169,7 +169,7 @@ fn run_onion_failure_test_with_fail_intercept( } expect_htlc_forward!(&nodes[1]); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert_eq!(update_1.update_add_htlcs.len(), 1); // tamper update_add (1 => 2) @@ -204,7 +204,7 @@ fn run_onion_failure_test_with_fail_intercept( } check_added_monitors!(&nodes[2], 1); - let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let update_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert!(update_2_1.update_fail_htlcs.len() == 1); let mut fail_msg = update_2_1.update_fail_htlcs[0].clone(); @@ -224,7 +224,7 @@ fn run_onion_failure_test_with_fail_intercept( do_commitment_signed_dance(&nodes[1], &nodes[2], commitment, true, false); // backward fail on 1 - let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1_0 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(update_1_0.update_fail_htlcs.len() == 1); update_1_0 }, @@ -1549,7 +1549,7 @@ fn test_overshoot_final_cltv() { .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); do_commitment_signed_dance(&nodes[1], &nodes[0], &update_0.commitment_signed, false, true); @@ -1568,7 +1568,7 @@ fn test_overshoot_final_cltv() { expect_and_process_pending_htlcs(&nodes[1], false); check_added_monitors!(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); do_commitment_signed_dance(&nodes[2], &nodes[1], &update_1.commitment_signed, false, true); @@ -2436,7 +2436,7 @@ fn test_phantom_onion_hmac_failure() { .send_payment_with_route(route, payment_hash, 
recipient_onion, PaymentId(payment_hash.0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2469,7 +2469,7 @@ fn test_phantom_onion_hmac_failure() { &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); @@ -2509,7 +2509,7 @@ fn test_phantom_invalid_onion_payload() { .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2570,7 +2570,7 @@ fn test_phantom_invalid_onion_payload() { &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); @@ -2608,7 +2608,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2636,7 +2636,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); @@ -2677,7 +2677,7 @@ fn test_phantom_failure_too_low_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2690,7 +2690,7 @@ fn test_phantom_failure_too_low_cltv() { &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = 
get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); @@ -2730,7 +2730,7 @@ fn test_phantom_failure_modified_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); // Modify the route to have a too-low cltv. @@ -2745,7 +2745,7 @@ fn test_phantom_failure_modified_cltv() { ); check_added_monitors(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2785,7 +2785,7 @@ fn test_phantom_failure_expires_too_soon() { .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); connect_blocks(&nodes[1], CLTV_FAR_FAR_AWAY); @@ -2798,7 +2798,7 @@ fn test_phantom_failure_expires_too_soon() { ); check_added_monitors(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2835,7 +2835,7 @@ fn test_phantom_failure_too_low_recv_amt() { .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2850,7 +2850,7 @@ fn test_phantom_failure_too_low_recv_amt() { &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }], ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); @@ -2905,7 +2905,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); 
let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2917,7 +2917,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { ); check_added_monitors(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2955,7 +2955,7 @@ fn test_phantom_failure_reject_payment() { .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); @@ -2980,7 +2980,7 @@ fn test_phantom_failure_reject_payment() { ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index ac26f79ef99..2c58eabff50 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -164,7 +164,7 @@ fn mpp_retry() { let events = nodes[2].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; expect_htlc_failure_conditions(events, &[fail]); - let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); + let htlc_updates = get_htlc_update_msgs(&nodes[2], &node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); @@ -283,7 +283,7 @@ fn mpp_retry_overpay() { let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; expect_htlc_failure_conditions(events, &[fail]); - let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); + let htlc_updates = get_htlc_update_msgs(&nodes[2], &node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); @@ -381,7 +381,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let fail = HTLCHandlingFailureType::Receive { payment_hash: hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[3], &[fail]); - let htlc_fail_updates = get_htlc_update_msgs!(nodes[3], node_b_id); + let htlc_fail_updates = get_htlc_update_msgs(&nodes[3], &node_b_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]); check_added_monitors!(nodes[3], 1); @@ -394,7 +394,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); 
- let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); check_added_monitors!(nodes[1], 1); @@ -623,14 +623,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); + let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(node_a_id, &update_add_0); do_commitment_signed_dance(&nodes[1], &nodes[0], &update_0.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[1], false); check_added_monitors!(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], node_d_id); + let update_1 = get_htlc_update_msgs(&nodes[1], &node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); do_commitment_signed_dance(&nodes[3], &nodes[1], &update_1.commitment_signed, false, true); @@ -672,14 +672,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap(); check_added_monitors!(nodes[0], 1); - let update_2 = get_htlc_update_msgs!(nodes[0], node_c_id); + let update_2 = get_htlc_update_msgs(&nodes[0], &node_c_id); let update_add_2 = update_2.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(node_a_id, &update_add_2); do_commitment_signed_dance(&nodes[2], &nodes[0], &update_2.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[2], false); check_added_monitors!(&nodes[2], 1); - let update_3 = get_htlc_update_msgs!(nodes[2], node_d_id); + let update_3 = get_htlc_update_msgs(&nodes[2], &node_d_id); let update_add_3 = update_3.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3); do_commitment_signed_dance(&nodes[3], &nodes[2], &update_3.commitment_signed, false, true); @@ -713,7 +713,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] - let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &node_c_id); + let update_fail_0 = get_htlc_update_msgs(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &update_fail_0.update_fail_htlcs[0]); let commitment = &update_fail_0.commitment_signed; do_commitment_signed_dance(&nodes[2], &nodes[3], commitment, false, false); @@ -723,7 +723,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); check_added_monitors!(nodes[2], 1); - let update_fail_1 = get_htlc_update_msgs!(nodes[2], node_a_id); + let update_fail_1 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &update_fail_1.update_fail_htlcs[0]); let commitment = &update_fail_1.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[2], commitment, false, false); @@ -830,7 +830,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected - let _ = get_htlc_update_msgs!(nodes[1], node_a_id); + let _ 
= get_htlc_update_msgs(&nodes[1], &node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); @@ -849,7 +849,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. - check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [node_b_id], 100000); + let reason = ClosureReason::OutdatedChannelManager; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); nodes[0].node.timer_tick_occurred(); @@ -887,8 +888,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - &node_b_id)) }, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", + &node_b_id)) }, false, &[node_a_id], 100000); check_added_monitors!(nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); @@ -903,7 +904,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let mut htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut htlc_fulfill = get_htlc_update_msgs(&nodes[2], &node_b_id); let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0); nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); check_added_monitors!(nodes[1], 1); @@ -1065,7 +1066,14 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. 
- check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [node_b_id], 100000); + check_closed_event( + &nodes[0], + 1, + ClosureReason::OutdatedChannelManager, + false, + &[node_b_id], + 100000, + ); nodes[0].node.timer_tick_occurred(); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); @@ -1100,7 +1108,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { &node_b_id ); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); check_added_monitors!(nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); }, @@ -1116,7 +1124,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); check_added_monitors!(nodes[2], 1); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let htlc_fulfill_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); let commitment = &htlc_fulfill_updates.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[2], commitment, false, false); @@ -1272,7 +1280,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); @@ -1294,7 +1302,8 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast(&nodes[1], 1, false); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); let htlc_success_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -1451,7 +1460,7 @@ fn test_fulfill_restart_failure() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 100_000); - let mut htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0); nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill_msg); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -1467,7 +1476,7 @@ fn test_fulfill_restart_failure() { expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); check_added_monitors!(nodes[1], 1); - let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, false); @@ -1557,7 +1566,7 @@ fn sent_probe_is_probe_of_sending_node() { _ => panic!(), } - get_htlc_update_msgs!(nodes[0], 
node_b_id); + get_htlc_update_msgs(&nodes[0], &node_b_id); check_added_monitors!(nodes[0], 1); } @@ -1606,7 +1615,7 @@ fn failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates); nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); @@ -1615,7 +1624,7 @@ fn failed_probe_yields_event() { // node[0] <- update_fail_htlcs -- node[1] check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let _events = nodes[1].node.get_and_clear_pending_events(); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); check_added_monitors!(nodes[0], 0); @@ -1657,7 +1666,7 @@ fn onchain_failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates); nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); @@ -1665,7 +1674,7 @@ fn onchain_failed_probe_yields_event() { expect_and_process_pending_htlcs(&nodes[1], false); check_added_monitors!(nodes[1], 1); - let _ = get_htlc_update_msgs!(nodes[1], node_c_id); + let _ = get_htlc_update_msgs(&nodes[1], &node_c_id); // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on // Node A, which after 6 confirmations should result in a probe failure event. 
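Illustrative aside (not part of the patch): the probe and payment tests above keep repeating one fail-back sequence with the converted helpers. The composite below is stitched together from calls that appear verbatim in this diff; the bindings (`node_a_id`, `node_b_id`, `node_c_id`, `chan_2_id`) are assumed to come from the usual three-node `functional_test_utils` setup with an HTLC in flight toward nodes[2].

    // nodes[1] fails the forward and queues an update_fail_htlc for the sender.
    let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id };
    expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail]);
    check_added_monitors(&nodes[1], 1);

    // Fetch the pending fail and deliver it back to nodes[0], then finish the
    // commitment_signed dance so the failure is irrevocably committed.
    let fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
    assert_eq!(fail_updates.update_fail_htlcs.len(), 1);
    nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates.update_fail_htlcs[0]);
    do_commitment_signed_dance(&nodes[0], &nodes[1], &fail_updates.commitment_signed, false, false);
    // The sender then surfaces the failure, e.g. via expect_payment_failed_conditions(..).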
@@ -2307,7 +2316,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[fail]); nodes[1].node.process_pending_htlc_forwards(); - let update_fail = get_htlc_update_msgs!(nodes[1], node_a_id); + let update_fail = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors!(&nodes[1], 1); assert!(update_fail.update_fail_htlcs.len() == 1); let fail_msg = update_fail.update_fail_htlcs[0].clone(); @@ -2395,7 +2404,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); check_added_monitors!(nodes[1], 1); - let htlc_fail = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_fail = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(htlc_fail.update_add_htlcs.is_empty()); assert_eq!(htlc_fail.update_fail_htlcs.len(), 1); assert!(htlc_fail.update_fail_malformed_htlcs.is_empty()); @@ -2647,7 +2656,7 @@ fn do_automatic_retries(test: AutoRetry) { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { // Send a payment attempt that fails due to lack of liquidity on the second hop check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); + let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(node_a_id, &update_add); let commitment = &update_0.commitment_signed; @@ -2662,7 +2671,7 @@ fn do_automatic_retries(test: AutoRetry) { }], ); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], node_a_id); + let update_1 = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); @@ -3051,7 +3060,7 @@ fn auto_retry_partial_failure() { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, amt_msat); - let mut bs_claim = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_claim = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(bs_claim.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_claim.update_fulfill_htlcs.remove(0)); @@ -3062,7 +3071,7 @@ fn auto_retry_partial_failure() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa); check_added_monitors!(nodes[1], 4); - let mut bs_2nd_claim = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_2nd_claim = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs); check_added_monitors!(nodes[1], 1); @@ -3584,7 +3593,7 @@ fn no_extra_retries_on_back_to_back_fail() { ); check_added_monitors(&nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_fail_update = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(bs_fail_update.update_fail_htlcs.len(), 2); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[0]); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[1]); @@ -3642,7 +3651,7 @@ fn no_extra_retries_on_back_to_back_fail() { ); check_added_monitors(&nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let 
bs_fail_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[0]); let commitment = &bs_fail_update.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, true); @@ -3868,7 +3877,7 @@ fn test_simple_partial_retry() { expect_and_process_pending_htlcs(&nodes[1], false); check_added_monitors!(nodes[1], 1); - let bs_second_forward = get_htlc_update_msgs!(nodes[1], node_c_id); + let bs_second_forward = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]); let commitment = &bs_second_forward.commitment_signed; do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); @@ -4058,7 +4067,7 @@ fn test_threaded_payment_retries() { route.route_params = Some(new_route_params.clone()); nodes[0].router.expect_find_route(new_route_params, Ok(route.clone())); - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); // The "normal" commitment_signed_dance delivers the final RAA and then calls // `check_added_monitors` to ensure only the one RAA-generated monitor update was created. @@ -4127,12 +4136,12 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); if at_midpoint { - let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); } else { - let mut fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill.update_fulfill_htlcs.remove(0)); do_commitment_signed_dance(&nodes[0], &nodes[1], &fulfill.commitment_signed, false, false); // Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd @@ -4340,7 +4349,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { .unwrap(); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(&nodes[1], 1, reason, false, [node_d_id], 1000000); + check_closed_event(&nodes[1], 1, reason, false, &[node_d_id], 1000000); check_closed_broadcast(&nodes[1], 1, true); let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_tx.len(), 1); @@ -4349,7 +4358,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { check_closed_broadcast(&nodes[3], 1, true); check_added_monitors(&nodes[3], 1); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event!(&nodes[3], 1, reason, false, [node_b_id], 1000000); + check_closed_event(&nodes[3], 1, reason, false, &[node_b_id], 1000000); nodes[3].node.claim_funds(payment_preimage); check_added_monitors(&nodes[3], 2); @@ -4551,7 +4560,7 @@ fn test_retry_custom_tlvs() { expect_htlc_failure_conditions(events, &[fail]); check_added_monitors!(nodes[1], 1); - let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, 
commitment_signed, .. } = htlc_updates; assert_eq!(update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); @@ -4744,7 +4753,7 @@ fn do_test_custom_tlvs_consistency( ); check_added_monitors!(nodes[3], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id); + let fail_updates_1 = get_htlc_update_msgs(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); let commitment = &fail_updates_1.commitment_signed; do_commitment_signed_dance(&nodes[2], &nodes[3], commitment, false, false); @@ -4754,7 +4763,7 @@ fn do_test_custom_tlvs_consistency( expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail]); check_added_monitors!(nodes[2], 1); - let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); + let fail_updates_2 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); let commitment = &fail_updates_2.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[2], commitment, false, false); @@ -5229,7 +5238,7 @@ fn test_non_strict_forwarding() { HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_chan_id }; expect_htlc_failure_conditions(events, &[fail]); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); let events = nodes[0].node.get_and_clear_pending_events(); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ce2da8d7d07..c14d730d9ad 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -94,7 +94,7 @@ fn test_priv_forwarding_rejection() { ); check_added_monitors(&nodes[1], 1); - let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(htlc_fail_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); assert!(htlc_fail_updates.update_fail_malformed_htlcs.is_empty()); @@ -622,7 +622,7 @@ fn test_inbound_scid_privacy() { 1, ); - let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); @@ -698,7 +698,7 @@ fn test_scid_alias_returned() { nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); - let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -711,7 +711,7 @@ fn test_scid_alias_returned() { expect_htlc_failure_conditions(events, &expected_failures); check_added_monitors!(nodes[1], 1); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); 
do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, true); @@ -735,7 +735,7 @@ fn test_scid_alias_returned() { nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); - let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -749,7 +749,7 @@ fn test_scid_alias_returned() { ); check_added_monitors(&nodes[1], 1); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, true); @@ -1012,7 +1012,7 @@ fn test_0conf_close_no_early_chan_update() { nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone()); check_added_monitors!(nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); } @@ -1128,14 +1128,14 @@ fn test_0conf_channel_reorg() { err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." .to_owned(), }; - check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." .to_owned(), }; - check_closed_event!(&nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); } diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 9b1eab9842b..6daf4d65b9d 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -104,7 +104,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { check_added_monitors!(local_node, 1); // Attempt to send an HTLC, but don't fully commit it yet. 
- let update_add = get_htlc_update_msgs!(local_node, remote_node_id); + let update_add = get_htlc_update_msgs(&local_node, &remote_node_id); remote_node.node.handle_update_add_htlc(local_node_id, &update_add.update_add_htlcs[0]); remote_node .node @@ -149,7 +149,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { ); check_added_monitors(remote_node, 1); - let update_fail = get_htlc_update_msgs!(remote_node, local_node_id); + let update_fail = get_htlc_update_msgs(&remote_node, &local_node_id); local_node.node.handle_update_fail_htlc(remote_node_id, &update_fail.update_fail_htlcs[0]); local_node .node @@ -201,7 +201,7 @@ fn test_quiescence_waits_for_async_signer_and_monitor_update() { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(&nodes[1], payment_hash, payment_amount); - let mut update = get_htlc_update_msgs!(&nodes[1], node_id_0); + let mut update = get_htlc_update_msgs(&nodes[1], &node_id_0); nodes[0].node.handle_update_fulfill_htlc(node_id_1, update.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(node_id_1, &update.commitment_signed); check_added_monitors(&nodes[0], 1); @@ -313,7 +313,7 @@ fn test_quiescence_on_final_revoke_and_ack_pending_monitor_update() { let stfu = get_event_msg!(&nodes[1], MessageSendEvent::SendStfu, node_id_0); nodes[0].node.handle_stfu(node_id_1, &stfu); - let update_add = get_htlc_update_msgs!(&nodes[0], node_id_1); + let update_add = get_htlc_update_msgs(&nodes[0], &node_id_1); nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_id_0, &update_add.commitment_signed); check_added_monitors(&nodes[1], 1); @@ -385,7 +385,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap(); check_added_monitors!(&nodes[0], 1); - let update_add = get_htlc_update_msgs!(&nodes[0], node_id_1); + let update_add = get_htlc_update_msgs(&nodes[0], &node_id_1); nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &update_add.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[1], false); @@ -413,7 +413,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // Now that quiescence is over, nodes are allowed to make updates again. nodes[1] will have its // outbound HTLC finally go out, along with the fail/claim of nodes[0]'s payment. 
- let mut update = get_htlc_update_msgs!(&nodes[1], node_id_0); + let mut update = get_htlc_update_msgs(&nodes[1], &node_id_0); check_added_monitors(&nodes[1], 1); nodes[0].node.handle_update_add_htlc(node_id_1, &update.update_add_htlcs[0]); if fail_htlc { @@ -448,7 +448,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { } check_added_monitors(&nodes[0], 1); - let mut update = get_htlc_update_msgs!(&nodes[0], node_id_1); + let mut update = get_htlc_update_msgs(&nodes[0], &node_id_1); if fail_htlc { nodes[1].node.handle_update_fail_htlc(node_id_0, &update.update_fail_htlcs[0]); } else { diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c1e46fdc50c..094b54dff5a 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -466,7 +466,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { } nodes[0].node = &nodes_0_deserialized; - check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[3].node.get_our_node_id()], 100000); { // Channel close should result in a commitment tx nodes[0].node.timer_tick_occurred(); let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -631,9 +631,9 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, // has sent, but not a newer revocation secret, so A just (correctly) closes. check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { + check_closed_event(&nodes[0], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a future remote commitment transaction: 2 (received) vs 1 (expected)".to_owned() - }, [nodes[1].node.get_our_node_id()], 1000000); + }, false, &[nodes[1].node.get_our_node_id()], 1000000); } else { assert!(reconnect_res.is_err()); // Skip the `Drop` handler for `Node`s as some may be in an invalid (panicked) state. @@ -650,7 +650,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, check_added_monitors!(nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, reason, false, &[nodes[1].node.get_our_node_id()], 1000000); { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -698,8 +698,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } - , [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } + , false, &[nodes[0].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], false); } } @@ -890,7 +890,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest let mut fulfill = updates.update_fulfill_htlcs.remove(0); nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), fulfill); check_added_monitors!(nodes[2], 1); - let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); do_commitment_signed_dance(&nodes[2], &nodes[3], &updates.commitment_signed, false, true); cs_updates @@ -1007,7 +1007,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht check_added_monitors!(nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[2], 1, reason, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[2], 1, reason, false, &[nodes[1].node.get_our_node_id()], 100000); check_closed_broadcast!(nodes[2], true); let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); @@ -1016,7 +1016,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht // Note that this checks that this is the only event on nodes[1], implying the // `HTLCIntercepted` event has been removed in the `use_intercept` case. - check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100000); if use_intercept { // Attempt to forward the HTLC back out over nodes[1]' still-open channel, ensuring we get @@ -1268,7 +1268,7 @@ fn test_htlc_localremoved_persistence() { RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1276,7 +1276,7 @@ fn test_htlc_localremoved_persistence() { check_added_monitors(&nodes[1], 1); // Save the update_fail_htlc message for later comparison. - let msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); let htlc_fail_msg = msgs.update_fail_htlcs[0].clone(); // Reload nodes. 
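The closure-event assertion follows the same pattern; below is a short sketch of the new function-based call, mirroring the call sites in this patch. The node goes by reference, the counterparty ids become a slice, an explicit boolean (false at every call site shown here) sits in the fourth position, and the final value is the channel capacity in sats. Names and amounts are illustrative, not part of the patch:

    // Old form (macro), counterparty ids in a bare array:
    // check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);

    // New form (function): reference to the node, explicit bool, slice of ids, capacity.
    let node_b_id = nodes[1].node.get_our_node_id();
    let reason = ClosureReason::CommitmentTxConfirmed;
    check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000);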
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 8d7b3f5546d..d1988eedda0 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -66,7 +66,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { nodes[2].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000); check_added_monitors!(nodes[2], 1); - get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let claim_txn = if local_commitment { // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout @@ -80,7 +80,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone())); check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 100000); let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]); @@ -114,7 +114,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { }; check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[2].node.get_our_node_id()], 100000); // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1. 
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); check_added_monitors!(nodes[1], 0); @@ -141,7 +141,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { check_added_monitors!(nodes[1], 1); // Which should result in an immediate claim/fail of the HTLC: - let mut htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); if claim { assert_eq!(htlc_updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), htlc_updates.update_fulfill_htlcs.remove(0)); @@ -198,7 +198,7 @@ fn test_counterparty_revoked_reorg() { let payment_hash_4 = route_payment(&nodes[1], &[&nodes[0]], 4_000).1; nodes[0].node.claim_funds(payment_preimage_3); - let _ = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let _ = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000); @@ -212,7 +212,7 @@ fn test_counterparty_revoked_reorg() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); // Connect up to one block before the revoked transaction would be considered final, then do a // reorg that disconnects the full chain and goes up to the height at which the revoked @@ -390,12 +390,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ if reorg_after_reload || !reload_node { handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. 
Locked at 6 confs, now have 0 confs."); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) } - , [nodes[0].node.get_our_node_id()], 100000); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; + check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 100000); } - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() }, - [nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() }, + false, &[nodes[1].node.get_our_node_id()], 100000); // Now check that we can create a new channel if reload_node && !reorg_after_reload { @@ -484,7 +484,7 @@ fn test_set_outpoints_partial_claiming() { // Connect blocks on node A commitment transaction mine_transaction(&nodes[0], &remote_txn[0]); check_closed_broadcast!(nodes[0], true); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 1000000); check_added_monitors!(nodes[0], 1); // Verify node A broadcast tx claiming both HTLCs { @@ -585,11 +585,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); assert!(nodes[0].node.list_channels().is_empty()); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], true); assert!(nodes[1].node.list_channels().is_empty()); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 03f0702a78d..36174719cb3 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -78,9 +78,9 @@ fn pre_funding_lock_shutdown_test() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 8000000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 8000000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 8000000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 8000000); } #[test] @@ -129,9 +129,9 @@ fn expect_channel_shutdown_state() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, 
[node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } #[test] @@ -179,7 +179,7 @@ fn expect_channel_shutdown_state_with_htlc() { expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); // Fulfil HTLCs on node1 and node0 - let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -188,7 +188,7 @@ fn expect_channel_shutdown_state_with_htlc() { nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); - let mut updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); // Still in "resolvingHTLCs" on chan1 after htlc removed on chan2 @@ -220,9 +220,9 @@ fn expect_channel_shutdown_state_with_htlc() { let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); // Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary assert!(nodes[0].node.list_channels().is_empty()); @@ -289,9 +289,9 @@ fn test_lnd_bug_6039() { assert!(node_1_none.is_none()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); // Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary assert!(nodes[0].node.list_channels().is_empty()); @@ -320,7 +320,7 @@ fn shutdown_on_unfunded_channel() { }, ); let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1_000_000); } #[test] @@ -338,7 +338,7 @@ fn close_on_unfunded_channel() { nodes[0].node.close_channel(&chan_id, &node_b_id).unwrap(); let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1_000_000); } #[test] @@ -375,9 +375,16 @@ fn expect_channel_shutdown_state_with_force_closure() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_event!(nodes[0], 1, 
ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + check_closed_event( + &nodes[0], + 1, + ClosureReason::CommitmentTxConfirmed, + false, + &[node_b_id], + 100000, + ); let reason_b = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } #[test] @@ -455,7 +462,7 @@ fn updates_shutdown_wait() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); - let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -464,7 +471,7 @@ fn updates_shutdown_wait() { nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); - let mut updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); assert!(updates_2.update_add_htlcs.is_empty()); @@ -488,9 +495,9 @@ fn updates_shutdown_wait() { assert!(node_1_none.is_none()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); @@ -501,9 +508,9 @@ fn updates_shutdown_wait() { assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_c_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_c_id], 100000); let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); + check_closed_event(&nodes[2], 1, reason_c, false, &[node_b_id], 100000); } #[test] @@ -550,7 +557,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { .send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); assert_eq!(updates.update_add_htlcs.len(), 1); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -574,7 +581,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { ); check_added_monitors(&nodes[1], 1); - let updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id); + let updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates_2.update_add_htlcs.is_empty()); assert!(updates_2.update_fulfill_htlcs.is_empty()); assert_eq!(updates_2.update_fail_htlcs.len(), 1); @@ -630,7 +637,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { assert!(nodes[1].node.list_channels().is_empty()); 
assert!(nodes[2].node.list_channels().is_empty()); let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let event1 = ExpectedCloseEvent { channel_capacity_sats: Some(100000), channel_id: None, @@ -653,7 +660,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { }; check_closed_events(&nodes[1], &[event1, event2]); let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); + check_closed_event(&nodes[2], 1, reason_c, false, &[node_b_id], 100000); } fn do_test_shutdown_rebroadcast(recv_count: u8) { @@ -721,7 +728,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); - let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -730,7 +737,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); - let mut updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); assert!(updates_2.update_add_htlcs.is_empty()); @@ -804,7 +811,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); let reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } else { // If one node, however, received + responded with an identical closing_signed we end // up erroring and node[0] will try to broadcast its own latest commitment transaction. @@ -836,7 +843,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { check_closed_broadcast!(nodes[1], false); check_added_monitors!(nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &node_b_id)) }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } assert!(nodes[0].node.list_channels().is_empty()); @@ -849,11 +856,11 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[2].node.list_channels().is_empty()); let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_c_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_c_id], 100000); let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); + check_closed_event(&nodes[2], 1, reason_c, false, &[node_b_id], 100000); } #[test] @@ -1038,7 +1045,7 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { _ => panic!("Unexpected event"), } let reason = ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: OP_PUSHNUM_16 OP_PUSHBYTES_2 0028".to_string() }; - check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[test] @@ -1363,10 +1370,10 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { if timeout_step == TimeoutStep::NoTimeout { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.1.unwrap()); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); } if timeout_step != TimeoutStep::NoTimeout { @@ -1394,7 +1401,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { let reason = ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string(), }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } else { assert!(txn[0].output[0].script_pubkey.is_p2wpkh()); assert!(txn[0].output[1].script_pubkey.is_p2wpkh()); @@ -1458,9 +1465,9 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) { let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); assert!(node_0_none.is_none()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } #[test] @@ -1532,9 +1539,9 @@ fn simple_target_feerate_shutdown() { let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); assert!(node_0_none.is_none()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - 
check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { @@ -1637,9 +1644,9 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { assert!(node_1_none.is_none()); let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, reason_a, false, &[node_b_id], 100000); let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, reason_b, false, &[node_a_id], 100000); } #[test] diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 29243ba8374..5c3499ce5a6 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1067,7 +1067,7 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { let initial_commit_sig_for_acceptor = negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); assert_eq!(initial_commit_sig_for_acceptor.htlc_signatures.len(), 1); - let initial_commit_sig_for_initiator = get_htlc_update_msgs!(&nodes[1], node_id_0); + let initial_commit_sig_for_initiator = get_htlc_update_msgs(&nodes[1], &node_id_0); assert_eq!(initial_commit_sig_for_initiator.commitment_signed.len(), 1); assert_eq!(initial_commit_sig_for_initiator.commitment_signed[0].htlc_signatures.len(), 1); diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index d9267988826..b155bde3cfa 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -107,7 +107,7 @@ pub fn test_async_inbound_update_fee() { check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); // deliver (2) - let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_update = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(bs_update.update_add_htlcs.is_empty()); // (4) assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) assert!(bs_update.update_fail_htlcs.is_empty()); // (4) @@ -116,7 +116,7 @@ pub fn test_async_inbound_update_fee() { check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); // deliver (3) - let as_update = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_update = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_update.update_add_htlcs.is_empty()); // (5) assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) assert!(as_update.update_fail_htlcs.is_empty()); // (5) @@ -289,7 +289,7 @@ pub fn test_multi_flight_update_fee() { // Deliver (1), generating (3) and (4) nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_msg); - let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); + let as_second_update = get_htlc_update_msgs(&nodes[0], &node_b_id); check_added_monitors(&nodes[0], 1); assert!(as_second_update.update_add_htlcs.is_empty()); assert!(as_second_update.update_fulfill_htlcs.is_empty()); @@ -439,7 +439,7 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann } nodes[0].node.timer_tick_occurred(); 
check_added_monitors(&nodes[0], 1); - let update_msg = get_htlc_update_msgs!(nodes[0], node_b_id); + let update_msg = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_fee(node_a_id, &update_msg.update_fee.unwrap()); @@ -531,7 +531,7 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::ProcessingError { err: err.to_string() }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], channel_value); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], channel_value); } #[xtest(feature = "_externalize_tests")] @@ -629,7 +629,7 @@ pub fn test_update_fee_that_saturates_subs() { check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::ProcessingError { err: err.to_string() }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 10_000); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 10_000); } #[xtest(feature = "_externalize_tests")] @@ -692,7 +692,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { check_added_monitors(&nodes[1], 1); // AwaitingRemoteRevoke ends here - let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let commitment_update = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(commitment_update.update_add_htlcs.len(), 1); assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); assert_eq!(commitment_update.update_fail_htlcs.len(), 0); @@ -734,9 +734,9 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[0], &[&nodes[1]], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, node_a_reason, false, &[node_b_id], 100000); let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, node_b_reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -834,7 +834,7 @@ pub fn test_update_fee() { // Deliver (6), creating (7): nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg_0); - let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let commitment_update = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(commitment_update.update_add_htlcs.is_empty()); assert!(commitment_update.update_fulfill_htlcs.is_empty()); assert!(commitment_update.update_fail_htlcs.is_empty()); @@ -858,9 +858,9 @@ pub fn test_update_fee() { assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; - check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + check_closed_event(&nodes[0], 1, node_a_reason, false, &[node_b_id], 100000); let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; - check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); + check_closed_event(&nodes[1], 1, node_b_reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -995,7 +995,7 @@ pub fn accept_busted_but_better_fee() { peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000, }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + 
check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); }, diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs index b4fd9e0be88..8854ef32d26 100644 --- a/lightning/src/ln/zero_fee_commitment_tests.rs +++ b/lightning/src/ln/zero_fee_commitment_tests.rs @@ -189,22 +189,24 @@ fn test_htlc_claim_chunking() { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!( - nodes[0], + check_closed_event( + &nodes[0], 1, ClosureReason::CommitmentTxConfirmed, - [nodes[1].node.get_our_node_id()], - CHAN_CAPACITY + false, + &[nodes[1].node.get_our_node_id()], + CHAN_CAPACITY, ); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!( - nodes[1], + check_closed_event( + &nodes[1], 1, ClosureReason::CommitmentTxConfirmed, - [nodes[0].node.get_our_node_id()], - CHAN_CAPACITY + false, + &[nodes[0].node.get_our_node_id()], + CHAN_CAPACITY, ); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -362,7 +364,7 @@ fn test_anchor_tx_too_big() { check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; - check_closed_event!(nodes[1], 1, reason, [node_a_id], CHAN_CAPACITY); + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], CHAN_CAPACITY); let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(events.len(), 1);