diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index f8551ee7a70..f3ecc72806a 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -14,6 +14,9 @@ function PIN_RELEASE_DEPS { # syn 2.0.107 requires rustc 1.68.0 [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose + # quote 1.0.42 requires rustc 1.68.0 + [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose + return 0 # Don't fail the script if our rustc is higher than the last check } @@ -54,6 +57,7 @@ echo -e "\n\nTesting upgrade from prior versions of LDK" pushd lightning-tests [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose cargo test popd @@ -125,6 +129,7 @@ echo -e "\n\nTesting no_std build on a downstream no-std crate" # check no-std compatibility across dependencies pushd no-std-check [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose cargo check --verbose --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 9f03de47d23..943e1f2d63a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -43,13 +43,16 @@ use lightning::chain::{ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch, }; use lightning::events; -use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; +use lightning::ln::channel::{ + FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, +}; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, 
RecentPaymentDetails, RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; +use lightning::ln::funding::{FundingTxInput, SpliceContribution}; use lightning::ln::inbound_payment::ExpandedKey; use lightning::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, @@ -68,6 +71,7 @@ use lightning::sign::{ }; use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::util::config::UserConfig; +use lightning::util::errors::APIError; use lightning::util::hash_tables::*; use lightning::util::logger::Logger; use lightning::util::ser::{LengthReadable, ReadableArgs, Writeable, Writer}; @@ -163,6 +167,63 @@ impl Writer for VecWriter { } } +pub struct TestWallet { + secret_key: SecretKey, + utxos: Mutex>, + secp: Secp256k1, +} + +impl TestWallet { + pub fn new(secret_key: SecretKey) -> Self { + Self { secret_key, utxos: Mutex::new(Vec::new()), secp: Secp256k1::new() } + } + + fn get_change_script(&self) -> Result { + let public_key = bitcoin::PublicKey::new(self.secret_key.public_key(&self.secp)); + Ok(ScriptBuf::new_p2wpkh(&public_key.wpubkey_hash().unwrap())) + } + + pub fn add_utxo(&self, outpoint: bitcoin::OutPoint, value: Amount) -> TxOut { + let public_key = bitcoin::PublicKey::new(self.secret_key.public_key(&self.secp)); + let utxo = lightning::events::bump_transaction::Utxo::new_v0_p2wpkh( + outpoint, + value, + &public_key.wpubkey_hash().unwrap(), + ); + self.utxos.lock().unwrap().push(utxo.clone()); + utxo.output + } + + pub fn sign_tx( + &self, mut tx: Transaction, + ) -> Result { + let utxos = self.utxos.lock().unwrap(); + for i in 0..tx.input.len() { + if let Some(utxo) = + utxos.iter().find(|utxo| utxo.outpoint == tx.input[i].previous_output) + { + let sighash = bitcoin::sighash::SighashCache::new(&tx).p2wpkh_signature_hash( + i, + &utxo.output.script_pubkey, + utxo.output.value, + bitcoin::EcdsaSighashType::All, + )?; + let signature = self.secp.sign_ecdsa( + 
&secp256k1::Message::from_digest(sighash.to_byte_array()), + &self.secret_key, + ); + let bitcoin_sig = bitcoin::ecdsa::Signature { + signature, + sighash_type: bitcoin::EcdsaSighashType::All, + }; + tx.input[i].witness = + bitcoin::Witness::p2wpkh(&bitcoin_sig, &self.secret_key.public_key(&self.secp)); + } + } + Ok(tx) + } +} + /// The LDK API requires that any time we tell it we're done persisting a `ChannelMonitor[Update]` /// we never pass it in as the "latest" `ChannelMonitor` on startup. However, we can pass /// out-of-date monitors as long as we never told LDK we finished persisting them, which we do by @@ -671,6 +732,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; + config.reject_inbound_splices = false; if anchors { config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config.manually_accept_inbound_channels = true; @@ -724,6 +786,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; + config.reject_inbound_splices = false; if anchors { config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config.manually_accept_inbound_channels = true; @@ -984,6 +1047,30 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }}; } + let wallet_a = TestWallet::new(SecretKey::from_slice(&[1; 32]).unwrap()); + let wallet_b = TestWallet::new(SecretKey::from_slice(&[2; 32]).unwrap()); + let wallet_c = TestWallet::new(SecretKey::from_slice(&[3; 32]).unwrap()); + let wallets = vec![wallet_a, wallet_b, wallet_c]; + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + 
input: vec![bitcoin::TxIn { ..Default::default() }], + output: wallets + .iter() + .map(|w| TxOut { + value: Amount::from_sat(100_000), + script_pubkey: w.get_change_script().unwrap(), + }) + .collect(), + }; + let coinbase_txid = coinbase_tx.compute_txid(); + wallets.iter().enumerate().for_each(|(i, w)| { + w.add_utxo( + bitcoin::OutPoint { txid: coinbase_txid, vout: i as u32 }, + Amount::from_sat(100_000), + ); + }); + let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); let mut last_htlc_clear_fee_a = 253; let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); @@ -1073,6 +1160,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, + MessageSendEvent::SendSpliceInit { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendSpliceAck { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendSpliceLocked { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxAddInput { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxAddOutput { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxComplete { ref node_id, .. 
} => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxAbort { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, MessageSendEvent::SendChannelReady { .. } => continue, MessageSendEvent::SendAnnouncementSignatures { .. } => continue, MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { @@ -1208,7 +1323,79 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { dest.handle_stfu(nodes[$node].get_our_node_id(), msg); } } - } + }, + MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_add_input from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_add_input(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_add_output from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_add_output(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_remove_input from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_remove_input(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_remove_output from node {} to node {}.\n", $node, idx).as_bytes()); + 
dest.handle_tx_remove_output(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_complete from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_complete(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_abort from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_abort(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering splice_init from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_splice_init(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering splice_ack from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_splice_ack(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering splice_locked from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_splice_locked(nodes[$node].get_our_node_id(), msg); + } + } + }, MessageSendEvent::SendChannelReady { .. } => { // Can be generated as a reestablish response }, @@ -1347,6 +1534,25 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::Event::PaymentForwarded { .. 
} if $node == 1 => {}, events::Event::ChannelReady { .. } => {}, events::Event::HTLCHandlingFailed { .. } => {}, + + events::Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => { + let signed_tx = wallets[$node].sign_tx(unsigned_transaction).unwrap(); + nodes[$node] + .funding_transaction_signed( + &channel_id, + &counterparty_node_id, + signed_tx, + ) + .unwrap(); + }, + events::Event::SplicePending { .. } => {}, + events::Event::SpliceFailed { .. } => {}, + _ => { if out.may_fail.load(atomic::Ordering::Acquire) { return; @@ -1652,16 +1858,220 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa0 => { - nodes[0].maybe_propose_quiescence(&nodes[1].get_our_node_id(), &chan_a_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[0].splice_channel( + &chan_a_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } }, 0xa1 => { - nodes[1].maybe_propose_quiescence(&nodes[0].get_our_node_id(), &chan_a_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_a_id, + &nodes[0].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + 
"{:?}", + e + ); + } }, 0xa2 => { - nodes[1].maybe_propose_quiescence(&nodes[2].get_our_node_id(), &chan_b_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_b_id, + &nodes[2].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } }, 0xa3 => { - nodes[2].maybe_propose_quiescence(&nodes[1].get_our_node_id(), &chan_b_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[2].splice_channel( + &chan_b_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + }, + + // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node + // has double the balance required to send a payment upon a `0xff` byte. We do this to + // ensure there's always liquidity available for a payment to succeed then. 
+ 0xa4 => { + let outbound_capacity_msat = nodes[0] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_a_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_a.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[0].splice_channel( + &chan_a_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } + }, + 0xa5 => { + let outbound_capacity_msat = nodes[1] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_a_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_a_id, + &nodes[0].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } + }, + 0xa6 => { + let outbound_capacity_msat = nodes[1] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_b_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: 
coinbase_tx.output[1].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_b_id, + &nodes[2].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } + }, + 0xa7 => { + let outbound_capacity_msat = nodes[2] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_b_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_c.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[2].splice_channel( + &chan_b_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } }, 0xb0 | 0xb1 | 0xb2 => { @@ -1828,16 +2238,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; } - // We may be pending quiescence, so first process all messages to ensure we can - // complete the quiescence handshake. - process_all_events!(); - - // Then exit quiescence and process all messages again, to resolve any pending - // HTLCs (only irrevocably committed ones) before attempting to send more payments. 
- nodes[0].exit_quiescence(&nodes[1].get_our_node_id(), &chan_a_id).unwrap(); - nodes[1].exit_quiescence(&nodes[0].get_our_node_id(), &chan_a_id).unwrap(); - nodes[1].exit_quiescence(&nodes[2].get_our_node_id(), &chan_b_id).unwrap(); - nodes[2].exit_quiescence(&nodes[1].get_our_node_id(), &chan_b_id).unwrap(); process_all_events!(); // Finally, make sure that at least one end of each channel can make a substantial payment diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 3634d67f1da..47f929377de 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -91,7 +91,10 @@ use crate::ser::Base32Iterable; #[allow(missing_docs)] #[derive(PartialEq, Eq, Debug, Clone)] pub enum Bolt11ParseError { - Bech32Error(CheckedHrpstringError), + Bech32Error( + /// This is not exported to bindings users as the details don't matter much + CheckedHrpstringError, + ), ParseAmountError(ParseIntError), MalformedSignature(bitcoin::secp256k1::Error), BadPrefix, diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 494a4f35cab..dda9922686d 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -619,7 +619,7 @@ impl PeerState { self.needs_persist |= true; } - fn prune_expired_request_state(&mut self) { + fn prune_pending_requests(&mut self) { self.pending_requests.retain(|_, entry| { match entry { LSPS2Request::GetInfo(_) => false, @@ -629,7 +629,9 @@ impl PeerState { }, } }); + } + fn prune_expired_request_state(&mut self) { self.outbound_channels_by_intercept_scid.retain(|intercept_scid, entry| { if entry.is_prunable() { // We abort the flow, and prune any data kept. @@ -1871,6 +1873,7 @@ where let mut peer_state_lock = inner_state_lock.lock().unwrap(); // We clean up the peer state, but leave removing the peer entry to the prune logic in // `persist` which removes it from the store. 
+ peer_state_lock.prune_pending_requests(); peer_state_lock.prune_expired_request_state(); } } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 4abd0cd88c0..d8d6c90921f 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -26,6 +26,8 @@ use bitcoin::block::Header; use bitcoin::hash_types::{BlockHash, Txid}; +use bitcoin::secp256k1::PublicKey; + use crate::chain; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; #[cfg(peer_storage)] @@ -57,7 +59,8 @@ use crate::util::persist::{KVStore, MonitorName, MonitorUpdatingPersisterAsync}; #[cfg(peer_storage)] use crate::util::ser::{VecWriter, Writeable}; use crate::util::wakers::{Future, Notifier}; -use bitcoin::secp256k1::PublicKey; + +use alloc::sync::Arc; #[cfg(peer_storage)] use core::iter::Cycle; use core::ops::Deref; @@ -250,6 +253,8 @@ impl Deref for LockedChannelMonitor<'_, Chann /// An unconstructable [`Persist`]er which is used under the hood when you call /// [`ChainMonitor::new_async_beta`]. +/// +/// This is not exported to bindings users as async is not supported outside of Rust. 
pub struct AsyncPersister< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, @@ -267,6 +272,7 @@ pub struct AsyncPersister< FE::Target: FeeEstimator, { persister: MonitorUpdatingPersisterAsync, + event_notifier: Arc, } impl< @@ -314,7 +320,8 @@ where &self, monitor_name: MonitorName, monitor: &ChannelMonitor<::EcdsaSigner>, ) -> ChannelMonitorUpdateStatus { - self.persister.spawn_async_persist_new_channel(monitor_name, monitor); + let notifier = Arc::clone(&self.event_notifier); + self.persister.spawn_async_persist_new_channel(monitor_name, monitor, notifier); ChannelMonitorUpdateStatus::InProgress } @@ -322,7 +329,8 @@ where &self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<::EcdsaSigner>, ) -> ChannelMonitorUpdateStatus { - self.persister.spawn_async_update_persisted_channel(monitor_name, monitor_update, monitor); + let notifier = Arc::clone(&self.event_notifier); + self.persister.spawn_async_update_channel(monitor_name, monitor_update, monitor, notifier); ChannelMonitorUpdateStatus::InProgress } @@ -382,7 +390,7 @@ pub struct ChainMonitor< /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process). - event_notifier: Notifier, + event_notifier: Arc, /// Messages to send to the peer. This is currently used to distribute PeerStorage to channel partners. pending_send_only_events: Mutex>, @@ -425,22 +433,25 @@ impl< /// [`MonitorUpdatingPersisterAsync`] and thus allows persistence to be completed async. /// /// Note that async monitor updating is considered beta, and bugs may be triggered by its use. + /// + /// This is not exported to bindings users as async is not supported outside of Rust. 
pub fn new_async_beta( chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: MonitorUpdatingPersisterAsync, _entropy_source: ES, _our_peerstorage_encryption_key: PeerStorageKey, ) -> Self { + let event_notifier = Arc::new(Notifier::new()); Self { monitors: RwLock::new(new_hash_map()), chain_source, broadcaster, logger, fee_estimator: feeest, - persister: AsyncPersister { persister }, _entropy_source, pending_monitor_events: Mutex::new(Vec::new()), highest_chain_height: AtomicUsize::new(0), - event_notifier: Notifier::new(), + event_notifier: Arc::clone(&event_notifier), + persister: AsyncPersister { persister, event_notifier }, pending_send_only_events: Mutex::new(Vec::new()), #[cfg(peer_storage)] our_peerstorage_encryption_key: _our_peerstorage_encryption_key, @@ -656,7 +667,7 @@ where _entropy_source, pending_monitor_events: Mutex::new(Vec::new()), highest_chain_height: AtomicUsize::new(0), - event_notifier: Notifier::new(), + event_notifier: Arc::new(Notifier::new()), pending_send_only_events: Mutex::new(Vec::new()), #[cfg(peer_storage)] our_peerstorage_encryption_key: _our_peerstorage_encryption_key, diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 872087899df..11f008612a2 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -353,6 +353,9 @@ pub struct CoinSelection { /// which can provide a default implementation of this trait when used with [`Wallet`]. /// /// For a synchronous version of this trait, see [`sync::CoinSelectionSourceSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this trait should be copied to the synchronous version. pub trait CoinSelectionSource { /// Performs coin selection of a set of UTXOs, with at least 1 confirmation each, that are /// available to spend. 
Implementations are free to pick their coin selection algorithm of @@ -404,6 +407,9 @@ pub trait CoinSelectionSource { /// provide a default implementation to [`CoinSelectionSource`]. /// /// For a synchronous version of this trait, see [`sync::WalletSourceSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; @@ -419,11 +425,14 @@ pub trait WalletSource { fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; } -/// A wrapper over [`WalletSource`] that implements [`CoinSelection`] by preferring UTXOs that would -/// avoid conflicting double spends. If not enough UTXOs are available to do so, conflicting double -/// spends may happen. +/// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs +/// that would avoid conflicting double spends. If not enough UTXOs are available to do so, +/// conflicting double spends may happen. /// /// For a synchronous version of this wrapper, see [`sync::WalletSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this struct should be copied to the synchronous version. pub struct Wallet where W::Target: WalletSource + MaybeSend, @@ -670,7 +679,10 @@ where /// /// For a synchronous version of this handler, see [`sync::BumpTransactionEventHandlerSync`]. /// +/// This is not exported to bindings users as async is only supported in Rust. +/// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction +// Note that updates to documentation on this struct should be copied to the synchronous version. 
pub struct BumpTransactionEventHandler where B::Target: BroadcasterInterface, diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index c987b871868..653710a3358 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -28,13 +28,26 @@ use super::{ WalletSource, }; -/// A synchronous version of the [`WalletSource`] trait. +/// An alternative to [`CoinSelectionSourceSync`] that can be implemented and used along +/// [`WalletSync`] to provide a default implementation to [`CoinSelectionSourceSync`]. +/// +/// For an asynchronous version of this trait, see [`WalletSource`]. +// Note that updates to documentation on this trait should be copied to the asynchronous version. pub trait WalletSourceSync { - /// A synchronous version of [`WalletSource::list_confirmed_utxos`]. + /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. fn list_confirmed_utxos(&self) -> Result, ()>; - /// A synchronous version of [`WalletSource::get_change_script`]. + /// Returns a script to use for change above dust resulting from a successful coin selection + /// attempt. fn get_change_script(&self) -> Result; - /// A Synchronous version of [`WalletSource::sign_psbt`]. + /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within + /// the transaction known to the wallet (i.e., any provided via + /// [`WalletSource::list_confirmed_utxos`]). + /// + /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the + /// unsigned transaction and then sign it with your wallet. + /// + /// [`TxIn::script_sig`]: bitcoin::TxIn::script_sig + /// [`TxIn::witness`]: bitcoin::TxIn::witness fn sign_psbt(&self, psbt: Psbt) -> Result; } @@ -74,7 +87,12 @@ where } } -/// A synchronous wrapper around [`Wallet`] to be used in contexts where async is not available. 
+/// A wrapper over [`WalletSourceSync`] that implements [`CoinSelectionSourceSync`] by preferring +/// UTXOs that would avoid conflicting double spends. If not enough UTXOs are available to do so, +/// conflicting double spends may happen. +/// +/// For an asynchronous version of this wrapper, see [`Wallet`]. +// Note that updates to documentation on this struct should be copied to the asynchronous version. pub struct WalletSync where W::Target: WalletSourceSync + MaybeSend, @@ -136,15 +154,59 @@ where } } -/// A synchronous version of the [`CoinSelectionSource`] trait. +/// An abstraction over a bitcoin wallet that can perform coin selection over a set of UTXOs and can +/// sign for them. The coin selection method aims to mimic Bitcoin Core's `fundrawtransaction` RPC, +/// which most wallets should be able to satisfy. Otherwise, consider implementing +/// [`WalletSourceSync`], which can provide a default implementation of this trait when used with +/// [`WalletSync`]. +/// +/// For an asynchronous version of this trait, see [`CoinSelectionSource`]. +// Note that updates to documentation on this trait should be copied to the asynchronous version. pub trait CoinSelectionSourceSync { - /// A synchronous version of [`CoinSelectionSource::select_confirmed_utxos`]. + /// Performs coin selection of a set of UTXOs, with at least 1 confirmation each, that are + /// available to spend. Implementations are free to pick their coin selection algorithm of + /// choice, as long as the following requirements are met: + /// + /// 1. `must_spend` contains a set of [`Input`]s that must be included in the transaction + /// throughout coin selection, but must not be returned as part of the result. + /// 2. `must_pay_to` contains a set of [`TxOut`]s that must be included in the transaction + /// throughout coin selection. In some cases, like when funding an anchor transaction, this + /// set is empty. 
Implementations should ensure they handle this correctly on their end, + /// e.g., Bitcoin Core's `fundrawtransaction` RPC requires at least one output to be + /// provided, in which case a zero-value empty OP_RETURN output can be used instead. + /// 3. Enough inputs must be selected/contributed for the resulting transaction (including the + /// inputs and outputs noted above) to meet `target_feerate_sat_per_1000_weight`. + /// 4. The final transaction must have a weight smaller than `max_tx_weight`; if this + /// constraint can't be met, return an `Err`. In the case of counterparty-signed HTLC + /// transactions, we will remove a chunk of HTLCs and try your algorithm again. As for + /// anchor transactions, we will try your coin selection again with the same input-output + /// set when you call [`ChannelMonitor::rebroadcast_pending_claims`], as anchor transactions + /// cannot be downsized. + /// + /// Implementations must take note that [`Input::satisfaction_weight`] only tracks the weight of + /// the input's `script_sig` and `witness`. Some wallets, like Bitcoin Core's, may require + /// providing the full input weight. Failing to do so may lead to underestimating fee bumps and + /// delaying block inclusion. + /// + /// The `claim_id` must map to the set of external UTXOs assigned to the claim, such that they + /// can be re-used within new fee-bumped iterations of the original claiming transaction, + /// ensuring that claims don't double spend each other. If a specific `claim_id` has never had a + /// transaction associated with it, and all of the available UTXOs have already been assigned to + /// other claims, implementations must be willing to double spend their UTXOs. The choice of + /// which UTXOs to double spend is left to the implementation, but it must strive to keep the + /// set of other claims being double spent to a minimum. 
+ ///
+ /// [`ChannelMonitor::rebroadcast_pending_claims`]: crate::chain::channelmonitor::ChannelMonitor::rebroadcast_pending_claims
 fn select_confirmed_utxos(
 &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut],
 target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64,
 ) -> Result;
- /// A synchronous version of [`CoinSelectionSource::sign_psbt`].
+ /// Signs and provides the full witness for all inputs within the transaction known to the
+ /// trait (i.e., any provided via [`CoinSelectionSourceSync::select_confirmed_utxos`]).
+ ///
+ /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the
+ /// unsigned transaction and then sign it with your wallet.
 fn sign_psbt(&self, psbt: Psbt) -> Result;
}

@@ -188,7 +250,14 @@ where
 }
}

-/// A synchronous wrapper around [`BumpTransactionEventHandler`] to be used in contexts where async is not available.
+/// A handler for [`Event::BumpTransaction`] events that sources confirmed UTXOs from a
+/// [`CoinSelectionSourceSync`] to fee bump transactions via Child-Pays-For-Parent (CPFP) or
+/// Replace-By-Fee (RBF).
+///
+/// For an asynchronous version of this handler, see [`BumpTransactionEventHandler`].
+///
+/// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction
+// Note that updates to documentation on this struct should be copied to the asynchronous version.
pub struct BumpTransactionEventHandlerSync where B::Target: BroadcasterInterface, diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 71821081094..03728e28222 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -596,7 +596,7 @@ fn do_test_async_raa_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(revoke_and_ack.is_none()); @@ -612,14 +612,14 @@ fn do_test_async_raa_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); assert!(commitment_signed.is_some()); assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); } else { // Make sure we don't double send the RAA. 
- let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_none()); assert!(commitment_signed.is_none()); @@ -746,7 +746,7 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { @@ -760,11 +760,11 @@ fn do_test_async_commitment_signature_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { // Make sure we don't double send the CS. 
- let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_none()); } } @@ -882,6 +882,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); assert!(as_resp.6.is_none()); + assert!(as_resp.7.is_none()); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -904,6 +905,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); assert!(as_resp.6.is_none()); + assert!(as_resp.7.is_none()); nodes[0].enable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((node_b_id, chan_id))); @@ -929,6 +931,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.6.is_none()); assert!(bs_resp.6.is_none()); + assert!(as_resp.7.is_none()); + assert!(bs_resp.7.is_none()); + // Now that everything is restored, get the CS + RAA and handle them. 
nodes[1] .node diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 51bfc95e7e9..e759a4b54cb 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -116,19 +116,27 @@ pub const HTLC_SUCCESS_INPUT_P2A_ANCHOR_WITNESS_WEIGHT: u64 = 324; /// The size of the 2-of-2 multisig script const MULTISIG_SCRIPT_SIZE: u64 = 1 + // OP_2 1 + // data len - 33 + // pubkey1 + crate::sign::COMPRESSED_PUBLIC_KEY_SIZE as u64 + // pubkey1 1 + // data len - 33 + // pubkey2 + crate::sign::COMPRESSED_PUBLIC_KEY_SIZE as u64 + // pubkey2 1 + // OP_2 1; // OP_CHECKMULTISIG -/// The weight of a funding transaction input (2-of-2 P2WSH) -/// See https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-the-commitment-transaction + +/// The weight of a funding transaction input (2-of-2 P2WSH). +/// +/// Unlike in the [spec], 72 WU is used for the max signature size since 73 WU signatures are +/// non-standard. +/// +/// Note: If you have the `grind_signatures` feature enabled, this will be at least 1 byte +/// shorter. 
+/// +/// [spec]: https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-the-commitment-transaction pub const FUNDING_TRANSACTION_WITNESS_WEIGHT: u64 = 1 + // number_of_witness_elements 1 + // nil_len 1 + // sig len - 73 + // sig1 + crate::sign::MAX_STANDARD_SIGNATURE_SIZE as u64 + // sig1 1 + // sig len - 73 + // sig2 + crate::sign::MAX_STANDARD_SIGNATURE_SIZE as u64 + // sig2 1 + // witness_script_length MULTISIG_SCRIPT_SIZE; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3c901d5e073..62b108fd20a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1645,7 +1645,7 @@ where } chan.context.channel_state.clear_local_stfu_sent(); chan.context.channel_state.clear_remote_stfu_sent(); - if chan.should_reset_pending_splice_state() { + if chan.should_reset_pending_splice_state(false) { // If there was a pending splice negotiation that failed due to disconnecting, we // also take the opportunity to clean up our state. let splice_funding_failed = chan.reset_pending_splice_state(); @@ -1766,7 +1766,7 @@ where None }, ChannelPhase::Funded(funded_channel) => { - if funded_channel.should_reset_pending_splice_state() { + if funded_channel.should_reset_pending_splice_state(false) { funded_channel.reset_pending_splice_state() } else { debug_assert!(false, "We should never fail an interactive funding negotiation once we're exchanging tx_signatures"); @@ -1923,12 +1923,24 @@ where (had_constructor, None) }, ChannelPhase::Funded(funded_channel) => { - if funded_channel.has_pending_splice_awaiting_signatures() { + if funded_channel.has_pending_splice_awaiting_signatures() + && funded_channel + .context() + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + { + // We only force close once the counterparty tries to abort after committing to + // the splice via their initial `commitment_signed`. 
This is because our monitor + // state is updated with the post-splice commitment transaction upon receiving + // their `commitment_signed`, so we would need another monitor update to abandon + // it, which we don't currently support. return Err(ChannelError::close( "Received tx_abort while awaiting tx_signatures exchange".to_owned(), )); } - if funded_channel.should_reset_pending_splice_state() { + if funded_channel.should_reset_pending_splice_state(true) { let has_funding_negotiation = funded_channel .pending_splice .as_ref() @@ -2007,18 +2019,32 @@ where | NegotiatingFundingFlags::THEIR_INIT_SENT ), ); + chan.context.assert_no_commitment_advancement( + chan.unfunded_context.transaction_number(), + "initial commitment_signed", + ); + + chan.context.channel_state = + ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); + chan.funding.channel_transaction_parameters.funding_outpoint = + Some(funding_outpoint); let interactive_tx_constructor = chan .interactive_tx_constructor .take() .expect("PendingV2Channel::interactive_tx_constructor should be set"); - let commitment_signed = chan.context.funding_tx_constructed( - &mut chan.funding, - funding_outpoint, - false, - chan.unfunded_context.transaction_number(), - &&logger, - )?; + + let commitment_signed = + chan.context.get_initial_commitment_signed_v2(&chan.funding, &&logger); + let commitment_signed = match commitment_signed { + Some(commitment_signed) => commitment_signed, + // TODO(dual_funding): Support async signing + None => { + return Err(AbortReason::InternalError( + "Failed to compute commitment_signed signatures", + )); + }, + }; (interactive_tx_constructor, commitment_signed) }, @@ -2047,14 +2073,11 @@ where ) }) .and_then(|(is_initiator, mut funding, interactive_tx_constructor)| { - match chan.context.funding_tx_constructed( - &mut funding, - funding_outpoint, - true, - chan.holder_commitment_point.next_transaction_number(), - &&logger, - ) { - Ok(commitment_signed) => { + 
funding.channel_transaction_parameters.funding_outpoint = + Some(funding_outpoint); + match chan.context.get_initial_commitment_signed_v2(&funding, &&logger) + { + Some(commitment_signed) => { // Advance the state pending_splice.funding_negotiation = Some(FundingNegotiation::AwaitingSignatures { @@ -2063,14 +2086,17 @@ where }); Ok((interactive_tx_constructor, commitment_signed)) }, - Err(e) => { + // TODO(splicing): Support async signing + None => { // Restore the taken state for later error handling pending_splice.funding_negotiation = Some(FundingNegotiation::ConstructingTransaction { funding, interactive_tx_constructor, }); - Err(e) + Err(AbortReason::InternalError( + "Failed to compute commitment_signed signatures", + )) }, } })? @@ -2695,19 +2721,6 @@ impl FundingNegotiation { } impl PendingFunding { - fn can_abandon_state(&self) -> bool { - self.funding_negotiation - .as_ref() - .map(|funding_negotiation| { - !matches!(funding_negotiation, FundingNegotiation::AwaitingSignatures { .. 
}) - }) - .unwrap_or_else(|| { - let has_negotiated_candidates = !self.negotiated_candidates.is_empty(); - debug_assert!(has_negotiated_candidates); - !has_negotiated_candidates - }) - } - fn check_get_splice_locked( &mut self, context: &ChannelContext, confirmed_funding_index: usize, height: u32, ) -> Option @@ -6204,38 +6217,6 @@ where Ok(()) } - #[rustfmt::skip] - fn funding_tx_constructed( - &mut self, funding: &mut FundingScope, funding_outpoint: OutPoint, is_splice: bool, - holder_commitment_transaction_number: u64, logger: &L, - ) -> Result - where - L::Target: Logger - { - funding.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint); - - if is_splice { - debug_assert_eq!( - holder_commitment_transaction_number, - self.counterparty_next_commitment_transaction_number, - ); - } else { - self.assert_no_commitment_advancement(holder_commitment_transaction_number, "initial commitment_signed"); - self.channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); - } - - let commitment_signed = self.get_initial_commitment_signed_v2(&funding, logger); - let commitment_signed = match commitment_signed { - Some(commitment_signed) => commitment_signed, - // TODO(splicing): Support async signing - None => { - return Err(AbortReason::InternalError("Failed to compute commitment_signed signatures")); - }, - }; - - Ok(commitment_signed) - } - /// Asserts that the commitment tx numbers have not advanced from their initial number. fn assert_no_commitment_advancement( &self, holder_commitment_transaction_number: u64, msg_name: &str, @@ -6561,6 +6542,11 @@ fn estimate_v2_funding_transaction_fee( .saturating_add(BASE_INPUT_WEIGHT) .saturating_add(EMPTY_SCRIPT_SIG_WEIGHT) .saturating_add(FUNDING_TRANSACTION_WITNESS_WEIGHT); + #[cfg(feature = "grind_signatures")] + { + // Guarantees a low R signature + weight -= 1; + } } } @@ -6885,7 +6871,7 @@ pub struct SpliceFundingFailed { } macro_rules! 
maybe_create_splice_funding_failed { - ($pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ + ($funded_channel: expr, $pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ $pending_splice .and_then(|pending_splice| pending_splice.funding_negotiation.$get()) .filter(|funding_negotiation| funding_negotiation.is_initiator()) @@ -6907,10 +6893,12 @@ macro_rules! maybe_create_splice_funding_failed { interactive_tx_constructor, .. } => interactive_tx_constructor.$contributed_inputs_and_outputs(), - FundingNegotiation::AwaitingSignatures { .. } => { - debug_assert!(false); - (Vec::new(), Vec::new()) - }, + FundingNegotiation::AwaitingSignatures { .. } => $funded_channel + .context + .interactive_tx_signing_session + .$get() + .expect("We have a pending splice awaiting signatures") + .$contributed_inputs_and_outputs(), }; SpliceFundingFailed { @@ -6949,7 +6937,7 @@ where fn maybe_fail_splice_negotiation(&mut self) -> Option { if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { - if self.should_reset_pending_splice_state() { + if self.should_reset_pending_splice_state(false) { self.reset_pending_splice_state() } else { match self.quiescent_action.take() { @@ -7023,19 +7011,54 @@ where /// Returns a boolean indicating whether we should reset the splice's /// [`PendingFunding::funding_negotiation`]. - fn should_reset_pending_splice_state(&self) -> bool { + fn should_reset_pending_splice_state(&self, counterparty_aborted: bool) -> bool { self.pending_splice .as_ref() - .map(|pending_splice| pending_splice.can_abandon_state()) + .map(|pending_splice| { + pending_splice + .funding_negotiation + .as_ref() + .map(|funding_negotiation| { + let is_awaiting_signatures = matches!( + funding_negotiation, + FundingNegotiation::AwaitingSignatures { .. 
} + ); + if counterparty_aborted { + !is_awaiting_signatures + || !self + .context() + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + } else { + !is_awaiting_signatures + } + }) + .unwrap_or_else(|| { + let has_negotiated_candidates = + !pending_splice.negotiated_candidates.is_empty(); + debug_assert!(has_negotiated_candidates); + !has_negotiated_candidates + }) + }) .unwrap_or(false) } fn reset_pending_splice_state(&mut self) -> Option { - debug_assert!(self.should_reset_pending_splice_state()); - debug_assert!(self.context.interactive_tx_signing_session.is_none()); - self.context.channel_state.clear_quiescent(); + debug_assert!(self.should_reset_pending_splice_state(true)); + debug_assert!( + self.context.interactive_tx_signing_session.is_none() + || !self + .context + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + ); let splice_funding_failed = maybe_create_splice_funding_failed!( + self, self.pending_splice.as_mut(), take, into_contributed_inputs_and_outputs @@ -7045,15 +7068,19 @@ where self.pending_splice.take(); } + self.context.channel_state.clear_quiescent(); + self.context.interactive_tx_signing_session.take(); + splice_funding_failed } pub(super) fn maybe_splice_funding_failed(&self) -> Option { - if !self.should_reset_pending_splice_state() { + if !self.should_reset_pending_splice_state(false) { return None; } maybe_create_splice_funding_failed!( + self, self.pending_splice.as_ref(), as_ref, to_contributed_inputs_and_outputs @@ -12008,7 +12035,7 @@ where pub fn abandon_splice( &mut self, ) -> Result<(msgs::TxAbort, Option), APIError> { - if self.should_reset_pending_splice_state() { + if self.should_reset_pending_splice_state(false) { let tx_abort = msgs::TxAbort { channel_id: self.context.channel_id(), data: Vec::new() }; let splice_funding_failed = 
self.reset_pending_splice_state(); @@ -13075,7 +13102,13 @@ where self.context.channel_state.set_awaiting_quiescence(); if self.context.is_live() { - Ok(Some(self.send_stfu(logger)?)) + match self.send_stfu(logger) { + Ok(stfu) => Ok(Some(stfu)), + Err(e) => { + log_debug!(logger, "{e}"); + Ok(None) + }, + } } else { log_debug!(logger, "Waiting for peer reconnection to send stfu"); Ok(None) @@ -14377,7 +14410,7 @@ where } channel_state.clear_local_stfu_sent(); channel_state.clear_remote_stfu_sent(); - if self.should_reset_pending_splice_state() + if self.should_reset_pending_splice_state(false) || !self.has_pending_splice_awaiting_signatures() { // We shouldn't be quiescent anymore upon reconnecting if: @@ -14751,7 +14784,7 @@ where // We don't have to worry about resetting the pending `FundingNegotiation` because we // can only read `FundingNegotiation::AwaitingSignatures` variants anyway. let pending_splice = - self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state()); + self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state(false)); write_tlv_fields!(writer, { (0, self.context.announcement_sigs, option), @@ -17413,19 +17446,19 @@ mod tests { // 2 inputs, initiator, 2000 sat/kw feerate assert_eq!( estimate_v2_funding_transaction_fee(&two_inputs, &[], true, false, 2000), - 1520, + if cfg!(feature = "grind_signatures") { 1512 } else { 1516 }, ); // higher feerate assert_eq!( estimate_v2_funding_transaction_fee(&two_inputs, &[], true, false, 3000), - 2280, + if cfg!(feature = "grind_signatures") { 2268 } else { 2274 }, ); // only 1 input assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], true, false, 2000), - 974, + if cfg!(feature = "grind_signatures") { 970 } else { 972 }, ); // 0 inputs @@ -17443,13 +17476,13 @@ mod tests { // splice initiator assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], true, true, 2000), - 1746, + if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }, ); 
// splice acceptor assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], false, true, 2000), - 546, + if cfg!(feature = "grind_signatures") { 542 } else { 544 }, ); } @@ -17473,40 +17506,46 @@ mod tests { use crate::ln::channel::check_v2_funding_inputs_sufficient; // positive case, inputs well over intended contribution - assert_eq!( - check_v2_funding_inputs_sufficient( - 220_000, - &[ - funding_input_sats(200_000), - funding_input_sats(100_000), - ], - true, - true, - 2000, - ).unwrap(), - 2292, - ); + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + 220_000, + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + true, + true, + 2000, + ).unwrap(), + expected_fee, + ); + } // negative case, inputs clearly insufficient { - let res = check_v2_funding_inputs_sufficient( - 220_000, - &[ - funding_input_sats(100_000), - ], - true, - true, - 2000, - ); + let expected_fee = if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }; assert_eq!( - res.err().unwrap(), - "Total input amount 100000 is lower than needed for contribution 220000, considering fees of 1746. Need more inputs.", + check_v2_funding_inputs_sufficient( + 220_000, + &[ + funding_input_sats(100_000), + ], + true, + true, + 2000, + ), + Err(format!( + "Total input amount 100000 is lower than needed for contribution 220000, considering fees of {}. 
Need more inputs.", + expected_fee, + )), ); } // barely covers { - let expected_fee: u64 = 2292; + let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( (300_000 - expected_fee - 20) as i64, @@ -17524,25 +17563,28 @@ mod tests { // higher fee rate, does not cover { - let res = check_v2_funding_inputs_sufficient( - 298032, - &[ - funding_input_sats(200_000), - funding_input_sats(100_000), - ], - true, - true, - 2200, - ); + let expected_fee = if cfg!(feature = "grind_signatures") { 2506 } else { 2513 }; assert_eq!( - res.err().unwrap(), - "Total input amount 300000 is lower than needed for contribution 298032, considering fees of 2522. Need more inputs.", + check_v2_funding_inputs_sufficient( + 298032, + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + true, + true, + 2200, + ), + Err(format!( + "Total input amount 300000 is lower than needed for contribution 298032, considering fees of {}. Need more inputs.", + expected_fee + )), ); } - // barely covers, less fees (no extra weight, no init) + // barely covers, less fees (no extra weight, not initiator) { - let expected_fee: u64 = 1092; + let expected_fee = if cfg!(feature = "grind_signatures") { 1084 } else { 1088 }; assert_eq!( check_v2_funding_inputs_sufficient( (300_000 - expected_fee - 20) as i64, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 78378978695..b6db7d23484 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4996,9 +4996,17 @@ macro_rules! 
handle_chan_reestablish_msgs { ($src_node: expr, $dst_node: expr) => {{ let msg_events = $src_node.node.get_and_clear_pending_msg_events(); let mut idx = 0; + + let mut tx_abort = None; + if let Some(&MessageSendEvent::SendTxAbort { ref node_id, ref msg }) = msg_events.get(idx) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + tx_abort = Some(msg.clone()); + } + let channel_ready = if let Some(&MessageSendEvent::SendChannelReady { ref node_id, ref msg }) = - msg_events.get(0) + msg_events.get(idx) { idx += 1; assert_eq!(*node_id, $dst_node.node.get_our_node_id()); @@ -5115,6 +5123,7 @@ macro_rules! handle_chan_reestablish_msgs { announcement_sigs, tx_signatures, stfu, + tx_abort, ) }}; } @@ -5127,6 +5136,7 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub send_stfu: (bool, bool), pub send_interactive_tx_commit_sig: (bool, bool), pub send_interactive_tx_sigs: (bool, bool), + pub send_tx_abort: (bool, bool), pub expect_renegotiated_funding_locked_monitor_update: (bool, bool), pub pending_responding_commitment_signed: (bool, bool), /// Indicates that the pending responding commitment signed will be a dup for the recipient, @@ -5150,6 +5160,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { send_stfu: (false, false), send_interactive_tx_commit_sig: (false, false), send_interactive_tx_sigs: (false, false), + send_tx_abort: (false, false), expect_renegotiated_funding_locked_monitor_update: (false, false), pending_responding_commitment_signed: (false, false), pending_responding_commitment_signed_dup_monitor: (false, false), @@ -5174,6 +5185,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { send_stfu, send_interactive_tx_commit_sig, send_interactive_tx_sigs, + send_tx_abort, expect_renegotiated_funding_locked_monitor_update, pending_htlc_adds, pending_htlc_claims, @@ -5305,6 +5317,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ) } + if 
send_tx_abort.0 {
+ let tx_abort = chan_msgs.7.take().unwrap();
+ node_a.node.handle_tx_abort(node_b_id, &tx_abort);
+ } else {
+ assert!(chan_msgs.7.is_none());
+ }
 if send_interactive_tx_sigs.0 {
 let tx_signatures = chan_msgs.5.take().unwrap();
 node_a.node.handle_tx_signatures(node_b_id, &tx_signatures);
@@ -5417,6 +5435,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 &commitment_update.commitment_signed,
 )
 }
+ if send_tx_abort.1 {
+ let tx_abort = chan_msgs.7.take().unwrap();
+ node_b.node.handle_tx_abort(node_a_id, &tx_abort);
+ } else {
+ assert!(chan_msgs.7.is_none());
+ }
 if send_interactive_tx_sigs.1 {
 let tx_signatures = chan_msgs.5.take().unwrap();
 node_b.node.handle_tx_signatures(node_a_id, &tx_signatures);
diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs
index ae7e36ac452..f80b2b6daea 100644
--- a/lightning/src/ln/funding.rs
+++ b/lightning/src/ln/funding.rs
@@ -9,12 +9,13 @@
 //! Types pertaining to funding channels.
 
+use alloc::vec::Vec;
+
 use bitcoin::{Amount, ScriptBuf, SignedAmount, TxOut};
 use bitcoin::{Script, Sequence, Transaction, Weight};
 
 use crate::events::bump_transaction::Utxo;
 use crate::ln::chan_utils::EMPTY_SCRIPT_SIG_WEIGHT;
-use crate::prelude::Vec;
 use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT};
 
 /// The components of a splice's funding transaction that are contributed by one party.
@@ -142,7 +143,13 @@ impl FundingTxInput { /// [`TxIn::sequence`]: bitcoin::TxIn::sequence /// [`set_sequence`]: Self::set_sequence pub fn new_p2wpkh(prevtx: Transaction, vout: u32) -> Result { - let witness_weight = Weight::from_wu(P2WPKH_WITNESS_WEIGHT); + let witness_weight = Weight::from_wu(P2WPKH_WITNESS_WEIGHT) + - if cfg!(feature = "grind_signatures") { + // Guarantees a low R signature + Weight::from_wu(1) + } else { + Weight::ZERO + }; FundingTxInput::new(prevtx, vout, witness_weight, Script::is_p2wpkh) } @@ -223,4 +230,9 @@ impl FundingTxInput { pub fn set_sequence(&mut self, sequence: Sequence) { self.sequence = sequence; } + + /// Converts the [`FundingTxInput`] into a [`Utxo`] for coin selection. + pub fn into_utxo(self) -> Utxo { + self.utxo + } } diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index d1a985f30fe..badd8428db3 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -29,6 +29,7 @@ use bitcoin::{ use crate::chain::chaininterface::fee_for_weight; use crate::ln::chan_utils::{ BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT, FUNDING_TRANSACTION_WITNESS_WEIGHT, + SEGWIT_MARKER_FLAG_WEIGHT, }; use crate::ln::channel::{FundingNegotiationContext, TOTAL_BITCOIN_SUPPLY_SATOSHIS}; use crate::ln::funding::FundingTxInput; @@ -257,16 +258,20 @@ impl_writeable_tlv_based!(ConstructedTransaction, { (11, shared_output_index, required), }); +/// The percent tolerance given to the remote when estimating if they paid enough fees. 
+const REMOTE_FEE_TOLERANCE_PERCENT: u64 = 95; + impl ConstructedTransaction { fn new(context: NegotiationContext) -> Result { let remote_inputs_value = context.remote_inputs_value(); let remote_outputs_value = context.remote_outputs_value(); let remote_weight_contributed = context.remote_weight_contributed(); - let satisfaction_weight = - Weight::from_wu(context.inputs.iter().fold(0u64, |value, (_, input)| { - value.saturating_add(input.satisfaction_weight().to_wu()) - })); + let expected_witness_weight = context.inputs.iter().fold(0u64, |value, (_, input)| { + value + .saturating_add(input.satisfaction_weight().to_wu()) + .saturating_sub(EMPTY_SCRIPT_SIG_WEIGHT) + }); let lock_time = context.tx_locktime; @@ -315,8 +320,10 @@ impl ConstructedTransaction { // - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee). let remote_fees_contributed = remote_inputs_value.saturating_sub(remote_outputs_value); - let required_remote_contribution_fee = - fee_for_weight(context.feerate_sat_per_kw, remote_weight_contributed); + let required_remote_contribution_fee = fee_for_weight( + (context.feerate_sat_per_kw as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, + remote_weight_contributed, + ); if remote_fees_contributed < required_remote_contribution_fee { return Err(AbortReason::InsufficientFees); } @@ -337,8 +344,13 @@ impl ConstructedTransaction { return Err(AbortReason::MissingFundingOutput); } - let tx_weight = tx.tx.weight().checked_add(satisfaction_weight).unwrap_or(Weight::MAX); - if tx_weight > Weight::from_wu(MAX_STANDARD_TX_WEIGHT as u64) { + let tx_weight = tx + .tx + .weight() + .to_wu() + .saturating_add(SEGWIT_MARKER_FLAG_WEIGHT) + .saturating_add(expected_witness_weight); + if tx_weight > MAX_STANDARD_TX_WEIGHT as u64 { return Err(AbortReason::TransactionTooLarge); } @@ -350,6 +362,36 @@ impl ConstructedTransaction { NegotiationError { reason, contributed_inputs, contributed_outputs } } + fn 
to_contributed_inputs_and_outputs(&self) -> (Vec, Vec) { + let contributed_inputs = self + .tx + .input + .iter() + .zip(self.input_metadata.iter()) + .enumerate() + .filter(|(_, (_, input))| input.is_local(self.holder_is_initiator)) + .filter(|(index, _)| { + self.shared_input_index + .map(|shared_index| *index != shared_index as usize) + .unwrap_or(true) + }) + .map(|(_, (txin, _))| txin.previous_output) + .collect(); + + let contributed_outputs = self + .tx + .output + .iter() + .zip(self.output_metadata.iter()) + .enumerate() + .filter(|(_, (_, output))| output.is_local(self.holder_is_initiator)) + .filter(|(index, _)| *index != self.shared_output_index as usize) + .map(|(_, (txout, _))| txout.clone()) + .collect(); + + (contributed_inputs, contributed_outputs) + } + fn into_contributed_inputs_and_outputs(self) -> (Vec, Vec) { let contributed_inputs = self .tx @@ -859,6 +901,10 @@ impl InteractiveTxSigningSession { self.unsigned_tx.into_negotiation_error(reason) } + pub(super) fn to_contributed_inputs_and_outputs(&self) -> (Vec, Vec) { + self.unsigned_tx.to_contributed_inputs_and_outputs() + } + pub(super) fn into_contributed_inputs_and_outputs(self) -> (Vec, Vec) { self.unsigned_tx.into_contributed_inputs_and_outputs() } @@ -1706,7 +1752,15 @@ impl InputOwned { InputOwned::Single(single) => single.satisfaction_weight, // TODO(taproot): Needs to consider different weights based on channel type InputOwned::Shared(_) => { - Weight::from_wu(EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT) + let mut weight = 0; + weight += EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT; + #[cfg(feature = "grind_signatures")] + { + // Guarantees a low R signature + weight -= 1; + } + + Weight::from_wu(weight) }, } } @@ -2312,6 +2366,11 @@ pub(super) fn calculate_change_output_value( weight = weight.saturating_add(BASE_INPUT_WEIGHT); weight = weight.saturating_add(EMPTY_SCRIPT_SIG_WEIGHT); weight = weight.saturating_add(FUNDING_TRANSACTION_WITNESS_WEIGHT); 
+ #[cfg(feature = "grind_signatures")] + { + // Guarantees a low R signature + weight -= 1; + } } } @@ -2364,7 +2423,7 @@ mod tests { use super::{ get_output_weight, ConstructedTransaction, InteractiveTxSigningSession, TxInMetadata, P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND, - P2WSH_INPUT_WEIGHT_LOWER_BOUND, TX_COMMON_FIELDS_WEIGHT, + P2WSH_INPUT_WEIGHT_LOWER_BOUND, REMOTE_FEE_TOLERANCE_PERCENT, TX_COMMON_FIELDS_WEIGHT, }; const TEST_FEERATE_SATS_PER_KW: u32 = FEERATE_FLOOR_SATS_PER_KW * 10; @@ -2829,7 +2888,7 @@ mod tests { let outputs_weight = get_output_weight(&generate_p2wsh_script_pubkey()).to_wu(); let amount_adjusted_with_p2wpkh_fee = 1_000_000 - fee_for_weight( - TEST_FEERATE_SATS_PER_KW, + (TEST_FEERATE_SATS_PER_KW as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, P2WPKH_INPUT_WEIGHT_LOWER_BOUND + TX_COMMON_FIELDS_WEIGHT + outputs_weight, ); do_test_interactive_tx_constructor(TestSession { @@ -2865,7 +2924,7 @@ mod tests { }); let amount_adjusted_with_p2wsh_fee = 1_000_000 - fee_for_weight( - TEST_FEERATE_SATS_PER_KW, + (TEST_FEERATE_SATS_PER_KW as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, P2WSH_INPUT_WEIGHT_LOWER_BOUND + TX_COMMON_FIELDS_WEIGHT + outputs_weight, ); do_test_interactive_tx_constructor(TestSession { @@ -2901,7 +2960,7 @@ mod tests { }); let amount_adjusted_with_p2tr_fee = 1_000_000 - fee_for_weight( - TEST_FEERATE_SATS_PER_KW, + (TEST_FEERATE_SATS_PER_KW as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, P2TR_INPUT_WEIGHT_LOWER_BOUND + TX_COMMON_FIELDS_WEIGHT + outputs_weight, ); do_test_interactive_tx_constructor(TestSession { @@ -3351,21 +3410,23 @@ mod tests { FundingTxInput::new_p2wpkh(prevtx, 0).unwrap() }) .collect(); - let our_contributed = 110_000; let txout = TxOut { value: Amount::from_sat(10_000), script_pubkey: ScriptBuf::new() }; let outputs = vec![txout]; let funding_feerate_sat_per_1000_weight = 3000; - let total_inputs: u64 = input_prevouts.iter().map(|o| o.value.to_sat()).sum(); - let 
total_outputs: u64 = outputs.iter().map(|o| o.value.to_sat()).sum(); - let gross_change = total_inputs - total_outputs - our_contributed; - let fees = 1746; - let common_fees = 234; + let total_inputs: Amount = input_prevouts.iter().map(|o| o.value).sum(); + let total_outputs: Amount = outputs.iter().map(|o| o.value).sum(); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(1734) + } else { + Amount::from_sat(1740) + }; + let common_fees = Amount::from_sat(234); // There is leftover for change let context = FundingNegotiationContext { is_initiator: true, - our_funding_contribution: SignedAmount::from_sat(our_contributed as i64), + our_funding_contribution: SignedAmount::from_sat(110_000), funding_tx_locktime: AbsoluteLockTime::ZERO, funding_feerate_sat_per_1000_weight, shared_funding_input: None, @@ -3373,16 +3434,18 @@ mod tests { our_funding_outputs: outputs, change_script: None, }; + let gross_change = + total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some(gross_change - fees - common_fees)), + Ok(Some((gross_change - fees - common_fees).to_sat())), ); // There is leftover for change, without common fees let context = FundingNegotiationContext { is_initiator: false, ..context }; assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some(gross_change - fees)), + Ok(Some((gross_change - fees).to_sat())), ); // Insufficient inputs, no leftover @@ -3413,21 +3476,25 @@ mod tests { our_funding_contribution: SignedAmount::from_sat(117_992), ..context }; + let gross_change = + total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 100), - Ok(Some(262)), + Ok(Some((gross_change - fees).to_sat())), ); // Larger fee, smaller change let context = FundingNegotiationContext { 
is_initiator: true, - our_funding_contribution: SignedAmount::from_sat(our_contributed as i64), + our_funding_contribution: SignedAmount::from_sat(110_000), funding_feerate_sat_per_1000_weight: funding_feerate_sat_per_1000_weight * 3, ..context }; + let gross_change = + total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some(4060)), + Ok(Some((gross_change - fees * 3 - common_fees * 3).to_sat())), ); } diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index f95883e9434..e387ac3bfdd 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -395,17 +395,28 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { // retry later. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let (persister_0a, persister_0b, persister_0c, persister_1a, persister_1b, persister_1c); + let ( + persister_0a, + persister_0b, + persister_0c, + persister_0d, + persister_1a, + persister_1b, + persister_1c, + persister_1d, + ); let ( chain_monitor_0a, chain_monitor_0b, chain_monitor_0c, + chain_monitor_0d, chain_monitor_1a, chain_monitor_1b, chain_monitor_1c, + chain_monitor_1d, ); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let (node_0a, node_0b, node_0c, node_1a, node_1b, node_1c); + let (node_0a, node_0b, node_0c, node_0d, node_1a, node_1b, node_1c, node_1d); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); @@ -533,9 +544,56 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); - // Attempt a splice negotiation that completes, (i.e. `tx_signatures` are exchanged). Reconnecting - // should not abort the negotiation or reset the splice state. 
- let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, contribution); + nodes[0] + .node + .splice_channel( + &channel_id, + &node_id_1, + contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + + // Attempt a splice negotiation that ends before the initial `commitment_signed` messages are + // exchanged. The node missing the other's `commitment_signed` upon reconnecting should + // implicitly abort the negotiation and reset the splice state such that we're able to retry + // another splice later. + let stfu = get_event_msg!(nodes[0], MessageSendEvent::SendStfu, node_id_1); + nodes[1].node.handle_stfu(node_id_0, &stfu); + let stfu = get_event_msg!(nodes[1], MessageSendEvent::SendStfu, node_id_0); + nodes[0].node.handle_stfu(node_id_1, &stfu); + + let splice_init = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceInit, node_id_1); + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + + let tx_add_input = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddInput, node_id_1); + nodes[1].node.handle_tx_add_input(node_id_0, &tx_add_input); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let tx_add_output = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddOutput, node_id_1); + nodes[1].node.handle_tx_add_output(node_id_0, &tx_add_output); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let tx_add_output = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddOutput, node_id_1); + nodes[1].node.handle_tx_add_output(node_id_0, &tx_add_output); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + 
nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + if let MessageSendEvent::SendTxComplete { .. } = &msg_events[0] { + } else { + panic!("Unexpected event"); + } + if let MessageSendEvent::UpdateHTLCs { .. } = &msg_events[1] { + } else { + panic!("Unexpected event"); + } if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); @@ -561,6 +619,44 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[1].node.peer_disconnected(node_id_0); } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, false); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_args.send_tx_abort = (true, false); + reconnect_nodes(reconnect_args); + + let tx_abort = get_event_msg!(nodes[0], MessageSendEvent::SendTxAbort, node_id_1); + nodes[1].node.handle_tx_abort(node_id_0, &tx_abort); + let _event = get_event!(nodes[0], Event::SpliceFailed); + + // Attempt a splice negotiation that completes, (i.e. `tx_signatures` are exchanged). Reconnecting + // should not abort the negotiation or reset the splice state. 
+ let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, contribution); + + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + &nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0d, + chain_monitor_0d, + node_0d + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + &nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1d, + chain_monitor_1d, + node_1d + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 615b2991f17..15e744e1a7a 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -599,6 +599,8 @@ where /// /// Returns an error if the parameterized [`Router`] is unable to create a blinded path for the offer. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// [`DefaultMessageRouter`]: crate::onion_message::messenger::DefaultMessageRouter pub fn create_offer_builder( &self, entropy_source: ES, peers: Vec, @@ -620,6 +622,8 @@ where /// This gives users full control over how the [`BlindedMessagePath`] is constructed, /// including the option to omit it entirely. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// See [`Self::create_offer_builder`] for more details on usage. pub fn create_offer_builder_using_router( &self, router: ME, entropy_source: ES, peers: Vec, @@ -646,6 +650,8 @@ where /// 2. 
Use [`Self::create_static_invoice_builder`] to create a [`StaticInvoice`] from this /// [`Offer`] plus the returned [`Nonce`], and provide the static invoice to the /// aforementioned always-online node. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_async_receive_offer_builder( &self, entropy_source: ES, message_paths_to_always_online_node: Vec, ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> @@ -728,6 +734,8 @@ where /// - `amount_msats` is invalid, or /// - The parameterized [`Router`] is unable to create a blinded path for the refund. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value pub fn create_refund_builder( @@ -764,6 +772,8 @@ where /// return an error if the provided [`MessageRouter`] fails to construct a valid /// [`BlindedMessagePath`] for the refund. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// [`Refund`]: crate::offers::refund::Refund /// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice @@ -799,6 +809,8 @@ where /// # Nonce /// The nonce is used to create a unique [`InvoiceRequest::payer_metadata`] for the invoice request. /// These will be used to verify the corresponding [`Bolt12Invoice`] when it is received. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. 
pub fn create_invoice_request_builder<'a>( &'a self, offer: &'a Offer, nonce: Nonce, payment_id: PaymentId, ) -> Result, Bolt12SemanticError> { @@ -814,6 +826,8 @@ where /// Creates a [`StaticInvoiceBuilder`] from the corresponding [`Offer`] and [`Nonce`] that were /// created via [`Self::create_async_receive_offer_builder`]. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_static_invoice_builder<'a, ES: Deref, R: Deref>( &self, router: &R, entropy_source: ES, offer: &'a Offer, offer_nonce: Nonce, payment_secret: PaymentSecret, relative_expiry_secs: u32, @@ -886,6 +900,8 @@ where /// /// Returns an error if the refund targets a different chain or if no valid /// blinded path can be constructed. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_invoice_builder_from_refund<'a, ES: Deref, R: Deref>( &'a self, router: &R, entropy_source: ES, refund: &'a Refund, payment_hash: PaymentHash, payment_secret: PaymentSecret, usable_channels: Vec, diff --git a/lightning/src/offers/merkle.rs b/lightning/src/offers/merkle.rs index 4f27130bcc9..1a38fe5441f 100644 --- a/lightning/src/offers/merkle.rs +++ b/lightning/src/offers/merkle.rs @@ -94,6 +94,9 @@ pub enum SignError { } /// A function for signing a [`TaggedHash`]. +/// +/// This is not exported to bindings users as signing functions should just be used per-signed-type +/// instead. pub trait SignFn> { /// Signs a [`TaggedHash`] computed over the merkle root of `message`'s TLV stream. fn sign(&self, message: &T) -> Result; diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index 8e8d01b4e50..99dd1bb938d 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -142,7 +142,10 @@ pub enum Bolt12ParseError { /// being parsed. InvalidBech32Hrp, /// The string could not be bech32 decoded. 
- Bech32(CheckedHrpstringError), + Bech32( + /// This is not exported to bindings users as the details don't matter much + CheckedHrpstringError, + ), /// The bech32 decoded string could not be decoded as the expected message type. Decode(DecodeError), /// The parsed message has invalid semantics. diff --git a/lightning/src/sign/ecdsa.rs b/lightning/src/sign/ecdsa.rs index bfa3a27acb1..e13285722af 100644 --- a/lightning/src/sign/ecdsa.rs +++ b/lightning/src/sign/ecdsa.rs @@ -7,7 +7,8 @@ use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use crate::ln::chan_utils::{ - ClosingTransaction, CommitmentTransaction, HTLCOutputInCommitment, HolderCommitmentTransaction, + ChannelTransactionParameters, ClosingTransaction, CommitmentTransaction, + HTLCOutputInCommitment, HolderCommitmentTransaction, }; use crate::ln::msgs::UnsignedChannelAnnouncement; use crate::types::payment::PaymentPreimage; @@ -15,7 +16,7 @@ use crate::types::payment::PaymentPreimage; #[allow(unused_imports)] use crate::prelude::*; -use crate::sign::{ChannelSigner, ChannelTransactionParameters, HTLCDescriptor}; +use crate::sign::{ChannelSigner, HTLCDescriptor}; /// A trait to sign Lightning channel transactions as described in /// [BOLT 3](https://github.com/lightning/bolts/blob/master/03-transactions.md). diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 5f525cea6b9..ac24a4a4040 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -81,6 +81,11 @@ pub mod ecdsa; pub mod taproot; pub mod tx_builder; +pub(crate) const COMPRESSED_PUBLIC_KEY_SIZE: usize = bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; + +pub(crate) const MAX_STANDARD_SIGNATURE_SIZE: usize = + bitcoin::secp256k1::constants::MAX_SIGNATURE_SIZE; + /// Information about a spendable output to a P2WSH script. /// /// See [`SpendableOutputDescriptor::DelayedPaymentOutput`] for more details on how to spend this. 
@@ -112,12 +117,15 @@ pub struct DelayedPaymentOutputDescriptor { impl DelayedPaymentOutputDescriptor { /// The maximum length a well-formed witness spending one of these should have. - /// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte + /// + /// Note: If you have the `grind_signatures` feature enabled, this will be at least 1 byte /// shorter. - // Calculated as 1 byte length + 73 byte signature, 1 byte empty vec push, 1 byte length plus - // redeemscript push length. - pub const MAX_WITNESS_LENGTH: u64 = - 1 + 73 + 1 + chan_utils::REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH as u64 + 1; + pub const MAX_WITNESS_LENGTH: u64 = (1 /* witness items */ + + 1 /* sig push */ + + MAX_STANDARD_SIGNATURE_SIZE + + 1 /* empty vec push */ + + 1 /* redeemscript push */ + + chan_utils::REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH) as u64; } impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, { @@ -131,15 +139,18 @@ impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, { (13, channel_transaction_parameters, (option: ReadableArgs, Some(channel_value_satoshis.0.unwrap()))), }); -pub(crate) const P2WPKH_WITNESS_WEIGHT: u64 = 1 /* num stack items */ + - 1 /* sig length */ + - 73 /* sig including sighash flag */ + - 1 /* pubkey length */ + - 33 /* pubkey */; +/// Witness weight for satisfying a P2WPKH spend. +pub(crate) const P2WPKH_WITNESS_WEIGHT: u64 = (1 /* witness items */ + + 1 /* sig push */ + + MAX_STANDARD_SIGNATURE_SIZE + + 1 /* pubkey push */ + + COMPRESSED_PUBLIC_KEY_SIZE) as u64; -/// Witness weight for satisying a P2TR key-path spend. -pub(crate) const P2TR_KEY_PATH_WITNESS_WEIGHT: u64 = 1 /* witness items */ - + 1 /* schnorr sig len */ + 64 /* schnorr sig */; +/// Witness weight for satisfying a P2TR key-path spend. 
+pub(crate) const P2TR_KEY_PATH_WITNESS_WEIGHT: u64 = (1 /* witness items */ + + 1 /* sig push */ + + bitcoin::secp256k1::constants::SCHNORR_SIGNATURE_SIZE) + as u64; /// If a [`KeysManager`] is built with [`KeysManager::new`] with `v2_remote_key_derivation` set /// (and for all channels after they've been spliced), the script which we receive funds to on-chain @@ -188,14 +199,21 @@ impl StaticPaymentOutputDescriptor { } /// The maximum length a well-formed witness spending one of these should have. - /// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte + /// + /// Note: If you have the `grind_signatures` feature enabled, this will be at least 1 byte /// shorter. pub fn max_witness_length(&self) -> u64 { if self.needs_csv_1_for_spend() { - let witness_script_weight = 1 /* pubkey push */ + 33 /* pubkey */ + - 1 /* OP_CHECKSIGVERIFY */ + 1 /* OP_1 */ + 1 /* OP_CHECKSEQUENCEVERIFY */; - 1 /* num witness items */ + 1 /* sig push */ + 73 /* sig including sighash flag */ + - 1 /* witness script push */ + witness_script_weight + let witness_script_weight = 1 /* pubkey push */ + + COMPRESSED_PUBLIC_KEY_SIZE + + 1 /* OP_CHECKSIGVERIFY */ + + 1 /* OP_1 */ + + 1 /* OP_CHECKSEQUENCEVERIFY */; + (1 /* num witness items */ + + 1 /* sig push */ + + MAX_STANDARD_SIGNATURE_SIZE + + 1 /* witness script push */ + + witness_script_weight) as u64 } else { P2WPKH_WITNESS_WEIGHT } @@ -511,7 +529,7 @@ impl SpendableOutputDescriptor { sequence: Sequence::ZERO, witness: Witness::new(), }); - witness_weight += 1 + 73 + 34; + witness_weight += P2WPKH_WITNESS_WEIGHT; #[cfg(feature = "grind_signatures")] { // Guarantees a low R signature @@ -1058,6 +1076,8 @@ pub trait SignerProvider { /// A helper trait that describes an on-chain wallet capable of returning a (change) destination /// script. +/// +/// This is not exported to bindings users as async is only supported in Rust. 
pub trait ChangeDestinationSource { /// Returns a script pubkey which can be used as a change destination for /// [`OutputSpender::spend_spendable_outputs`]. @@ -1069,6 +1089,9 @@ pub trait ChangeDestinationSource { /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. pub trait ChangeDestinationSourceSync { + /// Returns a script pubkey which can be used as a change destination for + /// [`OutputSpender::spend_spendable_outputs`]. + /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. fn get_change_destination_script(&self) -> Result; diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 7161bc77123..eefa40d1055 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -94,27 +94,39 @@ pub(crate) fn dummy_waker() -> Waker { #[cfg(feature = "std")] /// A type alias for a future that returns a result of type `T` or error `E`. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; #[cfg(not(feature = "std"))] /// A type alias for a future that returns a result of type `T` or error `E`. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub type AsyncResult<'a, T, E> = Pin> + 'a>>; /// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. #[cfg(feature = "std")] pub use core::marker::Sync as MaybeSync; #[cfg(not(feature = "std"))] /// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait MaybeSync {} #[cfg(not(feature = "std"))] impl MaybeSync for T where T: ?Sized {} /// Marker trait to optionally implement `Send` under std. 
+/// +/// This is not exported to bindings users as async is only supported in Rust. #[cfg(feature = "std")] pub use core::marker::Send as MaybeSend; #[cfg(not(feature = "std"))] /// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait MaybeSend {} #[cfg(not(feature = "std"))] impl MaybeSend for T where T: ?Sized {} diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index dc26cb42bd0..886146e976d 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -18,6 +18,8 @@ use core::future::Future; use core::pin::Pin; /// A generic trait which is able to spawn futures in the background. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait FutureSpawner: MaybeSend + MaybeSync + 'static { /// Spawns the given future as a background task. /// diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index e75f35e65cd..d00e29e686a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -38,6 +38,7 @@ use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; +use crate::util::wakers::Notifier; /// The alphabet of characters allowed for namespaces and keys. pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = @@ -117,21 +118,75 @@ pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper"; /// updates applied to be current) with another implementation. pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2]; -/// A synchronous version of the [`KVStore`] trait. +/// Provides an interface that allows storage and retrieval of persisted values that are associated +/// with given keys. 
+/// +/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s +/// and `secondary_namespace`s. Implementations of this trait are free to handle them in different +/// ways, as long as per-namespace key uniqueness is asserted. +/// +/// Keys and namespaces are required to be valid ASCII strings in the range of +/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty +/// primary namespaces and secondary namespaces (`""`) are assumed to be valid, however, if +/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means +/// that concerns should always be separated by primary namespace first, before secondary +/// namespaces are used. While the number of primary namespaces will be relatively small and is +/// determined at compile time, there may be many secondary namespaces per primary namespace. Note +/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given +/// namespace, i.e., conflicts between keys and equally named +/// primary namespaces/secondary namespaces must be avoided. +/// +/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` +/// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to +/// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. +/// +/// For an asynchronous version of this trait, see [`KVStore`]. +// Note that updates to documentation on this trait should be copied to the asynchronous version. pub trait KVStoreSync { - /// A synchronous version of the [`KVStore::read`] method. + /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and + /// `key`. + /// + /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given + /// `primary_namespace` and `secondary_namespace`. 
+ /// + /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Result, io::Error>; - /// A synchronous version of the [`KVStore::write`] method. + /// Persists the given data under the given `key`. + /// + /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> Result<(), io::Error>; - /// A synchronous version of the [`KVStore::remove`] method. + /// Removes any data that had previously been persisted under the given `key`. + /// + /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily + /// remove the given `key` at some point in time after the method returns, e.g., as part of an + /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to + /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. + /// + /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent + /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could + /// potentially get lost on crash after the method returns. Therefore, this flag should only be + /// set for `remove` operations that can be safely replayed at a later time. + /// + /// All removal operations must complete in a consistent total order with [`Self::write`]s + /// to the same key. Whether a removal operation is `lazy` or not, [`Self::write`] operations + /// to the same key which occur before a removal completes must cancel/overwrite the pending + /// removal. + /// + /// Returns successfully if no data will be stored for the given `primary_namespace`, + /// `secondary_namespace`, and `key`, independently of whether it was present before its + /// invocation or not. 
fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Result<(), io::Error>; - /// A synchronous version of the [`KVStore::list`] method. + /// Returns a list of keys that are stored under the given `secondary_namespace` in + /// `primary_namespace`. + /// + /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the + /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. fn list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> Result, io::Error>; @@ -154,6 +209,7 @@ where } } +/// This is not exported to bindings users as async is only supported in Rust. impl KVStore for KVStoreSyncWrapper where K::Target: KVStoreSync, @@ -212,6 +268,11 @@ where /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. +/// +/// For a synchronous version of this trait, see [`KVStoreSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this trait should be copied to the synchronous version. pub trait KVStore { /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and /// `key`. @@ -716,6 +777,8 @@ where /// Unlike [`MonitorUpdatingPersister`], this does not implement [`Persist`], but is instead used /// directly by the [`ChainMonitor`] via [`ChainMonitor::new_async_beta`]. /// +/// This is not exported to bindings users as async is only supported in Rust. 
+/// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor /// [`ChainMonitor::new_async_beta`]: crate::chain::chainmonitor::ChainMonitor::new_async_beta pub struct MonitorUpdatingPersisterAsync< @@ -875,6 +938,7 @@ where pub(crate) fn spawn_async_persist_new_channel( &self, monitor_name: MonitorName, monitor: &ChannelMonitor<::EcdsaSigner>, + notifier: Arc, ) { let inner = Arc::clone(&self.0); // Note that `persist_new_channel` is a sync method which calls all the way through to the @@ -884,7 +948,10 @@ where let completion = (monitor.channel_id(), monitor.get_latest_update_id()); self.0.future_spawner.spawn(async move { match future.await { - Ok(()) => inner.async_completed_updates.lock().unwrap().push(completion), + Ok(()) => { + inner.async_completed_updates.lock().unwrap().push(completion); + notifier.notify(); + }, Err(e) => { log_error!( inner.logger, @@ -895,9 +962,10 @@ where }); } - pub(crate) fn spawn_async_update_persisted_channel( + pub(crate) fn spawn_async_update_channel( &self, monitor_name: MonitorName, update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<::EcdsaSigner>, + notifier: Arc, ) { let inner = Arc::clone(&self.0); // Note that `update_persisted_channel` is a sync method which calls all the way through to @@ -914,6 +982,7 @@ where match future.await { Ok(()) => if let Some(completion) = completion { inner.async_completed_updates.lock().unwrap().push(completion); + notifier.notify(); }, Err(e) => { log_error!( diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index d78b3e921cc..f821aa5afc0 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -229,6 +229,8 @@ impl<'a, R: Read> Read for FixedLengthReader<'a, R> { } } +/// This is not exported to bindings users as reads are always from byte arrays, never streams, in +/// bindings. 
impl<'a, R: Read> LengthLimitedRead for FixedLengthReader<'a, R> { #[inline] fn remaining_bytes(&self) -> u64 { @@ -350,6 +352,9 @@ where /// A [`io::Read`] that limits the amount of bytes that can be read. Implementations should ensure /// that the object being read will only consume a fixed number of bytes from the underlying /// [`io::Read`], see [`FixedLengthReader`] for an example. +/// +/// This is not exported to bindings users as reads are always from byte arrays, never streams, in +/// bindings. pub trait LengthLimitedRead: Read { /// The number of bytes remaining to be read. fn remaining_bytes(&self) -> u64; @@ -379,6 +384,9 @@ where /// /// Any type that implements [`Readable`] also automatically has a [`LengthReadable`] /// implementation, but some types, most notably onion packets, only implement [`LengthReadable`]. +/// +/// This is not exported to bindings users as reads are always from byte arrays, never streams, in +/// bindings. pub trait LengthReadable where Self: Sized, diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index cba92f64c0c..b31dab1ccf6 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -317,10 +317,11 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, ); /// A utility that keeps track of [`SpendableOutputDescriptor`]s, persists them in a given -/// [`KVStoreSync`] and regularly retries sweeping them based on a callback given to the constructor +/// [`KVStore`] and regularly retries sweeping them based on a callback given to the constructor /// methods. /// -/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s received via [`Event::SpendableOutputs`]. +/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s +/// received via [`Event::SpendableOutputs`]. 
/// /// This needs to be notified of chain state changes either via its [`Listen`] or [`Confirm`] /// implementation and hence has to be connected with the utilized chain data sources. @@ -329,7 +330,12 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, /// required to give their chain data sources (i.e., [`Filter`] implementation) to the respective /// constructor. /// +/// For a synchronous version of this struct, see [`OutputSweeperSync`]. +/// +/// This is not exported to bindings users as async is not supported outside of Rust. +/// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs +// Note that updates to documentation on this struct should be copied to the synchronous version. pub struct OutputSweeper where B::Target: BroadcasterInterface, @@ -876,7 +882,24 @@ where } } -/// A synchronous wrapper around [`OutputSweeper`] to be used in contexts where async is not available. +/// A utility that keeps track of [`SpendableOutputDescriptor`]s, persists them in a given +/// [`KVStoreSync`] and regularly retries sweeping them based on a callback given to the constructor +/// methods. +/// +/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s +/// received via [`Event::SpendableOutputs`]. +/// +/// This needs to be notified of chain state changes either via its [`Listen`] or [`Confirm`] +/// implementation and hence has to be connected with the utilized chain data sources. +/// +/// If chain data is provided via the [`Confirm`] interface or via filtered blocks, users are +/// required to give their chain data sources (i.e., [`Filter`] implementation) to the respective +/// constructor. +/// +/// For an asynchronous version of this struct, see [`OutputSweeper`]. +/// +/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs +// Note that updates to documentation on this struct should be copied to the asynchronous version. 
pub struct OutputSweeperSync where B::Target: BroadcasterInterface, @@ -903,6 +926,9 @@ where O::Target: OutputSpender, { /// Constructs a new [`OutputSweeperSync`] instance. + /// + /// If chain data is provided via the [`Confirm`] interface or via filtered blocks, users also + /// need to register their [`Filter`] implementation via the given `chain_data_source`. pub fn new( best_block: BestBlock, broadcaster: B, fee_estimator: E, chain_data_source: Option, output_spender: O, change_destination_source: D, kv_store: K, logger: L, @@ -925,7 +951,21 @@ where Self { sweeper } } - /// Wrapper around [`OutputSweeper::track_spendable_outputs`]. + /// Tells the sweeper to track the given output descriptors. + /// + /// Usually, this should be called based on the values emitted by the + /// [`Event::SpendableOutputs`]. + /// + /// The given `exclude_static_outputs` flag controls whether the sweeper will filter out + /// [`SpendableOutputDescriptor::StaticOutput`]s, which may be handled directly by the on-chain + /// wallet implementation. + /// + /// If `delay_until_height` is set, we will delay the spending until the respective block + /// height is reached. This can be used to batch spends, e.g., to reduce on-chain fees. + /// + /// Returns `Err` on persistence failure, in which case the call may be safely retried. + /// + /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, @@ -947,7 +987,9 @@ where } } - /// Returns a list of the currently tracked spendable outputs. Wraps [`OutputSweeper::tracked_spendable_outputs`]. + /// Returns a list of the currently tracked spendable outputs. + /// + /// Wraps [`OutputSweeper::tracked_spendable_outputs`].
pub fn tracked_spendable_outputs(&self) -> Vec { self.sweeper.tracked_spendable_outputs() } @@ -958,8 +1000,10 @@ where self.sweeper.current_best_block() } - /// Regenerates and broadcasts the spending transaction for any outputs that are pending. Wraps - /// [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. + /// Regenerates and broadcasts the spending transaction for any outputs that are pending. This method will be a + /// no-op if a sweep is already pending. + /// + /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); @@ -979,6 +1023,8 @@ where /// this [`OutputSweeperSync`], fetching an async [`OutputSweeper`] won't accomplish much, all /// the async methods will hang waiting on your sync [`KVStore`] and likely confuse your async /// runtime. This exists primarily for LDK-internal use, including outside of this crate. + /// + /// This is not exported to bindings users as async is not supported outside of Rust. #[doc(hidden)] pub fn sweeper_async( &self, diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 54ed67d4714..c9f9ba2d086 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -2220,10 +2220,16 @@ impl TestWalletSource { utxo.output.value, EcdsaSighashType::All, )?; + #[cfg(not(feature = "grind_signatures"))] let signature = self.secp.sign_ecdsa( &secp256k1::Message::from_digest(sighash.to_byte_array()), &self.secret_key, ); + #[cfg(feature = "grind_signatures")] + let signature = self.secp.sign_ecdsa_low_r( + &secp256k1::Message::from_digest(sighash.to_byte_array()), + &self.secret_key, + ); let bitcoin_sig = bitcoin::ecdsa::Signature { signature, sighash_type: EcdsaSighashType::All }; tx.input[i].witness =