From a61593adb00032e6cd9e1d53492af8f63318b0e1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 1 Nov 2025 22:26:53 +0000 Subject: [PATCH 01/25] `notify` `ChainMonitor`'s `EventNotifier` on async write completion In c08476753f296319cd74c8394ff8cb6bb5a0e7b5 we added a `ChainMonitor::new_async_beta` and a corresponding `MonitorUpdatingPersisterAsync` struct. To avoid circular references, the `ChainMonitor` owns the `MonitorUpdatingPersisterAsync` which uses a `FutureSpawner` to let async writes happen in the background after it returns control flow ownership to the `ChainMonitor`. This is great, except that because `MonitorUpdatingPersisterAsync` thus doesn't have a reference to the `ChainMonitor`, we have to poll for completion of the futures. We do so in the new `Persist::get_and_clear_completed_updates` that was added in the above-referenced commit. But on async monitor write completion we're supposed to call `ChainMonitor`'s `event_notifier.notify()` (ultimately waking up the background processor which `await`s the corresponding update future). This didn't happen in the new async flow. Here we move `ChainMonitor`'s `event_notifier` into an `Arc` and pass a reference to it through to the `MonitorUpdatingPersisterAsync` which can then directly `notify()` it and wake the background processor. 
Backport of 092a50ae556bff93f4e605fbc1bf0201b39c90d4 --- lightning/src/chain/chainmonitor.rs | 21 ++++++++++++++------- lightning/src/util/persist.rs | 11 +++++++++-- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 4abd0cd88c0..62b6c9f5541 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -26,6 +26,8 @@ use bitcoin::block::Header; use bitcoin::hash_types::{BlockHash, Txid}; +use bitcoin::secp256k1::PublicKey; + use crate::chain; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; #[cfg(peer_storage)] @@ -57,7 +59,8 @@ use crate::util::persist::{KVStore, MonitorName, MonitorUpdatingPersisterAsync}; #[cfg(peer_storage)] use crate::util::ser::{VecWriter, Writeable}; use crate::util::wakers::{Future, Notifier}; -use bitcoin::secp256k1::PublicKey; + +use alloc::sync::Arc; #[cfg(peer_storage)] use core::iter::Cycle; use core::ops::Deref; @@ -267,6 +270,7 @@ pub struct AsyncPersister< FE::Target: FeeEstimator, { persister: MonitorUpdatingPersisterAsync, + event_notifier: Arc, } impl< @@ -314,7 +318,8 @@ where &self, monitor_name: MonitorName, monitor: &ChannelMonitor<::EcdsaSigner>, ) -> ChannelMonitorUpdateStatus { - self.persister.spawn_async_persist_new_channel(monitor_name, monitor); + let notifier = Arc::clone(&self.event_notifier); + self.persister.spawn_async_persist_new_channel(monitor_name, monitor, notifier); ChannelMonitorUpdateStatus::InProgress } @@ -322,7 +327,8 @@ where &self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<::EcdsaSigner>, ) -> ChannelMonitorUpdateStatus { - self.persister.spawn_async_update_persisted_channel(monitor_name, monitor_update, monitor); + let notifier = Arc::clone(&self.event_notifier); + self.persister.spawn_async_update_channel(monitor_name, monitor_update, monitor, notifier); ChannelMonitorUpdateStatus::InProgress } @@ 
-382,7 +388,7 @@ pub struct ChainMonitor< /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process). - event_notifier: Notifier, + event_notifier: Arc, /// Messages to send to the peer. This is currently used to distribute PeerStorage to channel partners. pending_send_only_events: Mutex>, @@ -430,17 +436,18 @@ impl< persister: MonitorUpdatingPersisterAsync, _entropy_source: ES, _our_peerstorage_encryption_key: PeerStorageKey, ) -> Self { + let event_notifier = Arc::new(Notifier::new()); Self { monitors: RwLock::new(new_hash_map()), chain_source, broadcaster, logger, fee_estimator: feeest, - persister: AsyncPersister { persister }, _entropy_source, pending_monitor_events: Mutex::new(Vec::new()), highest_chain_height: AtomicUsize::new(0), - event_notifier: Notifier::new(), + event_notifier: Arc::clone(&event_notifier), + persister: AsyncPersister { persister, event_notifier }, pending_send_only_events: Mutex::new(Vec::new()), #[cfg(peer_storage)] our_peerstorage_encryption_key: _our_peerstorage_encryption_key, @@ -656,7 +663,7 @@ where _entropy_source, pending_monitor_events: Mutex::new(Vec::new()), highest_chain_height: AtomicUsize::new(0), - event_notifier: Notifier::new(), + event_notifier: Arc::new(Notifier::new()), pending_send_only_events: Mutex::new(Vec::new()), #[cfg(peer_storage)] our_peerstorage_encryption_key: _our_peerstorage_encryption_key, diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index e75f35e65cd..49addd7dbc3 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -38,6 +38,7 @@ use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; +use crate::util::wakers::Notifier; /// The alphabet of characters allowed for namespaces 
and keys. pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = @@ -875,6 +876,7 @@ where pub(crate) fn spawn_async_persist_new_channel( &self, monitor_name: MonitorName, monitor: &ChannelMonitor<::EcdsaSigner>, + notifier: Arc, ) { let inner = Arc::clone(&self.0); // Note that `persist_new_channel` is a sync method which calls all the way through to the @@ -884,7 +886,10 @@ where let completion = (monitor.channel_id(), monitor.get_latest_update_id()); self.0.future_spawner.spawn(async move { match future.await { - Ok(()) => inner.async_completed_updates.lock().unwrap().push(completion), + Ok(()) => { + inner.async_completed_updates.lock().unwrap().push(completion); + notifier.notify(); + }, Err(e) => { log_error!( inner.logger, @@ -895,9 +900,10 @@ where }); } - pub(crate) fn spawn_async_update_persisted_channel( + pub(crate) fn spawn_async_update_channel( &self, monitor_name: MonitorName, update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<::EcdsaSigner>, + notifier: Arc, ) { let inner = Arc::clone(&self.0); // Note that `update_persisted_channel` is a sync method which calls all the way through to @@ -914,6 +920,7 @@ where match future.await { Ok(()) => if let Some(completion) = completion { inner.async_completed_updates.lock().unwrap().push(completion); + notifier.notify(); }, Err(e) => { log_error!( From 7b2b62e87d72351580abb7b0298b9ec450c0b428 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 3 Nov 2025 10:13:58 +0100 Subject: [PATCH 02/25] Don't prune `pending_requests` in `persist` Previously, we would also prune any pending `GetInfo` or expired `Buy` requests before we `persist`. This could have led to races where we drop a pending request and even remove the peer when calling `persist`. Here, we simply split the pruning logic for the `pending_requests` and expired JIT channel state, and only prune the latter before persisting. 
This generally makes sense, as `pending_requests` isn't currently persisted, so there is no need to prune before repersisting. Both are however still pruned on peer disconnection. Backport of 5f8c54a6794bf3b19cbba1aed7aaed724f155353 --- lightning-liquidity/src/lsps2/service.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 494a4f35cab..dda9922686d 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -619,7 +619,7 @@ impl PeerState { self.needs_persist |= true; } - fn prune_expired_request_state(&mut self) { + fn prune_pending_requests(&mut self) { self.pending_requests.retain(|_, entry| { match entry { LSPS2Request::GetInfo(_) => false, @@ -629,7 +629,9 @@ impl PeerState { }, } }); + } + fn prune_expired_request_state(&mut self) { self.outbound_channels_by_intercept_scid.retain(|intercept_scid, entry| { if entry.is_prunable() { // We abort the flow, and prune any data kept. @@ -1871,6 +1873,7 @@ where let mut peer_state_lock = inner_state_lock.lock().unwrap(); // We clean up the peer state, but leave removing the peer entry to the prune logic in // `persist` which removes it from the store. + peer_state_lock.prune_pending_requests(); peer_state_lock.prune_expired_request_state(); } } From 48be54f5e5b47a1db142bece99a9ff248232b7bf Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 28 Dec 2024 00:24:51 +0000 Subject: [PATCH 03/25] Mark `CheckedHrpstringError` instances as no-export The details of these errors aren't particularly relevant, and it's not worth the effort of exporting them, so we just mark them no-export here. 
Backport of 16aabce7b372b8760fca1ba2acac585883c2d047 --- lightning-invoice/src/lib.rs | 5 ++++- lightning/src/offers/parse.rs | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 3634d67f1da..47f929377de 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -91,7 +91,10 @@ use crate::ser::Base32Iterable; #[allow(missing_docs)] #[derive(PartialEq, Eq, Debug, Clone)] pub enum Bolt11ParseError { - Bech32Error(CheckedHrpstringError), + Bech32Error( + /// This is not exported to bindings users as the details don't matter much + CheckedHrpstringError, + ), ParseAmountError(ParseIntError), MalformedSignature(bitcoin::secp256k1::Error), BadPrefix, diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index 8e8d01b4e50..99dd1bb938d 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -142,7 +142,10 @@ pub enum Bolt12ParseError { /// being parsed. InvalidBech32Hrp, /// The string could not be bech32 decoded. - Bech32(CheckedHrpstringError), + Bech32( + /// This is not exported to bindings users as the details don't matter much + CheckedHrpstringError, + ), /// The bech32 decoded string could not be decoded as the expected message type. Decode(DecodeError), /// The parsed message has invalid semantics. From f7ff7991f8b87a10b7d0e647807dcd5ee725e9c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 19:58:24 +0000 Subject: [PATCH 04/25] Mark BOLT 12 builders no-export in the new `OffersMessageFlow` As move semantics do not map to bindings, the BOLT 12 object builders are marked no-export. In the new `OffersMessageFlow`, here, we also mark the methods which return these builders as no-export. 
Backport of 7fb0f2a457bdef4e72a5547f9ace2375fb304fc0 Trivial conflicts resolved in: * lightning/src/offers/flow.rs --- lightning/src/offers/flow.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 615b2991f17..15e744e1a7a 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -599,6 +599,8 @@ where /// /// Returns an error if the parameterized [`Router`] is unable to create a blinded path for the offer. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// [`DefaultMessageRouter`]: crate::onion_message::messenger::DefaultMessageRouter pub fn create_offer_builder( &self, entropy_source: ES, peers: Vec, @@ -620,6 +622,8 @@ where /// This gives users full control over how the [`BlindedMessagePath`] is constructed, /// including the option to omit it entirely. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// See [`Self::create_offer_builder`] for more details on usage. pub fn create_offer_builder_using_router( &self, router: ME, entropy_source: ES, peers: Vec, @@ -646,6 +650,8 @@ where /// 2. Use [`Self::create_static_invoice_builder`] to create a [`StaticInvoice`] from this /// [`Offer`] plus the returned [`Nonce`], and provide the static invoice to the /// aforementioned always-online node. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_async_receive_offer_builder( &self, entropy_source: ES, message_paths_to_always_online_node: Vec, ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> @@ -728,6 +734,8 @@ where /// - `amount_msats` is invalid, or /// - The parameterized [`Router`] is unable to create a blinded path for the refund. 
/// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value pub fn create_refund_builder( @@ -764,6 +772,8 @@ where /// return an error if the provided [`MessageRouter`] fails to construct a valid /// [`BlindedMessagePath`] for the refund. /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. + /// /// [`Refund`]: crate::offers::refund::Refund /// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice @@ -799,6 +809,8 @@ where /// # Nonce /// The nonce is used to create a unique [`InvoiceRequest::payer_metadata`] for the invoice request. /// These will be used to verify the corresponding [`Bolt12Invoice`] when it is received. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_invoice_request_builder<'a>( &'a self, offer: &'a Offer, nonce: Nonce, payment_id: PaymentId, ) -> Result, Bolt12SemanticError> { @@ -814,6 +826,8 @@ where /// Creates a [`StaticInvoiceBuilder`] from the corresponding [`Offer`] and [`Nonce`] that were /// created via [`Self::create_async_receive_offer_builder`]. + /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_static_invoice_builder<'a, ES: Deref, R: Deref>( &self, router: &R, entropy_source: ES, offer: &'a Offer, offer_nonce: Nonce, payment_secret: PaymentSecret, relative_expiry_secs: u32, @@ -886,6 +900,8 @@ where /// /// Returns an error if the refund targets a different chain or if no valid /// blinded path can be constructed. 
+ /// + /// This is not exported to bindings users as builder patterns don't map outside of move semantics. pub fn create_invoice_builder_from_refund<'a, ES: Deref, R: Deref>( &'a self, router: &R, entropy_source: ES, refund: &'a Refund, payment_hash: PaymentHash, payment_secret: PaymentSecret, usable_channels: Vec, From 090de007ccd0a3f356689802f8316340bec33fb3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 20:00:15 +0000 Subject: [PATCH 05/25] Require the high-level sign traits instead of `SignFn` in bindings Methods like `StaticInvoice::sign` have their own signing-function traits, so there's no actual need to use `SignFn` directly, it's just a useful generic wrapper. Sadly, because it uses `AsRef` bounds, which aren't really practical to map in bindings, we can't really expose it directly in bindings. Because there's an alternative and it's tricky to expose, we simply mark it no-export here. Backport of 2bf737cea1e4e5dde69fa1f3f7c28bb888713f20 --- lightning/src/offers/merkle.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/offers/merkle.rs b/lightning/src/offers/merkle.rs index 4f27130bcc9..1a38fe5441f 100644 --- a/lightning/src/offers/merkle.rs +++ b/lightning/src/offers/merkle.rs @@ -94,6 +94,9 @@ pub enum SignError { } /// A function for signing a [`TaggedHash`]. +/// +/// This is not exported to bindings users as signing functions should just be used per-signed-type +/// instead. pub trait SignFn> { /// Signs a [`TaggedHash`] computed over the merkle root of `message`'s TLV stream. fn sign(&self, message: &T) -> Result; From 71287da54ac417978478e703a355353c7093a4f1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 00:01:15 +0000 Subject: [PATCH 06/25] Mark async traits and structs no-export We do not currently support async traits or methods in our home-grown bindings logic, so here we mark them no-export. 
Backport of 3f82fd613ba10f4e41e211dab113b2d45ad6f84d --- lightning/src/chain/chainmonitor.rs | 4 ++++ lightning/src/events/bump_transaction/mod.rs | 8 ++++++++ lightning/src/sign/mod.rs | 2 ++ lightning/src/util/async_poll.rs | 12 ++++++++++++ lightning/src/util/native_async.rs | 2 ++ lightning/src/util/persist.rs | 5 +++++ lightning/src/util/sweep.rs | 4 ++++ 7 files changed, 37 insertions(+) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 62b6c9f5541..d8d6c90921f 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -253,6 +253,8 @@ impl Deref for LockedChannelMonitor<'_, Chann /// An unconstructable [`Persist`]er which is used under the hood when you call /// [`ChainMonitor::new_async_beta`]. +/// +/// This is not exported to bindings users as async is not supported outside of Rust. pub struct AsyncPersister< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, @@ -431,6 +433,8 @@ impl< /// [`MonitorUpdatingPersisterAsync`] and thus allows persistence to be completed async. /// /// Note that async monitor updating is considered beta, and bugs may be triggered by its use. + /// + /// This is not exported to bindings users as async is not supported outside of Rust. pub fn new_async_beta( chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: MonitorUpdatingPersisterAsync, _entropy_source: ES, diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 872087899df..9401a2b658a 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -353,6 +353,8 @@ pub struct CoinSelection { /// which can provide a default implementation of this trait when used with [`Wallet`]. /// /// For a synchronous version of this trait, see [`sync::CoinSelectionSourceSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. 
pub trait CoinSelectionSource { /// Performs coin selection of a set of UTXOs, with at least 1 confirmation each, that are /// available to spend. Implementations are free to pick their coin selection algorithm of @@ -404,6 +406,8 @@ pub trait CoinSelectionSource { /// provide a default implementation to [`CoinSelectionSource`]. /// /// For a synchronous version of this trait, see [`sync::WalletSourceSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; @@ -424,6 +428,8 @@ pub trait WalletSource { /// spends may happen. /// /// For a synchronous version of this wrapper, see [`sync::WalletSync`]. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub struct Wallet where W::Target: WalletSource + MaybeSend, @@ -670,6 +676,8 @@ where /// /// For a synchronous version of this handler, see [`sync::BumpTransactionEventHandlerSync`]. /// +/// This is not exported to bindings users as async is only supported in Rust. +/// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction pub struct BumpTransactionEventHandler where diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 5f525cea6b9..66632263c42 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -1058,6 +1058,8 @@ pub trait SignerProvider { /// A helper trait that describes an on-chain wallet capable of returning a (change) destination /// script. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait ChangeDestinationSource { /// Returns a script pubkey which can be used as a change destination for /// [`OutputSpender::spend_spendable_outputs`]. 
diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 7161bc77123..eefa40d1055 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -94,27 +94,39 @@ pub(crate) fn dummy_waker() -> Waker { #[cfg(feature = "std")] /// A type alias for a future that returns a result of type `T` or error `E`. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; #[cfg(not(feature = "std"))] /// A type alias for a future that returns a result of type `T` or error `E`. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub type AsyncResult<'a, T, E> = Pin> + 'a>>; /// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. #[cfg(feature = "std")] pub use core::marker::Sync as MaybeSync; #[cfg(not(feature = "std"))] /// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait MaybeSync {} #[cfg(not(feature = "std"))] impl MaybeSync for T where T: ?Sized {} /// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. #[cfg(feature = "std")] pub use core::marker::Send as MaybeSend; #[cfg(not(feature = "std"))] /// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. 
pub trait MaybeSend {} #[cfg(not(feature = "std"))] impl MaybeSend for T where T: ?Sized {} diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index dc26cb42bd0..886146e976d 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -18,6 +18,8 @@ use core::future::Future; use core::pin::Pin; /// A generic trait which is able to spawn futures in the background. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait FutureSpawner: MaybeSend + MaybeSync + 'static { /// Spawns the given future as a background task. /// diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 49addd7dbc3..7be957a1f8a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -155,6 +155,7 @@ where } } +/// This is not exported to bindings users as async is only supported in Rust. impl KVStore for KVStoreSyncWrapper where K::Target: KVStoreSync, @@ -213,6 +214,8 @@ where /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. +/// +/// This is not exported to bindings users as async is only supported in Rust. pub trait KVStore { /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and /// `key`. @@ -717,6 +720,8 @@ where /// Unlike [`MonitorUpdatingPersister`], this does not implement [`Persist`], but is instead used /// directly by the [`ChainMonitor`] via [`ChainMonitor::new_async_beta`]. /// +/// This is not exported to bindings users as async is only supported in Rust. 
+/// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor /// [`ChainMonitor::new_async_beta`]: crate::chain::chainmonitor::ChainMonitor::new_async_beta pub struct MonitorUpdatingPersisterAsync< diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index cba92f64c0c..d19bea2114b 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -329,6 +329,8 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, /// required to give their chain data sources (i.e., [`Filter`] implementation) to the respective /// constructor. /// +/// This is not exported to bindings users as async is not supported outside of Rust. +/// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub struct OutputSweeper where @@ -979,6 +981,8 @@ where /// this [`OutputSweeperSync`], fetching an async [`OutputSweeper`] won't accomplish much, all /// the async methods will hang waiting on your sync [`KVStore`] and likely confuse your async /// runtime. This exists primarily for LDK-internal use, including outside of this crate. + /// + /// This is not exported to bindings users as async is not supported outside of Rust. #[doc(hidden)] pub fn sweeper_async( &self, From bc4f71a60294aec8f9649596b0e95e90e4b8325e Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 14:52:40 +0000 Subject: [PATCH 07/25] Import `ChannelTransactionParameters` from its real path Rather than importing `ChannelTransactionParameters` via the `use` in `lightning::sign`, import it via its real path in `lightning::sign::ecdsa`. This makes reading the code (incredibly marginally) simpler, but also makes the bindings generator happy. 
Backport of b07111e330941e4380b1886034bbffd39681dd20 --- lightning/src/sign/ecdsa.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lightning/src/sign/ecdsa.rs b/lightning/src/sign/ecdsa.rs index bfa3a27acb1..e13285722af 100644 --- a/lightning/src/sign/ecdsa.rs +++ b/lightning/src/sign/ecdsa.rs @@ -7,7 +7,8 @@ use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use crate::ln::chan_utils::{ - ClosingTransaction, CommitmentTransaction, HTLCOutputInCommitment, HolderCommitmentTransaction, + ChannelTransactionParameters, ClosingTransaction, CommitmentTransaction, + HTLCOutputInCommitment, HolderCommitmentTransaction, }; use crate::ln::msgs::UnsignedChannelAnnouncement; use crate::types::payment::PaymentPreimage; @@ -15,7 +16,7 @@ use crate::types::payment::PaymentPreimage; #[allow(unused_imports)] use crate::prelude::*; -use crate::sign::{ChannelSigner, ChannelTransactionParameters, HTLCDescriptor}; +use crate::sign::{ChannelSigner, HTLCDescriptor}; /// A trait to sign Lightning channel transactions as described in /// [BOLT 3](https://github.com/lightning/bolts/blob/master/03-transactions.md). From e1a143c51982510d06a532247f56fc2e7f842be5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 00:22:02 +0000 Subject: [PATCH 08/25] Copy no-export tags to new serialization traits In 0.2 we added new `LengthLimitedRead` and `LengthReadable` traits, but forgot to copy the no-export tags from the existing `Read` and `Readable` traits. Here we add the missing tags. 
Backport of 8eabcc1299470c1478b5c2f4778c40f5ba0487e3 --- lightning/src/util/ser.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index d78b3e921cc..f821aa5afc0 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -229,6 +229,8 @@ impl<'a, R: Read> Read for FixedLengthReader<'a, R> { } } +/// This is not exported to bindings users as reads are always from byte arrays, never streams, in +/// bindings. impl<'a, R: Read> LengthLimitedRead for FixedLengthReader<'a, R> { #[inline] fn remaining_bytes(&self) -> u64 { @@ -350,6 +352,9 @@ where /// A [`io::Read`] that limits the amount of bytes that can be read. Implementations should ensure /// that the object being read will only consume a fixed number of bytes from the underlying /// [`io::Read`], see [`FixedLengthReader`] for an example. +/// +/// This is not exported to bindings users as reads are always from byte arrays, never streams, in +/// bindings. pub trait LengthLimitedRead: Read { /// The number of bytes remaining to be read. fn remaining_bytes(&self) -> u64; @@ -379,6 +384,9 @@ where /// /// Any type that implements [`Readable`] also automatically has a [`LengthReadable`] /// implementation, but some types, most notably onion packets, only implement [`LengthReadable`]. +/// +/// This is not exported to bindings users as reads are always from byte arrays, never streams, in +/// bindings. pub trait LengthReadable where Self: Sized, From 6a8a7df60fe08d8019d2da9189a0b6c24e82bb2b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 00:14:02 +0000 Subject: [PATCH 09/25] `use` `Vec` from `alloc` rather than the crate prelude This avoids confusing the bindings generator. 
Backport of 126ac5092571d7c738d02fb06b06bd878c1107bd --- lightning/src/ln/funding.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index ae7e36ac452..3cabb8201a9 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -9,12 +9,13 @@ //! Types pertaining to funding channels. +use alloc::vec::Vec; + use bitcoin::{Amount, ScriptBuf, SignedAmount, TxOut}; use bitcoin::{Script, Sequence, Transaction, Weight}; use crate::events::bump_transaction::Utxo; use crate::ln::chan_utils::EMPTY_SCRIPT_SIG_WEIGHT; -use crate::prelude::Vec; use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; /// The components of a splice's funding transaction that are contributed by one party. From b54692b87508e8b29419f85fac8f1f15b9e1d308 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 20:28:58 +0000 Subject: [PATCH 10/25] Copy async docs from async bump transaction objects to sync ones When we added the async traits and wrapper structs for the transaction-bumping logic, we didn't bother copying the documentation to the sync wrappers as we figured the links sufficed. Sadly, however, this means that our bindings logic will have no docs but a broken link to an async object that doesn't exist. Instead, here, we copy the docs from the async objects to the sync ones, at least leaving behind a comment noting that both need updating whenever one gets updated. We also fix one bogus link in `Wallet`'s docs. 
Backport of c31144718e362826519d38796ce4b1898f3a29f2 --- lightning/src/events/bump_transaction/mod.rs | 10 ++- lightning/src/events/bump_transaction/sync.rs | 87 +++++++++++++++++-- 2 files changed, 85 insertions(+), 12 deletions(-) diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 9401a2b658a..11f008612a2 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -355,6 +355,7 @@ pub struct CoinSelection { /// For a synchronous version of this trait, see [`sync::CoinSelectionSourceSync`]. /// /// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this trait should be copied to the synchronous version. pub trait CoinSelectionSource { /// Performs coin selection of a set of UTXOs, with at least 1 confirmation each, that are /// available to spend. Implementations are free to pick their coin selection algorithm of @@ -408,6 +409,7 @@ pub trait CoinSelectionSource { /// For a synchronous version of this trait, see [`sync::WalletSourceSync`]. /// /// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; @@ -423,13 +425,14 @@ pub trait WalletSource { fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; } -/// A wrapper over [`WalletSource`] that implements [`CoinSelection`] by preferring UTXOs that would -/// avoid conflicting double spends. If not enough UTXOs are available to do so, conflicting double -/// spends may happen. +/// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs +/// that would avoid conflicting double spends. 
If not enough UTXOs are available to do so, +/// conflicting double spends may happen. /// /// For a synchronous version of this wrapper, see [`sync::WalletSync`]. /// /// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this struct should be copied to the synchronous version. pub struct Wallet where W::Target: WalletSource + MaybeSend, @@ -679,6 +682,7 @@ where /// This is not exported to bindings users as async is only supported in Rust. /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction +// Note that updates to documentation on this struct should be copied to the synchronous version. pub struct BumpTransactionEventHandler where B::Target: BroadcasterInterface, diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index c987b871868..653710a3358 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -28,13 +28,26 @@ use super::{ WalletSource, }; -/// A synchronous version of the [`WalletSource`] trait. +/// An alternative to [`CoinSelectionSourceSync`] that can be implemented and used along +/// [`WalletSync`] to provide a default implementation to [`CoinSelectionSourceSync`]. +/// +/// For an asynchronous version of this trait, see [`WalletSource`]. +// Note that updates to documentation on this trait should be copied to the asynchronous version. pub trait WalletSourceSync { - /// A synchronous version of [`WalletSource::list_confirmed_utxos`]. + /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. fn list_confirmed_utxos(&self) -> Result, ()>; - /// A synchronous version of [`WalletSource::get_change_script`]. + /// Returns a script to use for change above dust resulting from a successful coin selection + /// attempt. fn get_change_script(&self) -> Result; - /// A Synchronous version of [`WalletSource::sign_psbt`]. 
+ /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within + /// the transaction known to the wallet (i.e., any provided via + /// [`WalletSourceSync::list_confirmed_utxos`]). + /// + /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the + /// unsigned transaction and then sign it with your wallet. + /// + /// [`TxIn::script_sig`]: bitcoin::TxIn::script_sig + /// [`TxIn::witness`]: bitcoin::TxIn::witness fn sign_psbt(&self, psbt: Psbt) -> Result; } @@ -74,7 +87,12 @@ where } } -/// A synchronous wrapper around [`Wallet`] to be used in contexts where async is not available. +/// A wrapper over [`WalletSourceSync`] that implements [`CoinSelectionSourceSync`] by preferring +/// UTXOs that would avoid conflicting double spends. If not enough UTXOs are available to do so, +/// conflicting double spends may happen. +/// +/// For an asynchronous version of this wrapper, see [`Wallet`]. +// Note that updates to documentation on this struct should be copied to the asynchronous version. pub struct WalletSync where W::Target: WalletSourceSync + MaybeSend, @@ -136,15 +154,59 @@ where } } -/// A synchronous version of the [`CoinSelectionSource`] trait. +/// An abstraction over a bitcoin wallet that can perform coin selection over a set of UTXOs and can +/// sign for them. The coin selection method aims to mimic Bitcoin Core's `fundrawtransaction` RPC, +/// which most wallets should be able to satisfy. Otherwise, consider implementing +/// [`WalletSourceSync`], which can provide a default implementation of this trait when used with +/// [`WalletSync`]. +/// +/// For an asynchronous version of this trait, see [`CoinSelectionSource`]. +// Note that updates to documentation on this trait should be copied to the asynchronous version. pub trait CoinSelectionSourceSync { - /// A synchronous version of [`CoinSelectionSource::select_confirmed_utxos`]. 
+ /// Performs coin selection of a set of UTXOs, with at least 1 confirmation each, that are + /// available to spend. Implementations are free to pick their coin selection algorithm of + /// choice, as long as the following requirements are met: + /// + /// 1. `must_spend` contains a set of [`Input`]s that must be included in the transaction + /// throughout coin selection, but must not be returned as part of the result. + /// 2. `must_pay_to` contains a set of [`TxOut`]s that must be included in the transaction + /// throughout coin selection. In some cases, like when funding an anchor transaction, this + /// set is empty. Implementations should ensure they handle this correctly on their end, + /// e.g., Bitcoin Core's `fundrawtransaction` RPC requires at least one output to be + /// provided, in which case a zero-value empty OP_RETURN output can be used instead. + /// 3. Enough inputs must be selected/contributed for the resulting transaction (including the + /// inputs and outputs noted above) to meet `target_feerate_sat_per_1000_weight`. + /// 4. The final transaction must have a weight smaller than `max_tx_weight`; if this + /// constraint can't be met, return an `Err`. In the case of counterparty-signed HTLC + /// transactions, we will remove a chunk of HTLCs and try your algorithm again. As for + /// anchor transactions, we will try your coin selection again with the same input-output + /// set when you call [`ChannelMonitor::rebroadcast_pending_claims`], as anchor transactions + /// cannot be downsized. + /// + /// Implementations must take note that [`Input::satisfaction_weight`] only tracks the weight of + /// the input's `script_sig` and `witness`. Some wallets, like Bitcoin Core's, may require + /// providing the full input weight. Failing to do so may lead to underestimating fee bumps and + /// delaying block inclusion. 
+ /// + /// The `claim_id` must map to the set of external UTXOs assigned to the claim, such that they + /// can be re-used within new fee-bumped iterations of the original claiming transaction, + /// ensuring that claims don't double spend each other. If a specific `claim_id` has never had a + /// transaction associated with it, and all of the available UTXOs have already been assigned to + /// other claims, implementations must be willing to double spend their UTXOs. The choice of + /// which UTXOs to double spend is left to the implementation, but it must strive to keep the + /// set of other claims being double spent to a minimum. + /// + /// [`ChannelMonitor::rebroadcast_pending_claims`]: crate::chain::channelmonitor::ChannelMonitor::rebroadcast_pending_claims fn select_confirmed_utxos( &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result; - /// A synchronous version of [`CoinSelectionSource::sign_psbt`]. + /// Signs and provides the full witness for all inputs within the transaction known to the + /// trait (i.e., any provided via [`CoinSelectionSourceSync::select_confirmed_utxos`]). + /// + /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the + /// unsigned transaction and then sign it with your wallet. fn sign_psbt(&self, psbt: Psbt) -> Result; } @@ -188,7 +250,14 @@ where } } -/// A synchronous wrapper around [`BumpTransactionEventHandler`] to be used in contexts where async is not available. +/// A handler for [`Event::BumpTransaction`] events that sources confirmed UTXOs from a +/// [`CoinSelectionSourceSync`] to fee bump transactions via Child-Pays-For-Parent (CPFP) or +/// Replace-By-Fee (RBF). +/// +/// For an asynchronous version of this handler, see [`BumpTransactionEventHandler`]. 
+/// +/// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction +// Note that updates to documentation on this struct should be copied to the asynchronous version. pub struct BumpTransactionEventHandlerSync where B::Target: BroadcasterInterface, From 40e34d7a6a1df70a70c37b0233bad0203cd7cccc Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 20:41:30 +0000 Subject: [PATCH 11/25] Provide the same docs in `ChangeDestinationSource` as the sync one Backport of b55eec774e42de817d7341f69dead2f652a1cd89 --- lightning/src/sign/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 66632263c42..108e489e547 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -1071,6 +1071,9 @@ pub trait ChangeDestinationSource { /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. pub trait ChangeDestinationSourceSync { + /// Returns a script pubkey which can be used as a change destination for + /// [`OutputSpender::spend_spendable_outputs`]. + /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. fn get_change_destination_script(&self) -> Result; From df0ebe08c510e18052ca59348deb2469fdf82fc1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 20:55:31 +0000 Subject: [PATCH 12/25] Copy async docs from async KVStore to sync one When we added the async trait for `KVStore`, we didn't bother copying the documentation to the sync wrappers as we figured the links sufficed. Sadly, however, this means that our bindings logic will have no docs but a broken link to an async object that doesn't exist. Instead, here, we copy the docs from the async objects to the sync ones, at least leaving behind a comment noting that both need updating whenever one gets updated. 
Backport of 9516400dbd266bcefc90c10529a831653d285526 --- lightning/src/util/persist.rs | 67 ++++++++++++++++++++++++++++++++--- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 7be957a1f8a..d00e29e686a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -118,21 +118,75 @@ pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper"; /// updates applied to be current) with another implementation. pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2]; -/// A synchronous version of the [`KVStore`] trait. +/// Provides an interface that allows storage and retrieval of persisted values that are associated +/// with given keys. +/// +/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s +/// and `secondary_namespace`s. Implementations of this trait are free to handle them in different +/// ways, as long as per-namespace key uniqueness is asserted. +/// +/// Keys and namespaces are required to be valid ASCII strings in the range of +/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty +/// primary namespaces and secondary namespaces (`""`) are assumed to be a valid, however, if +/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means +/// that concerns should always be separated by primary namespace first, before secondary +/// namespaces are used. While the number of primary namespaces will be relatively small and is +/// determined at compile time, there may be many secondary namespaces per primary namespace. Note +/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given +/// namespace, i.e., conflicts between keys and equally named +/// primary namespaces/secondary namespaces must be avoided. 
+/// +/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` +/// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to +/// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. +/// +/// For an asynchronous version of this trait, see [`KVStore`]. +// Note that updates to documentation on this trait should be copied to the asynchronous version. pub trait KVStoreSync { - /// A synchronous version of the [`KVStore::read`] method. + /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and + /// `key`. + /// + /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given + /// `primary_namespace` and `secondary_namespace`. + /// + /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Result, io::Error>; - /// A synchronous version of the [`KVStore::write`] method. + /// Persists the given data under the given `key`. + /// + /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> Result<(), io::Error>; - /// A synchronous version of the [`KVStore::remove`] method. + /// Removes any data that had previously been persisted under the given `key`. + /// + /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily + /// remove the given `key` at some point in time after the method returns, e.g., as part of an + /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to + /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. 
+ /// + /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent + /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could + /// potentially get lost on crash after the method returns. Therefore, this flag should only be + /// set for `remove` operations that can be safely replayed at a later time. + /// + /// All removal operations must complete in a consistent total order with [`Self::write`]s + /// to the same key. Whether a removal operation is `lazy` or not, [`Self::write`] operations + /// to the same key which occur before a removal completes must cancel/overwrite the pending + /// removal. + /// + /// Returns successfully if no data will be stored for the given `primary_namespace`, + /// `secondary_namespace`, and `key`, independently of whether it was present before its + /// invokation or not. fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Result<(), io::Error>; - /// A synchronous version of the [`KVStore::list`] method. + /// Returns a list of keys that are stored under the given `secondary_namespace` in + /// `primary_namespace`. + /// + /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the + /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. fn list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> Result, io::Error>; @@ -215,7 +269,10 @@ where /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. /// +/// For a synchronous version of this trait, see [`KVStoreSync`]. +/// /// This is not exported to bindings users as async is only supported in Rust. +// Note that updates to documentation on this trait should be copied to the synchronous version. 
pub trait KVStore { /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and /// `key`. From 65ad2fc9cd8e85e438b247bade0d6a1c5cfa9ce4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Nov 2025 21:16:06 +0000 Subject: [PATCH 13/25] Copy docs from `OutputSweeper` to `OutputSweeperSync` When we added the async wrapper for `OutputSweeper`, we didn't bother copying the documentation to the sync wrappers as we figured the links sufficed. Sadly, however, this means that our bindings logic will have no docs but a broken link to an async object that doesn't exist. Instead, here, we copy the docs from the async objects to the sync ones, at least leaving behind a comment noting that both need updating whenever one gets updated. Backport of 4bc993cc7fce2c1c0a0bc1566dcf6318efde47a1 --- lightning/src/util/sweep.rs | 56 ++++++++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index d19bea2114b..b31dab1ccf6 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -317,10 +317,11 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, ); /// A utility that keeps track of [`SpendableOutputDescriptor`]s, persists them in a given -/// [`KVStoreSync`] and regularly retries sweeping them based on a callback given to the constructor +/// [`KVStore`] and regularly retries sweeping them based on a callback given to the constructor /// methods. /// -/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s received via [`Event::SpendableOutputs`]. +/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s +/// received via [`Event::SpendableOutputs`]. /// /// This needs to be notified of chain state changes either via its [`Listen`] or [`Confirm`] /// implementation and hence has to be connected with the utilized chain data sources. 
@@ -329,9 +330,12 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, /// required to give their chain data sources (i.e., [`Filter`] implementation) to the respective /// constructor. /// +/// For a synchronous version of this struct, see [`OutputSweeperSync`]. +/// /// This is not exported to bindings users as async is not supported outside of Rust. /// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs +// Note that updates to documentation on this struct should be copied to the synchronous version. pub struct OutputSweeper where B::Target: BroadcasterInterface, @@ -878,7 +882,24 @@ where } } -/// A synchronous wrapper around [`OutputSweeper`] to be used in contexts where async is not available. +/// A utility that keeps track of [`SpendableOutputDescriptor`]s, persists them in a given +/// [`KVStoreSync`] and regularly retries sweeping them based on a callback given to the constructor +/// methods. +/// +/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s +/// received via [`Event::SpendableOutputs`]. +/// +/// This needs to be notified of chain state changes either via its [`Listen`] or [`Confirm`] +/// implementation and hence has to be connected with the utilized chain data sources. +/// +/// If chain data is provided via the [`Confirm`] interface or via filtered blocks, users are +/// required to give their chain data sources (i.e., [`Filter`] implementation) to the respective +/// constructor. +/// +/// For an asynchronous version of this struct, see [`OutputSweeper`]. +/// +/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs +// Note that updates to documentation on this struct should be copied to the asynchronous version. pub struct OutputSweeperSync where B::Target: BroadcasterInterface, @@ -905,6 +926,9 @@ where O::Target: OutputSpender, { /// Constructs a new [`OutputSweeperSync`] instance. 
+ /// + /// If chain data is provided via the [`Confirm`] interface or via filtered blocks, users also + /// need to register their [`Filter`] implementation via the given `chain_data_source`. pub fn new( best_block: BestBlock, broadcaster: B, fee_estimator: E, chain_data_source: Option, output_spender: O, change_destination_source: D, kv_store: K, logger: L, @@ -927,7 +951,21 @@ where Self { sweeper } } - /// Wrapper around [`OutputSweeper::track_spendable_outputs`]. + /// Tells the sweeper to track the given outputs descriptors. + /// + /// Usually, this should be called based on the values emitted by the + /// [`Event::SpendableOutputs`]. + /// + /// The given `exclude_static_outputs` flag controls whether the sweeper will filter out + /// [`SpendableOutputDescriptor::StaticOutput`]s, which may be handled directly by the on-chain + /// wallet implementation. + /// + /// If `delay_until_height` is set, we will delay the spending until the respective block + /// height is reached. This can be used to batch spends, e.g., to reduce on-chain fees. + /// + /// Returns `Err` on persistence failure, in which case the call may be safely retried. + /// + /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, @@ -949,7 +987,9 @@ where } } - /// Returns a list of the currently tracked spendable outputs. Wraps [`OutputSweeper::tracked_spendable_outputs`]. + /// Returns a list of the currently tracked spendable outputs. + /// + /// Wraps [`OutputSweeper::tracked_spendable_outputs`]. pub fn tracked_spendable_outputs(&self) -> Vec { self.sweeper.tracked_spendable_outputs() } @@ -960,8 +1000,10 @@ where self.sweeper.current_best_block() } - /// Regenerates and broadcasts the spending transaction for any outputs that are pending. Wraps - /// [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. 
+ /// Regenerates and broadcasts the spending transaction for any outputs that are pending. This method will be a + /// no-op if a sweep is already pending. + /// + /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); From 4e7f4642f1af8ed77bdf10e08252eda39b2ad183 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 5 Nov 2025 11:10:14 -0600 Subject: [PATCH 14/25] Use 72 WU instead of 73 WU for signature weight When estimating signature weight, 73 WU was used in some places while 72 WU was used in others. Consistently use 72 WU and replace hardcoded values with constants. 73 WU signatures are non-standard and won't be produced by LDK. Additionally, using 73 WU along with grind_signatures adjustment is nonsensical. Backport of eb341de7884db765fd42d58b62bffe713940a404 --- lightning/src/ln/chan_utils.rs | 17 ++++++---- lightning/src/ln/channel.rs | 20 ++++++------ lightning/src/ln/interactivetxs.rs | 28 ++++++++++------- lightning/src/sign/mod.rs | 50 ++++++++++++++++++++---------- 4 files changed, 70 insertions(+), 45 deletions(-) diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 51bfc95e7e9..1b5716e9cd1 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -116,19 +116,24 @@ pub const HTLC_SUCCESS_INPUT_P2A_ANCHOR_WITNESS_WEIGHT: u64 = 324; /// The size of the 2-of-2 multisig script const MULTISIG_SCRIPT_SIZE: u64 = 1 + // OP_2 1 + // data len - 33 + // pubkey1 + crate::sign::COMPRESSED_PUBLIC_KEY_SIZE as u64 + // pubkey1 1 + // data len - 33 + // pubkey2 + crate::sign::COMPRESSED_PUBLIC_KEY_SIZE as u64 + // pubkey2 1 + // OP_2 1; // OP_CHECKMULTISIG -/// The weight of a funding transaction input (2-of-2 P2WSH) -/// See 
https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-the-commitment-transaction + +/// The weight of a funding transaction input (2-of-2 P2WSH). +/// +/// Unlike in the [spec], 72 WU is used for the max signature size since 73 WU signatures are +/// non-standard. +/// +/// [spec]: https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-the-commitment-transaction pub const FUNDING_TRANSACTION_WITNESS_WEIGHT: u64 = 1 + // number_of_witness_elements 1 + // nil_len 1 + // sig len - 73 + // sig1 + crate::sign::MAX_STANDARD_SIGNATURE_SIZE as u64 + // sig1 1 + // sig len - 73 + // sig2 + crate::sign::MAX_STANDARD_SIGNATURE_SIZE as u64 + // sig2 1 + // witness_script_length MULTISIG_SCRIPT_SIZE; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3c901d5e073..cbb51ef3ef1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -17413,19 +17413,19 @@ mod tests { // 2 inputs, initiator, 2000 sat/kw feerate assert_eq!( estimate_v2_funding_transaction_fee(&two_inputs, &[], true, false, 2000), - 1520, + 1516, ); // higher feerate assert_eq!( estimate_v2_funding_transaction_fee(&two_inputs, &[], true, false, 3000), - 2280, + 2274, ); // only 1 input assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], true, false, 2000), - 974, + 972, ); // 0 inputs @@ -17443,13 +17443,13 @@ mod tests { // splice initiator assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], true, true, 2000), - 1746, + 1740, ); // splice acceptor assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], false, true, 2000), - 546, + 544, ); } @@ -17484,7 +17484,7 @@ mod tests { true, 2000, ).unwrap(), - 2292, + 2284, ); // negative case, inputs clearly insufficient @@ -17500,13 +17500,13 @@ mod tests { ); assert_eq!( res.err().unwrap(), - "Total input amount 100000 is lower than needed for contribution 220000, considering fees of 1746. 
Need more inputs.", + "Total input amount 100000 is lower than needed for contribution 220000, considering fees of 1740. Need more inputs.", ); } // barely covers { - let expected_fee: u64 = 2292; + let expected_fee: u64 = 2284; assert_eq!( check_v2_funding_inputs_sufficient( (300_000 - expected_fee - 20) as i64, @@ -17536,13 +17536,13 @@ mod tests { ); assert_eq!( res.err().unwrap(), - "Total input amount 300000 is lower than needed for contribution 298032, considering fees of 2522. Need more inputs.", + "Total input amount 300000 is lower than needed for contribution 298032, considering fees of 2513. Need more inputs.", ); } // barely covers, less fees (no extra weight, no init) { - let expected_fee: u64 = 1092; + let expected_fee: u64 = 1088; assert_eq!( check_v2_funding_inputs_sufficient( (300_000 - expected_fee - 20) as i64, diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index d1a985f30fe..20d75e7ddb0 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -3351,21 +3351,19 @@ mod tests { FundingTxInput::new_p2wpkh(prevtx, 0).unwrap() }) .collect(); - let our_contributed = 110_000; let txout = TxOut { value: Amount::from_sat(10_000), script_pubkey: ScriptBuf::new() }; let outputs = vec![txout]; let funding_feerate_sat_per_1000_weight = 3000; - let total_inputs: u64 = input_prevouts.iter().map(|o| o.value.to_sat()).sum(); - let total_outputs: u64 = outputs.iter().map(|o| o.value.to_sat()).sum(); - let gross_change = total_inputs - total_outputs - our_contributed; - let fees = 1746; - let common_fees = 234; + let total_inputs: Amount = input_prevouts.iter().map(|o| o.value).sum(); + let total_outputs: Amount = outputs.iter().map(|o| o.value).sum(); + let fees = Amount::from_sat(1740); + let common_fees = Amount::from_sat(234); // There is leftover for change let context = FundingNegotiationContext { is_initiator: true, - our_funding_contribution: SignedAmount::from_sat(our_contributed as 
i64), + our_funding_contribution: SignedAmount::from_sat(110_000), funding_tx_locktime: AbsoluteLockTime::ZERO, funding_feerate_sat_per_1000_weight, shared_funding_input: None, @@ -3373,16 +3371,18 @@ mod tests { our_funding_outputs: outputs, change_script: None, }; + let gross_change = + total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some(gross_change - fees - common_fees)), + Ok(Some((gross_change - fees - common_fees).to_sat())), ); // There is leftover for change, without common fees let context = FundingNegotiationContext { is_initiator: false, ..context }; assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some(gross_change - fees)), + Ok(Some((gross_change - fees).to_sat())), ); // Insufficient inputs, no leftover @@ -3413,21 +3413,25 @@ mod tests { our_funding_contribution: SignedAmount::from_sat(117_992), ..context }; + let gross_change = + total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 100), - Ok(Some(262)), + Ok(Some((gross_change - fees).to_sat())), ); // Larger fee, smaller change let context = FundingNegotiationContext { is_initiator: true, - our_funding_contribution: SignedAmount::from_sat(our_contributed as i64), + our_funding_contribution: SignedAmount::from_sat(110_000), funding_feerate_sat_per_1000_weight: funding_feerate_sat_per_1000_weight * 3, ..context }; + let gross_change = + total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some(4060)), + Ok(Some((gross_change - fees * 3 - common_fees * 3).to_sat())), ); } diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 108e489e547..c65f29e5a24 100644 --- 
a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -81,6 +81,11 @@ pub mod ecdsa; pub mod taproot; pub mod tx_builder; +pub(crate) const COMPRESSED_PUBLIC_KEY_SIZE: usize = bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE; + +pub(crate) const MAX_STANDARD_SIGNATURE_SIZE: usize = + bitcoin::secp256k1::constants::MAX_SIGNATURE_SIZE; + /// Information about a spendable output to a P2WSH script. /// /// See [`SpendableOutputDescriptor::DelayedPaymentOutput`] for more details on how to spend this. @@ -114,10 +119,12 @@ impl DelayedPaymentOutputDescriptor { /// The maximum length a well-formed witness spending one of these should have. /// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte /// shorter. - // Calculated as 1 byte length + 73 byte signature, 1 byte empty vec push, 1 byte length plus - // redeemscript push length. - pub const MAX_WITNESS_LENGTH: u64 = - 1 + 73 + 1 + chan_utils::REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH as u64 + 1; + pub const MAX_WITNESS_LENGTH: u64 = (1 /* witness items */ + + 1 /* sig push */ + + MAX_STANDARD_SIGNATURE_SIZE + + 1 /* empty vec push */ + + 1 /* redeemscript push */ + + chan_utils::REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH) as u64; } impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, { @@ -131,15 +138,18 @@ impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, { (13, channel_transaction_parameters, (option: ReadableArgs, Some(channel_value_satoshis.0.unwrap()))), }); -pub(crate) const P2WPKH_WITNESS_WEIGHT: u64 = 1 /* num stack items */ + - 1 /* sig length */ + - 73 /* sig including sighash flag */ + - 1 /* pubkey length */ + - 33 /* pubkey */; +/// Witness weight for satisfying a P2WPKH spend. +pub(crate) const P2WPKH_WITNESS_WEIGHT: u64 = (1 /* witness items */ + + 1 /* sig push */ + + MAX_STANDARD_SIGNATURE_SIZE + + 1 /* pubkey push */ + + COMPRESSED_PUBLIC_KEY_SIZE) as u64; -/// Witness weight for satisying a P2TR key-path spend. 
-pub(crate) const P2TR_KEY_PATH_WITNESS_WEIGHT: u64 = 1 /* witness items */ - + 1 /* schnorr sig len */ + 64 /* schnorr sig */; +/// Witness weight for satisfying a P2TR key-path spend. +pub(crate) const P2TR_KEY_PATH_WITNESS_WEIGHT: u64 = (1 /* witness items */ + + 1 /* sig push */ + + bitcoin::secp256k1::constants::SCHNORR_SIGNATURE_SIZE) + as u64; /// If a [`KeysManager`] is built with [`KeysManager::new`] with `v2_remote_key_derivation` set /// (and for all channels after they've been spliced), the script which we receive funds to on-chain @@ -192,10 +202,16 @@ impl StaticPaymentOutputDescriptor { /// shorter. pub fn max_witness_length(&self) -> u64 { if self.needs_csv_1_for_spend() { - let witness_script_weight = 1 /* pubkey push */ + 33 /* pubkey */ + - 1 /* OP_CHECKSIGVERIFY */ + 1 /* OP_1 */ + 1 /* OP_CHECKSEQUENCEVERIFY */; - 1 /* num witness items */ + 1 /* sig push */ + 73 /* sig including sighash flag */ + - 1 /* witness script push */ + witness_script_weight + let witness_script_weight = 1 /* pubkey push */ + + COMPRESSED_PUBLIC_KEY_SIZE + + 1 /* OP_CHECKSIGVERIFY */ + + 1 /* OP_1 */ + + 1 /* OP_CHECKSEQUENCEVERIFY */; + (1 /* num witness items */ + + 1 /* sig push */ + + MAX_STANDARD_SIGNATURE_SIZE + + 1 /* witness script push */ + + witness_script_weight) as u64 } else { P2WPKH_WITNESS_WEIGHT } @@ -511,7 +527,7 @@ impl SpendableOutputDescriptor { sequence: Sequence::ZERO, witness: Witness::new(), }); - witness_weight += 1 + 73 + 34; + witness_weight += P2WPKH_WITNESS_WEIGHT; #[cfg(feature = "grind_signatures")] { // Guarantees a low R signature From 714777567be2cfc3dc3a041fcaaff2a7f75b533c Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 5 Nov 2025 11:21:43 -0600 Subject: [PATCH 15/25] Account for grind_signatures in splice funding tx When estimating the splice funding transaction fees, adjust for grind_signatures. Since LDK supplies one of the signatures, only adjust by 1 WU even though spending the shared input requires two signatures. 
Backport of 469802612762ef747f532972e472f23f1031c702 --- lightning/src/ln/chan_utils.rs | 3 + lightning/src/ln/channel.rs | 102 ++++++++++++++++------------- lightning/src/ln/funding.rs | 8 ++- lightning/src/ln/interactivetxs.rs | 21 +++++- lightning/src/sign/mod.rs | 6 +- lightning/src/util/test_utils.rs | 6 ++ 6 files changed, 97 insertions(+), 49 deletions(-) diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 1b5716e9cd1..e759a4b54cb 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -127,6 +127,9 @@ const MULTISIG_SCRIPT_SIZE: u64 = 1 + // OP_2 /// Unlike in the [spec], 72 WU is used for the max signature size since 73 WU signatures are /// non-standard. /// +/// Note: If you have the `grind_signatures` feature enabled, this will be at least 1 byte +/// shorter. +/// /// [spec]: https://github.com/lightning/bolts/blob/master/03-transactions.md#expected-weight-of-the-commitment-transaction pub const FUNDING_TRANSACTION_WITNESS_WEIGHT: u64 = 1 + // number_of_witness_elements 1 + // nil_len diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index cbb51ef3ef1..a114a94f76f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6561,6 +6561,11 @@ fn estimate_v2_funding_transaction_fee( .saturating_add(BASE_INPUT_WEIGHT) .saturating_add(EMPTY_SCRIPT_SIG_WEIGHT) .saturating_add(FUNDING_TRANSACTION_WITNESS_WEIGHT); + #[cfg(feature = "grind_signatures")] + { + // Guarantees a low R signature + weight -= 1; + } } } @@ -17413,19 +17418,19 @@ mod tests { // 2 inputs, initiator, 2000 sat/kw feerate assert_eq!( estimate_v2_funding_transaction_fee(&two_inputs, &[], true, false, 2000), - 1516, + if cfg!(feature = "grind_signatures") { 1512 } else { 1516 }, ); // higher feerate assert_eq!( estimate_v2_funding_transaction_fee(&two_inputs, &[], true, false, 3000), - 2274, + if cfg!(feature = "grind_signatures") { 2268 } else { 2274 }, ); // only 1 input assert_eq!( 
estimate_v2_funding_transaction_fee(&one_input, &[], true, false, 2000), - 972, + if cfg!(feature = "grind_signatures") { 970 } else { 972 }, ); // 0 inputs @@ -17443,13 +17448,13 @@ mod tests { // splice initiator assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], true, true, 2000), - 1740, + if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }, ); // splice acceptor assert_eq!( estimate_v2_funding_transaction_fee(&one_input, &[], false, true, 2000), - 544, + if cfg!(feature = "grind_signatures") { 542 } else { 544 }, ); } @@ -17473,40 +17478,46 @@ mod tests { use crate::ln::channel::check_v2_funding_inputs_sufficient; // positive case, inputs well over intended contribution - assert_eq!( - check_v2_funding_inputs_sufficient( - 220_000, - &[ - funding_input_sats(200_000), - funding_input_sats(100_000), - ], - true, - true, - 2000, - ).unwrap(), - 2284, - ); + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + 220_000, + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + true, + true, + 2000, + ).unwrap(), + expected_fee, + ); + } // negative case, inputs clearly insufficient { - let res = check_v2_funding_inputs_sufficient( - 220_000, - &[ - funding_input_sats(100_000), - ], - true, - true, - 2000, - ); + let expected_fee = if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }; assert_eq!( - res.err().unwrap(), - "Total input amount 100000 is lower than needed for contribution 220000, considering fees of 1740. Need more inputs.", + check_v2_funding_inputs_sufficient( + 220_000, + &[ + funding_input_sats(100_000), + ], + true, + true, + 2000, + ), + Err(format!( + "Total input amount 100000 is lower than needed for contribution 220000, considering fees of {}. 
Need more inputs.", + expected_fee, + )), ); } // barely covers { - let expected_fee: u64 = 2284; + let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( (300_000 - expected_fee - 20) as i64, @@ -17524,25 +17535,28 @@ mod tests { // higher fee rate, does not cover { - let res = check_v2_funding_inputs_sufficient( - 298032, - &[ - funding_input_sats(200_000), - funding_input_sats(100_000), - ], - true, - true, - 2200, - ); + let expected_fee = if cfg!(feature = "grind_signatures") { 2506 } else { 2513 }; assert_eq!( - res.err().unwrap(), - "Total input amount 300000 is lower than needed for contribution 298032, considering fees of 2513. Need more inputs.", + check_v2_funding_inputs_sufficient( + 298032, + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + true, + true, + 2200, + ), + Err(format!( + "Total input amount 300000 is lower than needed for contribution 298032, considering fees of {}. 
Need more inputs.", + expected_fee + )), ); } - // barely covers, less fees (no extra weight, no init) + // barely covers, less fees (no extra weight, not initiator) { - let expected_fee: u64 = 1088; + let expected_fee = if cfg!(feature = "grind_signatures") { 1084 } else { 1088 }; assert_eq!( check_v2_funding_inputs_sufficient( (300_000 - expected_fee - 20) as i64, diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index 3cabb8201a9..7281a5f6d69 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -143,7 +143,13 @@ impl FundingTxInput { /// [`TxIn::sequence`]: bitcoin::TxIn::sequence /// [`set_sequence`]: Self::set_sequence pub fn new_p2wpkh(prevtx: Transaction, vout: u32) -> Result { - let witness_weight = Weight::from_wu(P2WPKH_WITNESS_WEIGHT); + let witness_weight = Weight::from_wu(P2WPKH_WITNESS_WEIGHT) + - if cfg!(feature = "grind_signatures") { + // Guarantees a low R signature + Weight::from_wu(1) + } else { + Weight::ZERO + }; FundingTxInput::new(prevtx, vout, witness_weight, Script::is_p2wpkh) } diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 20d75e7ddb0..547194d7a29 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -1706,7 +1706,15 @@ impl InputOwned { InputOwned::Single(single) => single.satisfaction_weight, // TODO(taproot): Needs to consider different weights based on channel type InputOwned::Shared(_) => { - Weight::from_wu(EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT) + let mut weight = 0; + weight += EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT; + #[cfg(feature = "grind_signatures")] + { + // Guarantees a low R signature + weight -= 1; + } + + Weight::from_wu(weight) }, } } @@ -2312,6 +2320,11 @@ pub(super) fn calculate_change_output_value( weight = weight.saturating_add(BASE_INPUT_WEIGHT); weight = weight.saturating_add(EMPTY_SCRIPT_SIG_WEIGHT); weight = 
weight.saturating_add(FUNDING_TRANSACTION_WITNESS_WEIGHT); + #[cfg(feature = "grind_signatures")] + { + // Guarantees a low R signature + weight -= 1; + } } } @@ -3357,7 +3370,11 @@ mod tests { let total_inputs: Amount = input_prevouts.iter().map(|o| o.value).sum(); let total_outputs: Amount = outputs.iter().map(|o| o.value).sum(); - let fees = Amount::from_sat(1740); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(1734) + } else { + Amount::from_sat(1740) + }; let common_fees = Amount::from_sat(234); // There is leftover for change diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index c65f29e5a24..ac24a4a4040 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -117,7 +117,8 @@ pub struct DelayedPaymentOutputDescriptor { impl DelayedPaymentOutputDescriptor { /// The maximum length a well-formed witness spending one of these should have. - /// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte + /// + /// Note: If you have the `grind_signatures` feature enabled, this will be at least 1 byte /// shorter. pub const MAX_WITNESS_LENGTH: u64 = (1 /* witness items */ + 1 /* sig push */ @@ -198,7 +199,8 @@ impl StaticPaymentOutputDescriptor { } /// The maximum length a well-formed witness spending one of these should have. - /// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte + /// + /// Note: If you have the `grind_signatures` feature enabled, this will be at least 1 byte /// shorter. 
pub fn max_witness_length(&self) -> u64 { if self.needs_csv_1_for_spend() { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 54ed67d4714..c9f9ba2d086 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -2220,10 +2220,16 @@ impl TestWalletSource { utxo.output.value, EcdsaSighashType::All, )?; + #[cfg(not(feature = "grind_signatures"))] let signature = self.secp.sign_ecdsa( &secp256k1::Message::from_digest(sighash.to_byte_array()), &self.secret_key, ); + #[cfg(feature = "grind_signatures")] + let signature = self.secp.sign_ecdsa_low_r( + &secp256k1::Message::from_digest(sighash.to_byte_array()), + &self.secret_key, + ); let bitcoin_sig = bitcoin::ecdsa::Signature { signature, sighash_type: EcdsaSighashType::All }; tx.input[i].witness = From bc7da95c7a315a352dfbd32fb859937506c471d3 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 5 Nov 2025 16:11:46 -0600 Subject: [PATCH 16/25] Use a tolerance when estimating remote fees The interactive-tx construction protocol uses an agreed upon fee rate. Since the bitcoind coin selection algorithm may underpay fees when no change output is needed, providing a tolerance when checking if the remote's fee contribution could avoid some unexpected failures. This commit introduces a 95% tolerance similar to Eclair. Backport of 3aa457407df22771c08a24a0f5c739b78442c20a --- lightning/src/ln/interactivetxs.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 547194d7a29..54067455ee9 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -257,6 +257,9 @@ impl_writeable_tlv_based!(ConstructedTransaction, { (11, shared_output_index, required), }); +/// The percent tolerance given to the remote when estimating if they paid enough fees. 
+const REMOTE_FEE_TOLERANCE_PERCENT: u64 = 95; + impl ConstructedTransaction { fn new(context: NegotiationContext) -> Result { let remote_inputs_value = context.remote_inputs_value(); @@ -315,8 +318,10 @@ impl ConstructedTransaction { // - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee). let remote_fees_contributed = remote_inputs_value.saturating_sub(remote_outputs_value); - let required_remote_contribution_fee = - fee_for_weight(context.feerate_sat_per_kw, remote_weight_contributed); + let required_remote_contribution_fee = fee_for_weight( + (context.feerate_sat_per_kw as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, + remote_weight_contributed, + ); if remote_fees_contributed < required_remote_contribution_fee { return Err(AbortReason::InsufficientFees); } @@ -2377,7 +2382,7 @@ mod tests { use super::{ get_output_weight, ConstructedTransaction, InteractiveTxSigningSession, TxInMetadata, P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND, - P2WSH_INPUT_WEIGHT_LOWER_BOUND, TX_COMMON_FIELDS_WEIGHT, + P2WSH_INPUT_WEIGHT_LOWER_BOUND, REMOTE_FEE_TOLERANCE_PERCENT, TX_COMMON_FIELDS_WEIGHT, }; const TEST_FEERATE_SATS_PER_KW: u32 = FEERATE_FLOOR_SATS_PER_KW * 10; @@ -2842,7 +2847,7 @@ mod tests { let outputs_weight = get_output_weight(&generate_p2wsh_script_pubkey()).to_wu(); let amount_adjusted_with_p2wpkh_fee = 1_000_000 - fee_for_weight( - TEST_FEERATE_SATS_PER_KW, + (TEST_FEERATE_SATS_PER_KW as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, P2WPKH_INPUT_WEIGHT_LOWER_BOUND + TX_COMMON_FIELDS_WEIGHT + outputs_weight, ); do_test_interactive_tx_constructor(TestSession { @@ -2878,7 +2883,7 @@ mod tests { }); let amount_adjusted_with_p2wsh_fee = 1_000_000 - fee_for_weight( - TEST_FEERATE_SATS_PER_KW, + (TEST_FEERATE_SATS_PER_KW as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, P2WSH_INPUT_WEIGHT_LOWER_BOUND + TX_COMMON_FIELDS_WEIGHT + outputs_weight, ); do_test_interactive_tx_constructor(TestSession { @@ 
-2914,7 +2919,7 @@ mod tests { }); let amount_adjusted_with_p2tr_fee = 1_000_000 - fee_for_weight( - TEST_FEERATE_SATS_PER_KW, + (TEST_FEERATE_SATS_PER_KW as u64 * REMOTE_FEE_TOLERANCE_PERCENT / 100) as u32, P2TR_INPUT_WEIGHT_LOWER_BOUND + TX_COMMON_FIELDS_WEIGHT + outputs_weight, ); do_test_interactive_tx_constructor(TestSession { From 74e1069022b237a1b9b6f123a7d46b05f783a3f8 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 10 Nov 2025 17:34:56 -0600 Subject: [PATCH 17/25] Fix MAX_STANDARD_TX_WEIGHT check The interactive-tx construction protocol needs to make sure the constructed transaction does not exceed MAX_STANDARD_TX_WEIGHT. A naive estimate of the transaction weight after signing was used, but was not accurate. Specifically, it double-counted EMPTY_SCRIPT_SIG_WEIGHT and didn't include SEGWIT_MARKER_FLAG_WEIGHT. Backport of b4803c7e34567f6903de9848726cc73e8bf1234a --- lightning/src/ln/interactivetxs.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 54067455ee9..5db6ee08c16 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -29,6 +29,7 @@ use bitcoin::{ use crate::chain::chaininterface::fee_for_weight; use crate::ln::chan_utils::{ BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT, FUNDING_TRANSACTION_WITNESS_WEIGHT, + SEGWIT_MARKER_FLAG_WEIGHT, }; use crate::ln::channel::{FundingNegotiationContext, TOTAL_BITCOIN_SUPPLY_SATOSHIS}; use crate::ln::funding::FundingTxInput; @@ -266,10 +267,11 @@ impl ConstructedTransaction { let remote_outputs_value = context.remote_outputs_value(); let remote_weight_contributed = context.remote_weight_contributed(); - let satisfaction_weight = - Weight::from_wu(context.inputs.iter().fold(0u64, |value, (_, input)| { - value.saturating_add(input.satisfaction_weight().to_wu()) - })); + let expected_witness_weight = context.inputs.iter().fold(0u64, |value, (_, input)| { + value 
+ .saturating_add(input.satisfaction_weight().to_wu()) + .saturating_sub(EMPTY_SCRIPT_SIG_WEIGHT) + }); let lock_time = context.tx_locktime; @@ -342,8 +344,13 @@ impl ConstructedTransaction { return Err(AbortReason::MissingFundingOutput); } - let tx_weight = tx.tx.weight().checked_add(satisfaction_weight).unwrap_or(Weight::MAX); - if tx_weight > Weight::from_wu(MAX_STANDARD_TX_WEIGHT as u64) { + let tx_weight = tx + .tx + .weight() + .to_wu() + .saturating_add(SEGWIT_MARKER_FLAG_WEIGHT) + .saturating_add(expected_witness_weight); + if tx_weight > MAX_STANDARD_TX_WEIGHT as u64 { return Err(AbortReason::TransactionTooLarge); } From 7711c3af472acdbead07cd5eb57dbdd589124ecc Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 10 Nov 2025 12:16:32 -0600 Subject: [PATCH 18/25] Add method to convert FundingTxInput to Utxo Useful for re-using code producing a FundingTxInput set to implement CoinSelectionSource. Backport of 2330ed9892f64c09762e77f6e4dfad59f89e0d1b --- lightning/src/ln/funding.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index 7281a5f6d69..f80b2b6daea 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -230,4 +230,9 @@ impl FundingTxInput { pub fn set_sequence(&mut self, sequence: Sequence) { self.sequence = sequence; } + + /// Converts the [`FundingTxInput`] into a [`Utxo`] for coin selection. 
+ pub fn into_utxo(self) -> Utxo { + self.utxo + } } From 1f156c9a39a958dd486c291a14e36952ef4cd9b3 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 3 Nov 2025 13:41:39 -0800 Subject: [PATCH 19/25] Support capturing `tx_abort` send within channel reestablish tests Backport of 0cdcf5421ccf92d199996fc4de9a54ed138558c7 --- lightning/src/ln/async_signer_tests.rs | 17 +++++++++------ lightning/src/ln/functional_test_utils.rs | 26 ++++++++++++++++++++++- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 71821081094..03728e28222 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -596,7 +596,7 @@ fn do_test_async_raa_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(revoke_and_ack.is_none()); @@ -612,14 +612,14 @@ fn do_test_async_raa_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _) = + let (_, revoke_and_ack, commitment_signed, resend_order, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); assert!(commitment_signed.is_some()); assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); } else { // Make sure we don't double send the RAA. 
- let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_none()); assert!(commitment_signed.is_none()); @@ -746,7 +746,7 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Expect the RAA - let (_, revoke_and_ack, commitment_signed, _, _, _, _) = + let (_, revoke_and_ack, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { @@ -760,11 +760,11 @@ fn do_test_async_commitment_signature_peer_disconnect( dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { - let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { // Make sure we don't double send the CS. 
- let (_, _, commitment_signed, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); + let (_, _, commitment_signed, _, _, _, _, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_none()); } } @@ -882,6 +882,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); assert!(as_resp.6.is_none()); + assert!(as_resp.7.is_none()); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -904,6 +905,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.4.is_none()); assert!(as_resp.5.is_none()); assert!(as_resp.6.is_none()); + assert!(as_resp.7.is_none()); nodes[0].enable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((node_b_id, chan_id))); @@ -929,6 +931,9 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(as_resp.6.is_none()); assert!(bs_resp.6.is_none()); + assert!(as_resp.7.is_none()); + assert!(bs_resp.7.is_none()); + // Now that everything is restored, get the CS + RAA and handle them. nodes[1] .node diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 78378978695..b6db7d23484 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4996,9 +4996,17 @@ macro_rules! 
handle_chan_reestablish_msgs { ($src_node: expr, $dst_node: expr) => {{ let msg_events = $src_node.node.get_and_clear_pending_msg_events(); let mut idx = 0; + + let mut tx_abort = None; + if let Some(&MessageSendEvent::SendTxAbort { ref node_id, ref msg }) = msg_events.get(idx) { + idx += 1; + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + tx_abort = Some(msg.clone()); + } + let channel_ready = if let Some(&MessageSendEvent::SendChannelReady { ref node_id, ref msg }) = - msg_events.get(0) + msg_events.get(idx) { idx += 1; assert_eq!(*node_id, $dst_node.node.get_our_node_id()); @@ -5115,6 +5123,7 @@ macro_rules! handle_chan_reestablish_msgs { announcement_sigs, tx_signatures, stfu, + tx_abort, ) }}; } @@ -5127,6 +5136,7 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub send_stfu: (bool, bool), pub send_interactive_tx_commit_sig: (bool, bool), pub send_interactive_tx_sigs: (bool, bool), + pub send_tx_abort: (bool, bool), pub expect_renegotiated_funding_locked_monitor_update: (bool, bool), pub pending_responding_commitment_signed: (bool, bool), /// Indicates that the pending responding commitment signed will be a dup for the recipient, @@ -5150,6 +5160,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { send_stfu: (false, false), send_interactive_tx_commit_sig: (false, false), send_interactive_tx_sigs: (false, false), + send_tx_abort: (false, false), expect_renegotiated_funding_locked_monitor_update: (false, false), pending_responding_commitment_signed: (false, false), pending_responding_commitment_signed_dup_monitor: (false, false), @@ -5174,6 +5185,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { send_stfu, send_interactive_tx_commit_sig, send_interactive_tx_sigs, + send_tx_abort, expect_renegotiated_funding_locked_monitor_update, pending_htlc_adds, pending_htlc_claims, @@ -5305,6 +5317,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ) } + if 
send_tx_abort.0 { + let tx_abort = chan_msgs.7.take().unwrap(); + node_a.node.handle_tx_abort(node_b_id, &tx_abort); + } else { + assert!(chan_msgs.7.is_none()); + } if send_interactive_tx_sigs.0 { let tx_signatures = chan_msgs.5.take().unwrap(); node_a.node.handle_tx_signatures(node_b_id, &tx_signatures); @@ -5417,6 +5435,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ) } + if send_tx_abort.1 { + let tx_abort = chan_msgs.7.take().unwrap(); + node_a.node.handle_tx_abort(node_b_id, &tx_abort); + } else { + assert!(chan_msgs.7.is_none()); + } if send_interactive_tx_sigs.1 { let tx_signatures = chan_msgs.5.take().unwrap(); node_b.node.handle_tx_signatures(node_a_id, &tx_signatures); From 17b99e0ea39445767005ea06f51ecbbdf6119a2b Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 3 Nov 2025 13:41:42 -0800 Subject: [PATCH 20/25] Allow counterparty tx_abort before handling initial commitment signed Upon processing the counterparty's initial `commitment_signed` for a splice, we queue a monitor update with the new commitment transactions spending the new funding transaction. Once handled, funding transaction signatures can be exchanged and we must start monitoring the chain for a possible commitment transaction broadcast spending the new funding transaction. Aborting the splice negotiation then would therefore be unsafe, hence why we currently force close in such cases. However, there is no reason to force close prior to receiving their initial `commitment_signed`, as this would imply the funding transaction signatures have yet to be exchanged, thus making a commitment broadcast spending said transaction impossible allowing us to abort the splice negotiation safely. 
Backport of d282a47fc5be6fa14eae864e4c95456fdb5f61d2 --- lightning/src/ln/channel.rs | 104 +++++++++++++++++++--------- lightning/src/ln/interactivetxs.rs | 34 +++++++++ lightning/src/ln/splicing_tests.rs | 106 +++++++++++++++++++++++++++-- 3 files changed, 207 insertions(+), 37 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a114a94f76f..c3ffe87503d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1645,7 +1645,7 @@ where } chan.context.channel_state.clear_local_stfu_sent(); chan.context.channel_state.clear_remote_stfu_sent(); - if chan.should_reset_pending_splice_state() { + if chan.should_reset_pending_splice_state(false) { // If there was a pending splice negotiation that failed due to disconnecting, we // also take the opportunity to clean up our state. let splice_funding_failed = chan.reset_pending_splice_state(); @@ -1766,7 +1766,7 @@ where None }, ChannelPhase::Funded(funded_channel) => { - if funded_channel.should_reset_pending_splice_state() { + if funded_channel.should_reset_pending_splice_state(false) { funded_channel.reset_pending_splice_state() } else { debug_assert!(false, "We should never fail an interactive funding negotiation once we're exchanging tx_signatures"); @@ -1923,12 +1923,24 @@ where (had_constructor, None) }, ChannelPhase::Funded(funded_channel) => { - if funded_channel.has_pending_splice_awaiting_signatures() { + if funded_channel.has_pending_splice_awaiting_signatures() + && funded_channel + .context() + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + { + // We only force close once the counterparty tries to abort after committing to + // the splice via their initial `commitment_signed`. 
This is because our monitor + // state is updated with the post-splice commitment transaction upon receiving + // their `commitment_signed`, so we would need another monitor update to abandon + // it, which we don't currently support. return Err(ChannelError::close( "Received tx_abort while awaiting tx_signatures exchange".to_owned(), )); } - if funded_channel.should_reset_pending_splice_state() { + if funded_channel.should_reset_pending_splice_state(true) { let has_funding_negotiation = funded_channel .pending_splice .as_ref() @@ -2695,19 +2707,6 @@ impl FundingNegotiation { } impl PendingFunding { - fn can_abandon_state(&self) -> bool { - self.funding_negotiation - .as_ref() - .map(|funding_negotiation| { - !matches!(funding_negotiation, FundingNegotiation::AwaitingSignatures { .. }) - }) - .unwrap_or_else(|| { - let has_negotiated_candidates = !self.negotiated_candidates.is_empty(); - debug_assert!(has_negotiated_candidates); - !has_negotiated_candidates - }) - } - fn check_get_splice_locked( &mut self, context: &ChannelContext, confirmed_funding_index: usize, height: u32, ) -> Option @@ -6890,7 +6889,7 @@ pub struct SpliceFundingFailed { } macro_rules! maybe_create_splice_funding_failed { - ($pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ + ($funded_channel: expr, $pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ $pending_splice .and_then(|pending_splice| pending_splice.funding_negotiation.$get()) .filter(|funding_negotiation| funding_negotiation.is_initiator()) @@ -6912,10 +6911,12 @@ macro_rules! maybe_create_splice_funding_failed { interactive_tx_constructor, .. } => interactive_tx_constructor.$contributed_inputs_and_outputs(), - FundingNegotiation::AwaitingSignatures { .. } => { - debug_assert!(false); - (Vec::new(), Vec::new()) - }, + FundingNegotiation::AwaitingSignatures { .. 
} => $funded_channel + .context + .interactive_tx_signing_session + .$get() + .expect("We have a pending splice awaiting signatures") + .$contributed_inputs_and_outputs(), }; SpliceFundingFailed { @@ -6954,7 +6955,7 @@ where fn maybe_fail_splice_negotiation(&mut self) -> Option { if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { - if self.should_reset_pending_splice_state() { + if self.should_reset_pending_splice_state(false) { self.reset_pending_splice_state() } else { match self.quiescent_action.take() { @@ -7028,19 +7029,54 @@ where /// Returns a boolean indicating whether we should reset the splice's /// [`PendingFunding::funding_negotiation`]. - fn should_reset_pending_splice_state(&self) -> bool { + fn should_reset_pending_splice_state(&self, counterparty_aborted: bool) -> bool { self.pending_splice .as_ref() - .map(|pending_splice| pending_splice.can_abandon_state()) + .map(|pending_splice| { + pending_splice + .funding_negotiation + .as_ref() + .map(|funding_negotiation| { + let is_awaiting_signatures = matches!( + funding_negotiation, + FundingNegotiation::AwaitingSignatures { .. 
} + ); + if counterparty_aborted { + !is_awaiting_signatures + || !self + .context() + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + } else { + !is_awaiting_signatures + } + }) + .unwrap_or_else(|| { + let has_negotiated_candidates = + !pending_splice.negotiated_candidates.is_empty(); + debug_assert!(has_negotiated_candidates); + !has_negotiated_candidates + }) + }) .unwrap_or(false) } fn reset_pending_splice_state(&mut self) -> Option { - debug_assert!(self.should_reset_pending_splice_state()); - debug_assert!(self.context.interactive_tx_signing_session.is_none()); - self.context.channel_state.clear_quiescent(); + debug_assert!(self.should_reset_pending_splice_state(true)); + debug_assert!( + self.context.interactive_tx_signing_session.is_none() + || !self + .context + .interactive_tx_signing_session + .as_ref() + .expect("We have a pending splice awaiting signatures") + .has_received_commitment_signed() + ); let splice_funding_failed = maybe_create_splice_funding_failed!( + self, self.pending_splice.as_mut(), take, into_contributed_inputs_and_outputs @@ -7050,15 +7086,19 @@ where self.pending_splice.take(); } + self.context.channel_state.clear_quiescent(); + self.context.interactive_tx_signing_session.take(); + splice_funding_failed } pub(super) fn maybe_splice_funding_failed(&self) -> Option { - if !self.should_reset_pending_splice_state() { + if !self.should_reset_pending_splice_state(false) { return None; } maybe_create_splice_funding_failed!( + self, self.pending_splice.as_ref(), as_ref, to_contributed_inputs_and_outputs @@ -12013,7 +12053,7 @@ where pub fn abandon_splice( &mut self, ) -> Result<(msgs::TxAbort, Option), APIError> { - if self.should_reset_pending_splice_state() { + if self.should_reset_pending_splice_state(false) { let tx_abort = msgs::TxAbort { channel_id: self.context.channel_id(), data: Vec::new() }; let splice_funding_failed = 
self.reset_pending_splice_state(); @@ -14382,7 +14422,7 @@ where } channel_state.clear_local_stfu_sent(); channel_state.clear_remote_stfu_sent(); - if self.should_reset_pending_splice_state() + if self.should_reset_pending_splice_state(false) || !self.has_pending_splice_awaiting_signatures() { // We shouldn't be quiescent anymore upon reconnecting if: @@ -14756,7 +14796,7 @@ where // We don't have to worry about resetting the pending `FundingNegotiation` because we // can only read `FundingNegotiation::AwaitingSignatures` variants anyway. let pending_splice = - self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state()); + self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state(false)); write_tlv_fields!(writer, { (0, self.context.announcement_sigs, option), diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 5db6ee08c16..badd8428db3 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -362,6 +362,36 @@ impl ConstructedTransaction { NegotiationError { reason, contributed_inputs, contributed_outputs } } + fn to_contributed_inputs_and_outputs(&self) -> (Vec, Vec) { + let contributed_inputs = self + .tx + .input + .iter() + .zip(self.input_metadata.iter()) + .enumerate() + .filter(|(_, (_, input))| input.is_local(self.holder_is_initiator)) + .filter(|(index, _)| { + self.shared_input_index + .map(|shared_index| *index != shared_index as usize) + .unwrap_or(true) + }) + .map(|(_, (txin, _))| txin.previous_output) + .collect(); + + let contributed_outputs = self + .tx + .output + .iter() + .zip(self.output_metadata.iter()) + .enumerate() + .filter(|(_, (_, output))| output.is_local(self.holder_is_initiator)) + .filter(|(index, _)| *index != self.shared_output_index as usize) + .map(|(_, (txout, _))| txout.clone()) + .collect(); + + (contributed_inputs, contributed_outputs) + } + fn into_contributed_inputs_and_outputs(self) -> (Vec, Vec) { let 
contributed_inputs = self .tx @@ -871,6 +901,10 @@ impl InteractiveTxSigningSession { self.unsigned_tx.into_negotiation_error(reason) } + pub(super) fn to_contributed_inputs_and_outputs(&self) -> (Vec, Vec) { + self.unsigned_tx.to_contributed_inputs_and_outputs() + } + pub(super) fn into_contributed_inputs_and_outputs(self) -> (Vec, Vec) { self.unsigned_tx.into_contributed_inputs_and_outputs() } diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index f95883e9434..e387ac3bfdd 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -395,17 +395,28 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { // retry later. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let (persister_0a, persister_0b, persister_0c, persister_1a, persister_1b, persister_1c); + let ( + persister_0a, + persister_0b, + persister_0c, + persister_0d, + persister_1a, + persister_1b, + persister_1c, + persister_1d, + ); let ( chain_monitor_0a, chain_monitor_0b, chain_monitor_0c, + chain_monitor_0d, chain_monitor_1a, chain_monitor_1b, chain_monitor_1c, + chain_monitor_1d, ); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let (node_0a, node_0b, node_0c, node_1a, node_1b, node_1c); + let (node_0a, node_0b, node_0c, node_0d, node_1a, node_1b, node_1c, node_1d); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); @@ -533,9 +544,56 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); - // Attempt a splice negotiation that completes, (i.e. `tx_signatures` are exchanged). Reconnecting - // should not abort the negotiation or reset the splice state. 
- let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, contribution); + nodes[0] + .node + .splice_channel( + &channel_id, + &node_id_1, + contribution.clone(), + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + + // Attempt a splice negotiation that ends before the initial `commitment_signed` messages are + // exchanged. The node missing the other's `commitment_signed` upon reconnecting should + // implicitly abort the negotiation and reset the splice state such that we're able to retry + // another splice later. + let stfu = get_event_msg!(nodes[0], MessageSendEvent::SendStfu, node_id_1); + nodes[1].node.handle_stfu(node_id_0, &stfu); + let stfu = get_event_msg!(nodes[1], MessageSendEvent::SendStfu, node_id_0); + nodes[0].node.handle_stfu(node_id_1, &stfu); + + let splice_init = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceInit, node_id_1); + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + + let tx_add_input = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddInput, node_id_1); + nodes[1].node.handle_tx_add_input(node_id_0, &tx_add_input); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let tx_add_output = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddOutput, node_id_1); + nodes[1].node.handle_tx_add_output(node_id_0, &tx_add_output); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let tx_add_output = get_event_msg!(nodes[0], MessageSendEvent::SendTxAddOutput, node_id_1); + nodes[1].node.handle_tx_add_output(node_id_0, &tx_add_output); + let tx_complete = get_event_msg!(nodes[1], MessageSendEvent::SendTxComplete, node_id_0); + 
nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2); + if let MessageSendEvent::SendTxComplete { .. } = &msg_events[0] { + } else { + panic!("Unexpected event"); + } + if let MessageSendEvent::UpdateHTLCs { .. } = &msg_events[1] { + } else { + panic!("Unexpected event"); + } if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); @@ -561,6 +619,44 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[1].node.peer_disconnected(node_id_0); } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, false); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_args.send_tx_abort = (true, false); + reconnect_nodes(reconnect_args); + + let tx_abort = get_event_msg!(nodes[0], MessageSendEvent::SendTxAbort, node_id_1); + nodes[1].node.handle_tx_abort(node_id_0, &tx_abort); + let _event = get_event!(nodes[0], Event::SpliceFailed); + + // Attempt a splice negotiation that completes, (i.e. `tx_signatures` are exchanged). Reconnecting + // should not abort the negotiation or reset the splice state. 
+ let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, contribution); + + if reload { + let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); + reload_node!( + nodes[0], + &nodes[0].node.encode(), + &[&encoded_monitor_0], + persister_0d, + chain_monitor_0d, + node_0d + ); + let encoded_monitor_1 = get_monitor!(nodes[1], channel_id).encode(); + reload_node!( + nodes[1], + &nodes[1].node.encode(), + &[&encoded_monitor_1], + persister_1d, + chain_monitor_1d, + node_1d + ); + } else { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + } + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_args); From a28295154d53583e49ac6ffe9804114ab06b139e Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 10 Nov 2025 09:58:07 -0800 Subject: [PATCH 21/25] Avoid returning user error when stfu is not ready to be sent yet When we propose quiescence due to a user initiated action, such as a splice, it's possible that the `stfu` message is not ready to be sent out yet due to the state machine pending a change. This would result in an error communicated back to the caller of `ChannelManager::splice_channel`, but this is unnecessary and confusing, the splice is still pending and will be carried out once quiescence is eventually negotiated. 
Backport of c2b15c212ad164cfa4aab148efa52146fa63df7c --- lightning/src/ln/channel.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c3ffe87503d..188ae633e9a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -13120,7 +13120,13 @@ where self.context.channel_state.set_awaiting_quiescence(); if self.context.is_live() { - Ok(Some(self.send_stfu(logger)?)) + match self.send_stfu(logger) { + Ok(stfu) => Ok(Some(stfu)), + Err(e) => { + log_debug!(logger, "{e}"); + Ok(None) + }, + } } else { log_debug!(logger, "Waiting for peer reconnection to send stfu"); Ok(None) From 883142b01e3acfa3a4918a06680470e4b93b80cb Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 10 Nov 2025 10:01:00 -0800 Subject: [PATCH 22/25] Remove invalid splice debug assertion in funding_tx_constructed We incorrectly assumed that both commitments must be at the same height, but this is certainly possible whenever updates are proposed in both directions at the same time. 
Backport of a6ed667c6936ff7f188515e1633a90a5609b14b7 --- lightning/src/ln/channel.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 188ae633e9a..1fd42abf139 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6213,12 +6213,7 @@ where { funding.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint); - if is_splice { - debug_assert_eq!( - holder_commitment_transaction_number, - self.counterparty_next_commitment_transaction_number, - ); - } else { + if !is_splice { self.assert_no_commitment_advancement(holder_commitment_transaction_number, "initial commitment_signed"); self.channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); } From 689aec2ef498259b4b0842e248f0b5863bd838e8 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 10 Nov 2025 10:01:32 -0800 Subject: [PATCH 23/25] Break up ChannelContext::funding_tx_constructed This method was originally intended to DRY the logic between `FundedChannel` and `PendingChannelV2` when handling `funding_tx_constructed`, though it's not very useful anymore. 
Backport of 25264d758d11b3f3b6af881fa1eff95c96131cca --- lightning/src/ln/channel.rs | 75 +++++++++++++++---------------------- 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1fd42abf139..62b108fd20a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2019,18 +2019,32 @@ where | NegotiatingFundingFlags::THEIR_INIT_SENT ), ); + chan.context.assert_no_commitment_advancement( + chan.unfunded_context.transaction_number(), + "initial commitment_signed", + ); + + chan.context.channel_state = + ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); + chan.funding.channel_transaction_parameters.funding_outpoint = + Some(funding_outpoint); let interactive_tx_constructor = chan .interactive_tx_constructor .take() .expect("PendingV2Channel::interactive_tx_constructor should be set"); - let commitment_signed = chan.context.funding_tx_constructed( - &mut chan.funding, - funding_outpoint, - false, - chan.unfunded_context.transaction_number(), - &&logger, - )?; + + let commitment_signed = + chan.context.get_initial_commitment_signed_v2(&chan.funding, &&logger); + let commitment_signed = match commitment_signed { + Some(commitment_signed) => commitment_signed, + // TODO(dual_funding): Support async signing + None => { + return Err(AbortReason::InternalError( + "Failed to compute commitment_signed signatures", + )); + }, + }; (interactive_tx_constructor, commitment_signed) }, @@ -2059,14 +2073,11 @@ where ) }) .and_then(|(is_initiator, mut funding, interactive_tx_constructor)| { - match chan.context.funding_tx_constructed( - &mut funding, - funding_outpoint, - true, - chan.holder_commitment_point.next_transaction_number(), - &&logger, - ) { - Ok(commitment_signed) => { + funding.channel_transaction_parameters.funding_outpoint = + Some(funding_outpoint); + match chan.context.get_initial_commitment_signed_v2(&funding, &&logger) + { + Some(commitment_signed) => { // 
Advance the state pending_splice.funding_negotiation = Some(FundingNegotiation::AwaitingSignatures { @@ -2075,14 +2086,17 @@ where }); Ok((interactive_tx_constructor, commitment_signed)) }, - Err(e) => { + // TODO(splicing): Support async signing + None => { // Restore the taken state for later error handling pending_splice.funding_negotiation = Some(FundingNegotiation::ConstructingTransaction { funding, interactive_tx_constructor, }); - Err(e) + Err(AbortReason::InternalError( + "Failed to compute commitment_signed signatures", + )) }, } })? @@ -6203,33 +6217,6 @@ where Ok(()) } - #[rustfmt::skip] - fn funding_tx_constructed( - &mut self, funding: &mut FundingScope, funding_outpoint: OutPoint, is_splice: bool, - holder_commitment_transaction_number: u64, logger: &L, - ) -> Result - where - L::Target: Logger - { - funding.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint); - - if !is_splice { - self.assert_no_commitment_advancement(holder_commitment_transaction_number, "initial commitment_signed"); - self.channel_state = ChannelState::FundingNegotiated(FundingNegotiatedFlags::new()); - } - - let commitment_signed = self.get_initial_commitment_signed_v2(&funding, logger); - let commitment_signed = match commitment_signed { - Some(commitment_signed) => commitment_signed, - // TODO(splicing): Support async signing - None => { - return Err(AbortReason::InternalError("Failed to compute commitment_signed signatures")); - }, - }; - - Ok(commitment_signed) - } - /// Asserts that the commitment tx numbers have not advanced from their initial number. fn assert_no_commitment_advancement( &self, holder_commitment_transaction_number: u64, msg_name: &str, From 063947ff1f598fabc7dc201ef4d950784e9c1ba8 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Mon, 10 Nov 2025 09:57:08 -0800 Subject: [PATCH 24/25] Replace quiescence fuzz coverage with splicing Splicing should also have fuzz coverage, and it depends on the quiescence protocol. 
Backport of 1e7862829bd9349f7a442b5a6f836ba898f7e87f --- fuzz/src/chanmon_consistency.rs | 432 ++++++++++++++++++++++++++++++-- 1 file changed, 416 insertions(+), 16 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 9f03de47d23..943e1f2d63a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -43,13 +43,16 @@ use lightning::chain::{ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch, }; use lightning::events; -use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; +use lightning::ln::channel::{ + FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS, +}; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; +use lightning::ln::funding::{FundingTxInput, SpliceContribution}; use lightning::ln::inbound_payment::ExpandedKey; use lightning::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, @@ -68,6 +71,7 @@ use lightning::sign::{ }; use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::util::config::UserConfig; +use lightning::util::errors::APIError; use lightning::util::hash_tables::*; use lightning::util::logger::Logger; use lightning::util::ser::{LengthReadable, ReadableArgs, Writeable, Writer}; @@ -163,6 +167,63 @@ impl Writer for VecWriter { } } +pub struct TestWallet { + secret_key: SecretKey, + utxos: Mutex>, + secp: Secp256k1, +} + +impl TestWallet { + pub fn new(secret_key: SecretKey) -> Self { + Self { secret_key, utxos: Mutex::new(Vec::new()), secp: Secp256k1::new() } + } + + fn get_change_script(&self) -> Result { + let public_key = bitcoin::PublicKey::new(self.secret_key.public_key(&self.secp)); + 
Ok(ScriptBuf::new_p2wpkh(&public_key.wpubkey_hash().unwrap())) + } + + pub fn add_utxo(&self, outpoint: bitcoin::OutPoint, value: Amount) -> TxOut { + let public_key = bitcoin::PublicKey::new(self.secret_key.public_key(&self.secp)); + let utxo = lightning::events::bump_transaction::Utxo::new_v0_p2wpkh( + outpoint, + value, + &public_key.wpubkey_hash().unwrap(), + ); + self.utxos.lock().unwrap().push(utxo.clone()); + utxo.output + } + + pub fn sign_tx( + &self, mut tx: Transaction, + ) -> Result { + let utxos = self.utxos.lock().unwrap(); + for i in 0..tx.input.len() { + if let Some(utxo) = + utxos.iter().find(|utxo| utxo.outpoint == tx.input[i].previous_output) + { + let sighash = bitcoin::sighash::SighashCache::new(&tx).p2wpkh_signature_hash( + i, + &utxo.output.script_pubkey, + utxo.output.value, + bitcoin::EcdsaSighashType::All, + )?; + let signature = self.secp.sign_ecdsa( + &secp256k1::Message::from_digest(sighash.to_byte_array()), + &self.secret_key, + ); + let bitcoin_sig = bitcoin::ecdsa::Signature { + signature, + sighash_type: bitcoin::EcdsaSighashType::All, + }; + tx.input[i].witness = + bitcoin::Witness::p2wpkh(&bitcoin_sig, &self.secret_key.public_key(&self.secp)); + } + } + Ok(tx) + } +} + /// The LDK API requires that any time we tell it we're done persisting a `ChannelMonitor[Update]` /// we never pass it in as the "latest" `ChannelMonitor` on startup. 
However, we can pass /// out-of-date monitors as long as we never told LDK we finished persisting them, which we do by @@ -671,6 +732,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; + config.reject_inbound_splices = false; if anchors { config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config.manually_accept_inbound_channels = true; @@ -724,6 +786,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut config = UserConfig::default(); config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; + config.reject_inbound_splices = false; if anchors { config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config.manually_accept_inbound_channels = true; @@ -984,6 +1047,30 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }}; } + let wallet_a = TestWallet::new(SecretKey::from_slice(&[1; 32]).unwrap()); + let wallet_b = TestWallet::new(SecretKey::from_slice(&[2; 32]).unwrap()); + let wallet_c = TestWallet::new(SecretKey::from_slice(&[3; 32]).unwrap()); + let wallets = vec![wallet_a, wallet_b, wallet_c]; + let coinbase_tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![bitcoin::TxIn { ..Default::default() }], + output: wallets + .iter() + .map(|w| TxOut { + value: Amount::from_sat(100_000), + script_pubkey: w.get_change_script().unwrap(), + }) + .collect(), + }; + let coinbase_txid = coinbase_tx.compute_txid(); + wallets.iter().enumerate().for_each(|(i, w)| { + w.add_utxo( + bitcoin::OutPoint { txid: coinbase_txid, vout: i as u32 }, + Amount::from_sat(100_000), + ); + }); + let fee_est_a = Arc::new(FuzzEstimator { ret_val: 
atomic::AtomicU32::new(253) }); let mut last_htlc_clear_fee_a = 253; let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); @@ -1073,6 +1160,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, + MessageSendEvent::SendSpliceInit { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendSpliceAck { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendSpliceLocked { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxAddInput { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxAddOutput { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxComplete { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, + MessageSendEvent::SendTxAbort { ref node_id, .. } => { + if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } + *node_id == a_id + }, MessageSendEvent::SendChannelReady { .. } => continue, MessageSendEvent::SendAnnouncementSignatures { .. 
} => continue, MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { @@ -1208,7 +1323,79 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { dest.handle_stfu(nodes[$node].get_our_node_id(), msg); } } - } + }, + MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_add_input from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_add_input(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_add_output from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_add_output(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_remove_input from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_remove_input(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_remove_output from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_remove_output(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_complete from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_complete(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendTxAbort { ref 
node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering tx_abort from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_tx_abort(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering splice_init from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_splice_init(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering splice_ack from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_splice_ack(nodes[$node].get_our_node_id(), msg); + } + } + }, + MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + for (idx, dest) in nodes.iter().enumerate() { + if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering splice_locked from node {} to node {}.\n", $node, idx).as_bytes()); + dest.handle_splice_locked(nodes[$node].get_our_node_id(), msg); + } + } + }, MessageSendEvent::SendChannelReady { .. } => { // Can be generated as a reestablish response }, @@ -1347,6 +1534,25 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::Event::PaymentForwarded { .. } if $node == 1 => {}, events::Event::ChannelReady { .. } => {}, events::Event::HTLCHandlingFailed { .. } => {}, + + events::Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => { + let signed_tx = wallets[$node].sign_tx(unsigned_transaction).unwrap(); + nodes[$node] + .funding_transaction_signed( + &channel_id, + &counterparty_node_id, + signed_tx, + ) + .unwrap(); + }, + events::Event::SplicePending { .. 
} => {}, + events::Event::SpliceFailed { .. } => {}, + _ => { if out.may_fail.load(atomic::Ordering::Acquire) { return; @@ -1652,16 +1858,220 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa0 => { - nodes[0].maybe_propose_quiescence(&nodes[1].get_our_node_id(), &chan_a_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[0].splice_channel( + &chan_a_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } }, 0xa1 => { - nodes[1].maybe_propose_quiescence(&nodes[0].get_our_node_id(), &chan_a_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_a_id, + &nodes[0].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } }, 0xa2 => { - nodes[1].maybe_propose_quiescence(&nodes[2].get_our_node_id(), &chan_b_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_b_id, 
+ &nodes[2].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } }, 0xa3 => { - nodes[2].maybe_propose_quiescence(&nodes[1].get_our_node_id(), &chan_b_id).unwrap() + let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(10_000), + inputs: vec![input], + change_script: None, + }; + let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[2].splice_channel( + &chan_b_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + }, + + // We conditionally splice out `MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS` only when the node + // has double the balance required to send a payment upon a `0xff` byte. We do this to + // ensure there's always liquidity available for a payment to succeed then. 
+ 0xa4 => { + let outbound_capacity_msat = nodes[0] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_a_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_a.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[0].splice_channel( + &chan_a_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } + }, + 0xa5 => { + let outbound_capacity_msat = nodes[1] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_a_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_a_id, + &nodes[0].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } + }, + 0xa6 => { + let outbound_capacity_msat = nodes[1] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_b_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: 
coinbase_tx.output[1].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_b.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[1].splice_channel( + &chan_b_id, + &nodes[2].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } + }, + 0xa7 => { + let outbound_capacity_msat = nodes[2] + .list_channels() + .iter() + .find(|chan| chan.channel_id == chan_b_id) + .map(|chan| chan.outbound_capacity_msat) + .unwrap(); + if outbound_capacity_msat >= 20_000_000 { + let contribution = SpliceContribution::SpliceOut { + outputs: vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), + }], + }; + let funding_feerate_sat_per_kw = + fee_est_c.ret_val.load(atomic::Ordering::Acquire); + if let Err(e) = nodes[2].splice_channel( + &chan_b_id, + &nodes[1].get_our_node_id(), + contribution, + funding_feerate_sat_per_kw, + None, + ) { + assert!( + matches!(e, APIError::APIMisuseError { ref err } if err.contains("splice pending")), + "{:?}", + e + ); + } + } }, 0xb0 | 0xb1 | 0xb2 => { @@ -1828,16 +2238,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; } - // We may be pending quiescence, so first process all messages to ensure we can - // complete the quiescence handshake. - process_all_events!(); - - // Then exit quiescence and process all messages again, to resolve any pending - // HTLCs (only irrevocably committed ones) before attempting to send more payments. 
- nodes[0].exit_quiescence(&nodes[1].get_our_node_id(), &chan_a_id).unwrap(); - nodes[1].exit_quiescence(&nodes[0].get_our_node_id(), &chan_a_id).unwrap(); - nodes[1].exit_quiescence(&nodes[2].get_our_node_id(), &chan_b_id).unwrap(); - nodes[2].exit_quiescence(&nodes[1].get_our_node_id(), &chan_b_id).unwrap(); process_all_events!(); // Finally, make sure that at least one end of each channel can make a substantial payment From 03eea88ab7266124dda62f4b56597d367e462d86 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 12 Nov 2025 13:19:42 +0000 Subject: [PATCH 25/25] Add additional MSRV pin for `quote` --- ci/ci-tests.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index f8551ee7a70..f3ecc72806a 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -14,6 +14,9 @@ function PIN_RELEASE_DEPS { # syn 2.0.107 requires rustc 1.68.0 [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose + # quote 1.0.42 requires rustc 1.68.0 + [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose + return 0 # Don't fail the script if our rustc is higher than the last check } @@ -54,6 +57,7 @@ echo -e "\n\nTesting upgrade from prior versions of LDK" pushd lightning-tests [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose cargo test popd @@ -125,6 +129,7 @@ echo -e "\n\nTesting no_std build on a downstream no-std crate" # check no-std compatibility across dependencies pushd no-std-check [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose cargo check --verbose --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd