From 4629ee29dedf0a65943c517c534d2b3500c07516 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 12 Nov 2025 08:11:10 +0100 Subject: [PATCH 1/5] Implement Display for Record As a preparatory step for unifying test log output. --- lightning/src/ln/functional_tests.rs | 4 ++-- lightning/src/util/logger.rs | 8 ++++++++ lightning/src/util/test_utils.rs | 4 +--- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index c161a9664c0..a4de62a11e5 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -7336,7 +7336,7 @@ pub fn test_concurrent_monitor_claim() { // Copy ChainMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain let chain_source = test_utils::TestChainSource::new(Network::Testnet); - let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice")); + let logger = test_utils::TestLogger::with_id("alice".to_string()); let persister = test_utils::TestPersister::new(); let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::new(Mutex::new( nodes[0].blocks.lock().unwrap().clone(), @@ -7387,7 +7387,7 @@ pub fn test_concurrent_monitor_claim() { // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first. let chain_source = test_utils::TestChainSource::new(Network::Testnet); - let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob")); + let logger = test_utils::TestLogger::with_id("bob".to_string()); let persister = test_utils::TestPersister::new(); let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index 283d3158144..253e8bd8db1 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -17,6 +17,7 @@ use bitcoin::secp256k1::PublicKey; use core::cmp; use core::fmt; +use core::fmt::Display; use core::ops::Deref; use crate::ln::types::ChannelId; @@ -152,6 +153,13 @@ impl<$($args)?> Record<$($args)?> { } } } + +impl<$($args)?> Display for Record<$($args)?> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let context = format!("{} [{}:{}]", self.level, self.module_path, self.line); + write!(f, "{:<48} {}", context, self.args) + } +} } } #[cfg(not(c_bindings))] impl_record!('a, ); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ad8ea224205..d0a8bb3fde6 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -1698,9 +1698,7 @@ impl TestLogger { impl Logger for TestLogger { fn log(&self, record: Record) { - let context = - format!("{} {} [{}:{}]", self.id, record.level, record.module_path, record.line); - let s = format!("{:<55} {}", context, record.args); + let s = format!("{:<6} {}", self.id, record); #[cfg(ldk_bench)] { // When benchmarking, we don't actually want to print logs, but we do want to format From 25b440ef608f92bb8a610b630308fe8a462217f4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 11 Nov 2025 14:09:24 +0100 Subject: [PATCH 2/5] Align log levels Makes sure that 4 and 5 character log levels line up. 
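
A minimal sketch of how a downstream `Logger` can make use of the `Display` impl added in the previous patch together with the level padding in this one. The `Logger` trait and `Record` type shapes are taken from the diffs in this series; `StdoutLogger` itself is a hypothetical example and not part of these patches.

    use lightning::util::logger::{Logger, Record};

    // Hypothetical downstream logger; only `Logger` and `Record` come from
    // lightning::util::logger, the struct itself is purely illustrative.
    struct StdoutLogger;

    impl Logger for StdoutLogger {
        fn log(&self, record: Record) {
            // `Record` now formats itself as "LEVEL [module_path:line] message",
            // with the level written into a five-character field so that
            // four- and five-character level names line up.
            println!("{}", record);
        }
    }

Custom loggers that previously stitched `record.level`, `record.module_path`, `record.line` and `record.args` together by hand can defer to this single `Display` impl, which is what the test loggers in the next patch switch to.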
--- lightning/src/util/logger.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index 253e8bd8db1..f53c02748a8 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -156,7 +156,7 @@ impl<$($args)?> Record<$($args)?> { impl<$($args)?> Display for Record<$($args)?> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let context = format!("{} [{}:{}]", self.level, self.module_path, self.line); + let context = format!("{:<5} [{}:{}]", self.level, self.module_path, self.line); write!(f, "{:<48} {}", context, self.args) } } From 9ee6b26064f40a1fbd833ef31f72f591cf4d0117 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 12 Nov 2025 08:44:53 +0100 Subject: [PATCH 3/5] Unify test logger output Now that we have Display implemented on Record, the various test logger implementations can make use of that. --- fuzz/src/full_stack.rs | 9 +-------- fuzz/src/onion_message.rs | 15 ++++++--------- fuzz/src/utils/test_logger.rs | 11 +---------- lightning-dns-resolver/src/lib.rs | 2 +- lightning-net-tokio/src/lib.rs | 9 +-------- 5 files changed, 10 insertions(+), 36 deletions(-) diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 97a74871ea4..617050f2427 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -1608,14 +1608,7 @@ mod tests { .unwrap() .entry((record.module_path.to_string(), format!("{}", record.args))) .or_insert(0) += 1; - println!( - "{:<5} [{} : {}, {}] {}", - record.level.to_string(), - record.module_path, - record.file, - record.line, - record.args - ); + println!("{}", record); } } diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs index 934d748d6a5..09634a1c373 100644 --- a/fuzz/src/onion_message.rs +++ b/fuzz/src/onion_message.rs @@ -102,7 +102,11 @@ impl MessageRouter for TestMessageRouter { fn find_path( &self, _sender: PublicKey, _peers: Vec, destination: Destination, ) -> Result { - Ok(OnionMessagePath { intermediate_nodes: vec![], destination, first_node_addresses: vec![] }) + Ok(OnionMessagePath { + intermediate_nodes: vec![], + destination, + first_node_addresses: vec![], + }) } fn create_blinded_paths( @@ -328,14 +332,7 @@ mod tests { let mut lines_lock = self.lines.lock().unwrap(); let key = (record.module_path.to_string(), format!("{}", record.args)); *lines_lock.entry(key).or_insert(0) += 1; - println!( - "{:<5} [{} : {}, {}] {}", - record.level.to_string(), - record.module_path, - record.file, - record.line, - record.args - ); + println!("{}", record); } } diff --git a/fuzz/src/utils/test_logger.rs b/fuzz/src/utils/test_logger.rs index 6d9de02e387..f8369879447 100644 --- a/fuzz/src/utils/test_logger.rs +++ b/fuzz/src/utils/test_logger.rs @@ -59,15 +59,6 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> { impl Logger for TestLogger { fn log(&self, record: Record) { - write!( - LockedWriteAdapter(&self.out), - "{:<5} {} [{} : {}] {}\n", - record.level.to_string(), - self.id, - record.module_path, - record.line, - record.args - ) - .unwrap(); + write!(LockedWriteAdapter(&self.out), "{:<6} {}", self.id, record).unwrap(); } } diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index f5b1d53fc8a..5e069574af7 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -192,7 +192,7 @@ mod test { } impl Logger for TestLogger { fn log(&self, record: lightning::util::logger::Record) { - eprintln!("{}: {}", self.node, record.args); + eprintln!("{:<8} 
{}", self.node, record); } } impl Deref for TestLogger { diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 068f77a84bb..5d620ec4413 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -629,14 +629,7 @@ mod tests { pub struct TestLogger(); impl lightning::util::logger::Logger for TestLogger { fn log(&self, record: lightning::util::logger::Record) { - println!( - "{:<5} [{} : {}, {}] {}", - record.level.to_string(), - record.module_path, - record.file, - record.line, - record.args - ); + println!("{}", record); } } From 1b62becc01f712bbb4e346cc549e009be49fd812 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 11 Nov 2025 12:53:25 +0100 Subject: [PATCH 4/5] Remove redundant channel ID logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, log messages often included the channel ID both in the message text and in the structured `channel_id` field. This led to redundant information and made it difficult to quickly identify which log lines pertain to a given channel, as the ID could appear in different positions or sometimes not at all. This change removes the channel ID from the message in all cases where it is already present in the structured field. A test run was used to verify that the structured field always appeared in these log calls. Some exceptions remain—for example, calls where the structured field contains a temporary channel ID and the message contains the final ID were left unchanged. --- lightning/src/chain/chainmonitor.rs | 60 ++----- lightning/src/chain/channelmonitor.rs | 11 +- lightning/src/ln/chanmon_update_fail_tests.rs | 3 +- lightning/src/ln/channel.rs | 159 +++++++---------- lightning/src/ln/channelmanager.rs | 160 +++++++++--------- lightning/src/ln/functional_tests.rs | 4 +- lightning/src/ln/htlc_reserve_unit_tests.rs | 16 +- lightning/src/ln/invoice_utils.rs | 28 +-- lightning/src/ln/payment_tests.rs | 3 +- lightning/src/ln/peer_handler.rs | 13 +- 10 files changed, 202 insertions(+), 255 deletions(-) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index d8d6c90921f..7cd838930bd 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -580,11 +580,7 @@ where let has_pending_claims = monitor_state.monitor.has_pending_claims(); if has_pending_claims || get_partition_key(channel_id) % partition_factor == 0 { - log_trace!( - logger, - "Syncing Channel Monitor for channel {}", - log_funding_info!(monitor) - ); + log_trace!(logger, "Syncing Channel Monitor"); // Even though we don't track monitor updates from chain-sync as pending, we still want // updates per-channel to be well-ordered so that users don't see a // `ChannelMonitorUpdate` after a channel persist for a channel with the same @@ -592,11 +588,9 @@ where let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); match self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor) { - ChannelMonitorUpdateStatus::Completed => log_trace!( - logger, - "Finished syncing Channel Monitor for channel {} for block-data", - log_funding_info!(monitor) - ), + ChannelMonitorUpdateStatus::Completed => { + log_trace!(logger, "Finished syncing Channel Monitor for block-data") + }, ChannelMonitorUpdateStatus::InProgress => { log_trace!( logger, @@ -961,16 +955,12 @@ where } if have_monitors_to_prune { let mut monitors = self.monitors.write().unwrap(); - monitors.retain(|channel_id, 
monitor_holder| { + monitors.retain(|_channel_id, monitor_holder| { let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None); let (is_fully_resolved, _) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger); if is_fully_resolved { - log_info!( - logger, - "Archiving fully resolved ChannelMonitor for channel ID {}", - channel_id - ); + log_info!(logger, "Archiving fully resolved ChannelMonitor"); self.persister .archive_persisted_channel(monitor_holder.monitor.persistence_key()); false @@ -1106,11 +1096,7 @@ where }, hash_map::Entry::Vacant(e) => e, }; - log_trace!( - logger, - "Loaded existing ChannelMonitor for channel {}", - log_funding_info!(monitor) - ); + log_trace!(logger, "Loaded existing ChannelMonitor"); if let Some(ref chain_source) = self.chain_source { monitor.load_outputs_to_watch(chain_source, &self.logger); } @@ -1366,25 +1352,17 @@ where }, hash_map::Entry::Vacant(e) => e, }; - log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor)); + log_trace!(logger, "Got new ChannelMonitor"); let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); let persist_res = self.persister.persist_new_channel(monitor.persistence_key(), &monitor); match persist_res { ChannelMonitorUpdateStatus::InProgress => { - log_info!( - logger, - "Persistence of new ChannelMonitor for channel {} in progress", - log_funding_info!(monitor) - ); + log_info!(logger, "Persistence of new ChannelMonitor in progress",); pending_monitor_updates.push(update_id); }, ChannelMonitorUpdateStatus::Completed => { - log_info!( - logger, - "Persistence of new ChannelMonitor for channel {} completed", - log_funding_info!(monitor) - ); + log_info!(logger, "Persistence of new ChannelMonitor completed",); }, ChannelMonitorUpdateStatus::UnrecoverableError => { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; @@ -1426,12 +1404,7 @@ where Some(monitor_state) => { let monitor = &monitor_state.monitor; let logger = WithChannelMonitor::from(&self.logger, &monitor, None); - log_trace!( - logger, - "Updating ChannelMonitor to id {} for channel {}", - update.update_id, - log_funding_info!(monitor) - ); + log_trace!(logger, "Updating ChannelMonitor to id {}", update.update_id,); // We hold a `pending_monitor_updates` lock through `update_monitor` to ensure we // have well-ordered updates from the users' point of view. See the @@ -1452,7 +1425,7 @@ where // We don't want to persist a `monitor_update` which results in a failure to apply later // while reading `channel_monitor` with updates from storage. Instead, we should persist // the entire `channel_monitor` here. - log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor)); + log_warn!(logger, "Failed to update ChannelMonitor. 
Going ahead and persisting the entire ChannelMonitor"); self.persister.update_persisted_channel( monitor.persistence_key(), None, @@ -1468,18 +1441,17 @@ where match persist_res { ChannelMonitorUpdateStatus::InProgress => { pending_monitor_updates.push(update_id); - log_debug!(logger, - "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress", + log_debug!( + logger, + "Persistence of ChannelMonitorUpdate id {:?} in progress", update_id, - log_funding_info!(monitor) ); }, ChannelMonitorUpdateStatus::Completed => { log_debug!( logger, - "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed", + "Persistence of ChannelMonitorUpdate id {:?} completed", update_id, - log_funding_info!(monitor) ); }, ChannelMonitorUpdateStatus::UnrecoverableError => { diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 1d035b68650..d217f8a7995 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -4223,8 +4223,8 @@ impl ChannelMonitorImpl { log_info!(logger, "Applying pre-0.1 force close update to monitor {} with {} change(s).", log_funding_info!(self), updates.updates.len()); } else { - log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).", - log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len()); + log_info!(logger, "Applying update, bringing update_id from {} to {} with {} change(s).", + self.latest_update_id, updates.update_id, updates.updates.len()); } // ChannelMonitor updates may be applied after force close if we receive a preimage for a @@ -4351,7 +4351,7 @@ impl ChannelMonitorImpl { self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger, true); } else if !self.holder_tx_signed { log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); - log_error!(logger, " in channel monitor for channel {}!", &self.channel_id()); + log_error!(logger, " in channel monitor!"); log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!"); } else { // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager @@ -5479,7 +5479,7 @@ impl ChannelMonitorImpl { } else { "".to_string() }; - log_info!(logger, "{desc} for channel {} confirmed with txid {txid}{action}", self.channel_id()); + log_info!(logger, "{desc} confirmed with txid {txid}{action}"); self.alternative_funding_confirmed = Some((txid, height)); @@ -5531,8 +5531,7 @@ impl ChannelMonitorImpl { .map(|(txid, _)| txid) .unwrap_or_else(|| self.funding.funding_txid()) ); - log_info!(logger, "Channel {} closed by funding output spend in txid {txid}", - self.channel_id()); + log_info!(logger, "Channel closed by funding output spend in txid {txid}"); if !self.funding_spend_seen { self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(())); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..919920f3d23 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -150,8 +150,7 @@ fn test_monitor_and_persister_update_fail() { } logger.assert_log_regex( "lightning::chain::chainmonitor", - regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.") - .unwrap(), + regex::Regex::new("Failed to update ChannelMonitor").unwrap(), 1, ); diff --git 
a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c5942f38da6..6137c9d187c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5195,8 +5195,8 @@ where { log_info!( logger, - "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", - &self.channel_id(), + "Attempting to fail HTLC due to fee spike buffer violation. Rebalancing is required.", + ); return Err(LocalHTLCFailureReason::FeeSpikeBuffer); } @@ -6291,11 +6291,7 @@ where { let signatures = self.get_initial_counterparty_commitment_signatures(funding, logger); if let Some((signature, htlc_signatures)) = signatures { - log_info!( - logger, - "Generated commitment_signed for peer for channel {}", - &self.channel_id() - ); + log_info!(logger, "Generated commitment_signed for peer",); if matches!(self.channel_state, ChannelState::FundingNegotiated(_)) { // We shouldn't expect any HTLCs before `ChannelReady`. debug_assert!(htlc_signatures.is_empty()); @@ -6396,9 +6392,9 @@ where log_info!( logger, - "Funding txid {} for channel {} confirmed in block {}", + "Funding txid {} confirmed in block {}", funding_txo.txid, - &self.channel_id(), + block_hash, ); @@ -7371,8 +7367,7 @@ where } log_trace!( logger, - "Adding HTLC claim to holding_cell in channel {}! Current state: {}", - &self.context.channel_id(), + "Adding HTLC claim to holding_cell! Current state: {}", self.context.channel_state.to_u32() ); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { @@ -7403,9 +7398,8 @@ where } log_trace!( logger, - "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", + "Upgrading HTLC {} to LocalRemoved with a Fulfill!", &htlc.payment_hash, - &self.context.channel_id ); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill( payment_preimage_arg.clone(), @@ -7565,13 +7559,13 @@ where _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id()); + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell.", htlc_id_arg); self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg)); return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg, - E::Message::name(), &self.context.channel_id()); + log_trace!(logger, "Failing HTLC ID {} back with {} message.", htlc_id_arg, + E::Message::name()); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; htlc.state = err_contents.clone().to_inbound_htlc_state(); @@ -7938,15 +7932,17 @@ where let counterparty_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!( logger, - "Splice initial counterparty tx for channel {} is: txid {} tx {}", - &self.context.channel_id(), + "Splice initial counterparty tx is: txid {} tx {}", counterparty_bitcoin_tx.txid, encode::serialize_hex(&counterparty_bitcoin_tx.transaction) ); } - log_info!(logger, "Received splice initial commitment_signed from peer for channel {} with funding txid {}", - &self.context.channel_id(), pending_splice_funding.get_funding_txo().unwrap().txid); + log_info!( + logger, + "Received splice initial commitment_signed from peer with funding txid {}", + pending_splice_funding.get_funding_txo().unwrap().txid + ); self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { @@ -8244,8 +8240,8 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; 
monitor_update.updates.append(&mut additional_update.updates); } - log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", - &self.context.channel_id); + log_debug!(logger, "Received valid commitment_signed from peer, updated HTLC state but awaiting a monitor update resolution to reply.", + ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8313,14 +8309,13 @@ where { log_trace!( logger, - "Freeing holding cell with {} HTLC updates{} in channel {}", + "Freeing holding cell with {} HTLC updates{}", self.context.holding_cell_htlc_updates.len(), if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, - &self.context.channel_id() ); let mut monitor_update = ChannelMonitorUpdate { @@ -8377,7 +8372,12 @@ where update_add_count += 1; }, Err((_, msg)) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id()); + log_info!( + logger, + "Failed to send HTLC with payment_hash {} due to {}", + &payment_hash, + msg + ); // If we fail to send here, then this HTLC should be failed // backwards. Failing to send here indicates that this HTLC may // keep being put back into the holding cell without ever being @@ -8470,8 +8470,8 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", - &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" }, + log_debug!(logger, "Freeing holding cell resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", + if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); @@ -8608,11 +8608,7 @@ where self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - log_trace!( - logger, - "Updating HTLCs on receipt of RAA in channel {}...", - &self.context.channel_id() - ); + log_trace!(logger, "Updating HTLCs on receipt of RAA..."); let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); @@ -8836,8 +8832,8 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.", - &self.context.channel_id(), release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with holding cell HTLCs freed. {} monitor update.", + release_state_str); self.monitor_updating_paused( false, @@ -8865,8 +8861,7 @@ where log_debug!( logger, - "Received a valid revoke_and_ack for channel {}. {} monitor update.", - &self.context.channel_id(), + "Received a valid revoke_and_ack. 
{} monitor update.", release_state_str ); if self.context.channel_state.can_generate_new_commitment() { @@ -8882,12 +8877,7 @@ where } else { "can continue progress" }; - log_debug!( - logger, - "Holding back commitment update until channel {} {}", - &self.context.channel_id, - reason - ); + log_debug!(logger, "Holding back commitment update until {}", reason); } self.monitor_updating_paused( @@ -8900,8 +8890,8 @@ where ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.", - &self.context.channel_id(), release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", + release_state_str); self.monitor_updating_paused( false, @@ -8955,9 +8945,8 @@ where { log_info!( logger, - "Sending 0conf splice_locked txid {} to our peer for channel {}", + "Sending 0conf splice_locked txid {} to our peer", splice_txid, - &self.context.channel_id ); } @@ -9632,12 +9621,12 @@ where } } if !self.holder_commitment_point.can_advance() { - log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", - &self.context.channel_id(), self.holder_commitment_point.next_transaction_number()); + log_trace!(logger, "Last revoke-and-ack pending for sequence {} because the next per-commitment point is not available", + self.holder_commitment_point.next_transaction_number()); } if per_commitment_secret.is_none() { - log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment secret for {} is not available", - &self.context.channel_id(), self.holder_commitment_point.next_transaction_number(), + log_trace!(logger, "Last revoke-and-ack pending for sequence {} because the next per-commitment secret for {} is not available", + self.holder_commitment_point.next_transaction_number(), self.holder_commitment_point.next_transaction_number() + 2); } // Technically if HolderCommitmentPoint::can_advance is false, @@ -9646,8 +9635,8 @@ where // CS before we have any commitment point available. Blocking our // RAA here is a convenient way to make sure that post-funding // we're only ever waiting on one commitment point at a time. 
- log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", - &self.context.channel_id(), self.holder_commitment_point.next_transaction_number()); + log_trace!(logger, "Last revoke-and-ack pending for sequence {} because the next per-commitment point is not available", + self.holder_commitment_point.next_transaction_number()); self.context.signer_pending_revoke_and_ack = true; None } @@ -9727,8 +9716,8 @@ where None }; - log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" }, + log_trace!(logger, "Regenerating latest commitment update with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger) { if self.context.signer_pending_commitment_update { @@ -10067,9 +10056,9 @@ where if msg.next_local_commitment_number == next_counterparty_commitment_number { if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); + log_debug!(logger, "Reconnected with only lost outbound RAA"); } else { - log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); + log_debug!(logger, "Reconnected with no loss"); } Ok(ReestablishResponses { @@ -10092,9 +10081,9 @@ where assert!(tx_signatures.is_none()); if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); + log_debug!(logger, "Reconnected channel with lost outbound RAA and lost remote commitment tx"); } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); + log_debug!(logger, "Reconnected channel with only lost remote commitment tx"); } if self.context.channel_state.is_monitor_update_in_progress() { @@ -10112,7 +10101,7 @@ where } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst && self.context.signer_pending_revoke_and_ack { - log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack", &self.context.channel_id()); + log_trace!(logger, "Reconnected channel with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack"); self.context.signer_pending_commitment_update = true; None } else { @@ -10120,7 +10109,7 @@ where }; let raa = if self.context.resend_order == RAACommitmentOrder::CommitmentFirst && self.context.signer_pending_commitment_update && required_revoke.is_some() { - log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for commitment update", &self.context.channel_id()); + log_trace!(logger, "Reconnected channel with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, 
waiting on signer for commitment update"); self.context.signer_pending_revoke_and_ack = true; None } else { @@ -10923,14 +10912,12 @@ where pub fn on_startup_drop_completed_blocked_mon_updates_through( &mut self, logger: &L, loaded_mon_update_id: u64, ) { - let channel_id = self.context.channel_id(); self.context.blocked_monitor_updates.retain(|update| { if update.update.update_id <= loaded_mon_update_id { log_info!( logger, - "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager", + "Dropping completed ChannelMonitorUpdate id {} due to a stale ChannelManager", update.update.update_id, - channel_id, ); false } else { @@ -11143,21 +11130,11 @@ where return None; } } else { - log_info!( - logger, - "Waiting on splice_locked txid {} for channel {}", - splice_txid, - &self.context.channel_id, - ); + log_info!(logger, "Waiting on splice_locked txid {}", splice_txid); return None; } - log_info!( - logger, - "Promoting splice funding txid {} for channel {}", - splice_txid, - &self.context.channel_id, - ); + log_info!(logger, "Promoting splice funding txid {}", splice_txid); let discarded_funding = { // Scope `funding` to avoid unintentionally using it later since it is swapped below. @@ -11263,7 +11240,7 @@ where } if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { - log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); + log_info!(logger, "Sending a channel_ready to our peer"); let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); return Ok((Some(FundingConfirmedMessage::Establishment(channel_ready)), announcement_sigs)); } @@ -11389,7 +11366,7 @@ where let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); + log_info!(logger, "Sending a channel_ready to our peer"); return Ok((Some(FundingConfirmedMessage::Establishment(channel_ready)), timed_out_htlcs, announcement_sigs)); } @@ -11412,7 +11389,7 @@ where } } else if !self.funding.is_outbound() && self.funding.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { - log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id); + log_info!(logger, "Closing channel due to funding timeout"); // If funding_tx_confirmed_in is unset, the channel must not be active assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new())); assert!(!self.context.channel_state.is_our_channel_ready()); @@ -11458,9 +11435,9 @@ where height, ) { log_info!( - logger, "Sending splice_locked txid {} to our peer for channel {}", + logger, "Sending splice_locked txid {} to our peer", splice_locked.splice_txid, - &self.context.channel_id + ); let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = chain_node_signer @@ -11621,7 +11598,7 @@ where return None; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id()); + log_trace!(logger, "Creating an announcement_signatures message"); let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { Ok(a) => a, Err(e) => { @@ -11829,7 +11806,7 @@ where let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let 
remote_last_secret = if self.context.counterparty_next_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.counterparty_next_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id()); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret)); remote_last_secret } else { log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id()); @@ -12449,12 +12426,7 @@ where NS::Target: NodeSigner, L::Target: Logger, { - log_info!( - logger, - "Received splice_locked txid {} from our peer for channel {}", - msg.splice_txid, - &self.context.channel_id, - ); + log_info!(logger, "Received splice_locked txid {} from our peer", msg.splice_txid,); let pending_splice = match self.pending_splice.as_mut() { Some(pending_splice) => pending_splice, @@ -12476,9 +12448,8 @@ where if pending_splice.sent_funding_txid.is_none() { log_info!( logger, - "Waiting for enough confirmations to send splice_locked txid {} for channel {}", + "Waiting for enough confirmations to send splice_locked txid {}", msg.splice_txid, - &self.context.channel_id, ); return Ok(None); } @@ -12837,19 +12808,19 @@ where htlc_signatures = res.1; let trusted_tx = counterparty_commitment_tx.trust(); - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", + log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {}", encode::serialize_hex(&trusted_tx.built_transaction().transaction), &trusted_tx.txid(), encode::serialize_hex(&funding.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id()); + log_bytes!(signature.serialize_compact()[..])); let counterparty_keys = trusted_tx.keys(); debug_assert_eq!(htlc_signatures.len(), trusted_tx.nondust_htlcs().len()); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(trusted_tx.nondust_htlcs()) { - log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", + log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {}", encode::serialize_hex(&chan_utils::build_htlc_transaction(&trusted_tx.txid(), trusted_tx.negotiated_feerate_per_kw(), funding.get_holder_selected_contest_delay(), htlc, funding.get_channel_type(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, funding.get_channel_type(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()), - log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id()); + log_bytes!(htlc_sig.serialize_compact()[..])); } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 644920557d2..16f468193b0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3365,7 +3365,7 @@ macro_rules! 
convert_channel_err { ChannelError::Close((msg, reason)) => { let (mut shutdown_res, chan_update) = $close(reason); let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); - log_error!(logger, "Closed channel {} due to close-required error: {}", $channel_id, msg); + log_error!(logger, "Closed channel due to close-required error: {}", msg); $locked_close(&mut shutdown_res, $chan); let err = MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update); @@ -3664,7 +3664,7 @@ macro_rules! handle_monitor_update_completion { /// Returns whether the monitor update is completed, `false` if the update is in-progress. fn handle_monitor_update_res( - cm: &CM, update_res: ChannelMonitorUpdateStatus, channel_id: ChannelId, logger: LG, + cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG, ) -> bool { debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire)); match update_res { @@ -3678,8 +3678,10 @@ fn handle_monitor_update_res( if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 { panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); } - log_debug!(logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - channel_id); + log_debug!( + logger, + "ChannelMonitor update in flight, holding messages until the update completes.", + ); false }, ChannelMonitorUpdateStatus::Completed => { @@ -3695,8 +3697,7 @@ fn handle_monitor_update_res( macro_rules! handle_initial_monitor { ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = - handle_monitor_update_res($self, $update_res, $chan.context.channel_id(), logger); + let update_completed = handle_monitor_update_res($self, $update_res, logger); if update_completed { handle_monitor_update_completion!( $self, @@ -3731,7 +3732,7 @@ fn handle_new_monitor_update_internal( if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) { let update_res = cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res(cm, update_res, channel_id, logger); + let update_completed = handle_monitor_update_res(cm, update_res, logger); if update_completed { let _ = in_flight_updates.remove(update_idx); } @@ -4647,7 +4648,7 @@ where }; if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { - log_error!(logger, "Force-closing channel {}", channel_id); + log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); mem::drop(peer_state_lock); @@ -4660,7 +4661,7 @@ where let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id); Ok(()) } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { - log_error!(logger, "Force-closing inbound channel request {}", &channel_id); + log_error!(logger, "Force-closing inbound channel request"); if !is_from_counterparty && peer_state.is_connected { peer_state.pending_msg_events.push( MessageSendEvent::HandleError { @@ -5151,11 +5152,7 @@ where }); } let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_trace!( - logger, - "Attempting to generate broadcast channel update for channel {}", - &chan.context.channel_id() - ); + 
log_trace!(logger, "Attempting to generate broadcast channel update",); self.get_channel_update_for_unicast(chan) } @@ -5173,14 +5170,14 @@ where #[rustfmt::skip] fn get_channel_update_for_unicast(&self, chan: &FundedChannel) -> Result { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id()); + log_trace!(logger, "Attempting to generate channel update"); let short_channel_id = match chan.funding.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, }; let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id()); + log_trace!(logger, "Generating channel update"); let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..]; let enabled = chan.context.is_enabled(); @@ -6765,7 +6762,7 @@ where }, None => { let error = format!( - "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}" + "Channel not found for the passed counterparty node_id {next_node_id}" ); let logger = WithContext::from( &self.logger, @@ -6774,7 +6771,11 @@ where None, ); log_error!(logger, "{error} when attempting to forward intercepted HTLC"); - return Err(APIError::ChannelUnavailable { err: error }); + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}" + ), + }); }, } }; @@ -7508,8 +7509,8 @@ where } else { "alternate" }; - log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", - prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); + log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} with corresponding peer {}", + prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, &counterparty_node_id); if let Err((reason, msg)) = optimal_channel.queue_add_htlc( *outgoing_amt_msat, *payment_hash, @@ -8071,8 +8072,8 @@ where chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersistNoEvents; } - log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.", - &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + log_trace!(logger, "Channel qualifies for a feerate change from {} to {}.", + chan.context.get_feerate_sat_per_1000_weight(), new_feerate); chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger); NotifyOption::DoPersist @@ -8211,8 +8212,8 @@ where if peer_state.is_connected { if funded_chan.should_disconnect_peer_awaiting_response() { let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}", - counterparty_node_id, chan_id); + log_debug!(logger, "Disconnecting peer {} due to not making any progress", + counterparty_node_id); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::DisconnectPeerWithWarning { @@ -8234,8 +8235,8 @@ where let context = 
chan.context(); let logger = WithChannelContext::from(&self.logger, context, None); log_error!(logger, - "Force-closing pending channel with ID {} for not establishing in a timely manner", - context.channel_id()); + "Force-closing pending channel for not establishing in a timely manner", + ); let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); @@ -8960,8 +8961,11 @@ where let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false); if let Some(action) = action_opt { - log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}", - chan_id, action); + log_trace!( + logger, + "Tracking monitor update completion action: {:?}", + action + ); peer_state .monitor_update_blocked_actions .entry(chan_id) @@ -9032,8 +9036,8 @@ where mem::drop(peer_state_lock); - log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", - chan_id, action); + log_trace!(logger, "Completing monitor update completion action as claim was redundant: {:?}", + action); if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, blocking_action: blocker, @@ -9134,8 +9138,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(action) = action_opt { log_trace!( logger, - "Tracking monitor update completion action for closed channel {}: {:?}", - chan_id, + "Tracking monitor update completion action for closed channel: {:?}", action ); peer_state @@ -9518,8 +9521,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ channel_ready_order: ChannelReadyOrder, ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", - &channel.context.channel_id(), + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(), pending_update_adds.len(), @@ -10809,7 +10811,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let announcement_sigs_opt = try_channel_entry!(self, peer_state, res, chan_entry); if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id()); + log_trace!(logger, "Sending announcement_signatures"); peer_state.pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, @@ -10820,7 +10822,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // counterparty's announcement_signatures. Thus, we only bother to send a // channel_update here if the channel is not public, i.e. we're not sending an // announcement_signatures. 
- log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id()); + log_trace!(logger, "Sending private initial channel_update for our counterparty"); if let Ok(msg) = self.get_channel_update_for_unicast(chan) { peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), @@ -10872,9 +10874,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if !chan.received_shutdown() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.", - msg.channel_id, - if chan.sent_shutdown() { " after we initiated shutdown" } else { "" }); + log_info!( + logger, + "Received a shutdown message from our counterparty {}.", + if chan.sent_shutdown() { + " after we initiated shutdown" + } else { + "" + } + ); } let funding_txo_opt = chan.funding.get_funding_txo(); @@ -10914,7 +10922,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan_entry.get().context(), None, ); - log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); + log_error!(logger, "Immediately closing unfunded channel as peer asked to cooperatively shut it down (which is unnecessary)"); let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); @@ -11077,8 +11085,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, - "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", - msg.channel_id); + "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor", + ); peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id) .or_insert_with(Vec::new) .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop)); @@ -11667,7 +11675,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ return Ok(NotifyOption::SkipPersistNoEvents); } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id); + log_debug!(logger, "Received channel_update {:?}.", msg); let did_change = try_channel_entry!(self, peer_state, chan.channel_update(&msg), chan_entry); // If nothing changed after applying their update, we don't need to bother // persisting. @@ -11755,8 +11763,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure", - msg.channel_id); + log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel to force channel closure", + ); // Unfortunately, lnd doesn't force close on errors // (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119). // One of the few ways to get an lnd counterparty to force close is by @@ -11955,11 +11963,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } if let Some(announcement_sigs) = splice_promotion.announcement_sigs { - log_trace!( - logger, - "Sending announcement_signatures for channel {}", - chan.context.channel_id() - ); + log_trace!(logger, "Sending announcement_signatures",); peer_state.pending_msg_events.push( MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), @@ -12298,8 +12302,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(shutdown) = shutdown_result { let context = chan.context(); let logger = WithChannelContext::from(&self.logger, context, None); - let chan_id = context.channel_id(); - log_trace!(logger, "Removing channel {} now that the signer is unblocked", chan_id); + log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { let err = convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED); @@ -13552,8 +13555,8 @@ where // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete. - log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", - &channel_id); + log_trace!(logger, "Delaying monitor unlock as another channel's mon update needs to complete first", + ); break; } @@ -13562,8 +13565,8 @@ where if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let channel_funding_outpoint = chan.funding_outpoint(); if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { - log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", - channel_id); + log_debug!(logger, "Unlocking monitor updating and updating monitor", + ); handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, peer_state_lck, peer_state, per_peer_state, chan); if further_update_exists { @@ -13572,8 +13575,8 @@ where continue; } } else { - log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update", - channel_id); + log_trace!(logger, "Unlocked monitor updating without monitors to update", + ); } } } @@ -14339,7 +14342,7 @@ where Some(FundingConfirmedMessage::Establishment(channel_ready)) => { send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready); if funded_channel.context.is_usable() && peer_state.is_connected { - log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel_id); + log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty"); if let Ok(msg) = self.get_channel_update_for_unicast(funded_channel) { pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: funded_channel.context.get_counterparty_node_id(), @@ -14347,7 +14350,7 @@ where }); } } else { - log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel_id); + log_trace!(logger, "Sending channel_ready WITHOUT channel_update"); } }, Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update_opt, discarded_funding)) => { @@ -14440,7 +14443,7 @@ where } if let Some(announcement_sigs) = announcement_sigs { if peer_state.is_connected { - log_trace!(logger, "Sending announcement_signatures for channel {}", funded_channel.context.channel_id()); + log_trace!(logger, "Sending 
announcement_signatures"); pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: funded_channel.context.get_counterparty_node_id(), msg: announcement_sigs, @@ -16803,26 +16806,26 @@ where if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { - log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); + log_error!(logger, " The ChannelMonitor is at update_id {} but the ChannelManager is at update_id {}.", + monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); } if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() { - log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", - &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); + log_error!(logger, " The ChannelMonitor is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", + monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); } if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() { - log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", - &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); + log_error!(logger, " The ChannelMonitor is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", + monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); } if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() { - log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", - &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); + log_error!(logger, " The ChannelMonitor is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", + monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } let shutdown_result = channel.force_shutdown(ClosureReason::OutdatedChannelManager); @@ -16894,8 +16897,8 @@ where Some(*payment_hash), ); log_info!(logger, - "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", - &channel.context.channel_id(), &payment_hash); + "Failing HTLC with hash {} as it is missing in the ChannelMonitor but was present in the (stale) ChannelManager", + &payment_hash); failed_htlcs.push(( channel_htlc_source.clone(), *payment_hash, @@ -16911,8 +16914,8 @@ where &logger, monitor.get_latest_update_id(), ); - log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", - &channel.context.channel_id(), 
channel.context.get_latest_monitor_update_id(), + log_info!(logger, "Successfully loaded at update_id {} against monitor at update id {} with {} blocked updates", + channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); if let Some(short_channel_id) = channel.funding.get_short_channel_id() { short_to_chan_info.insert( @@ -17009,8 +17012,7 @@ where let channel_id = monitor.channel_id(); log_info!( logger, - "Queueing monitor update to ensure missing channel {} is force closed", - &channel_id + "Queueing monitor update to ensure missing channel is force closed", ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), @@ -17336,8 +17338,8 @@ where { // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", - chan_id, monitor.get_latest_update_id(), max_in_flight_update_id); + log_error!(logger, " The ChannelMonitor is at update_id {} with update_id through {} in-flight", + monitor.get_latest_update_id(), max_in_flight_update_id); log_error!( logger, " but the ChannelManager is at update_id {}.", @@ -17555,8 +17557,8 @@ where let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias && update_add_htlc.htlc_id == prev_hop_data.htlc_id; if matches { - log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); + log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel", + &htlc.payment_hash); } !matches }); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index a4de62a11e5..f06c943e71c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -5143,7 +5143,7 @@ pub fn test_fail_holding_cell_htlc_upon_free() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log( "lightning::ln::channel", - format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), + "Freeing holding cell with 1 HTLC updates".to_string(), 1, ); @@ -5254,7 +5254,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log( "lightning::ln::channel", - format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), + "Freeing holding cell with 2 HTLC updates".to_string(), 1, ); diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index dc5d07c180e..ad68b773c3b 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -961,8 +961,12 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { }, _ => panic!("Unexpected event"), }; - nodes[1].logger.assert_log("lightning::ln::channel", - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + nodes[1].logger.assert_log( + "lightning::ln::channel", + "Attempting to fail HTLC due to fee spike buffer violation. Rebalancing is required." 
+ .to_string(), + 1, + ); check_added_monitors(&nodes[1], 3); } else { @@ -2408,8 +2412,12 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { }, _ => panic!("Unexpected event"), }; - nodes[1].logger.assert_log("lightning::ln::channel", - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + nodes[1].logger.assert_log( + "lightning::ln::channel", + "Attempting to fail HTLC due to fee spike buffer violation. Rebalancing is required." + .to_string(), + 1, + ); check_added_monitors(&nodes[1], 3); } diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 7c0190a23a9..c3b3bc33ba9 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -403,7 +403,7 @@ where if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() { - log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id); + log_trace!(logger, "Ignoring channel for invoice route hints"); continue; } @@ -418,8 +418,7 @@ where // look at the public channels instead. log_trace!( logger, - "Not including channels in invoice route hints on account of public channel {}", - &channel.channel_id + "Not including channels in invoice route hints on account of public channel", ); return vec![].into_iter().take(MAX_CHANNEL_HINTS).map(route_hint_from_channel); } @@ -465,20 +464,20 @@ where if new_now_public || new_channel_preferable { log_trace!(logger, - "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", + "Preferring counterparty {} channel (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), - &channel.channel_id, channel.short_channel_id, + channel.short_channel_id, channel.inbound_capacity_msat, &entry.get().channel_id, entry.get().short_channel_id, current_max_capacity); entry.insert(channel); } else { log_trace!(logger, - "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints", + "Preferring counterparty {} channel {} (SCID {:?}, {} msats) (SCID {:?}, {} msats) for invoice route hints", log_pubkey!(channel.counterparty.node_id), &entry.get().channel_id, entry.get().short_channel_id, current_max_capacity, - &channel.channel_id, channel.short_channel_id, + channel.short_channel_id, channel.inbound_capacity_msat); } }, @@ -519,24 +518,15 @@ where }; if include_channel { - log_trace!( - logger, - "Including channel {} in invoice route hints", - &channel.channel_id - ); + log_trace!(logger, "Including channel in invoice route hints",); } else if !has_enough_capacity { log_trace!( logger, - "Ignoring channel {} without enough capacity for invoice route hints", - &channel.channel_id + "Ignoring channel without enough capacity for invoice route hints", ); } else { debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_announced)); - log_trace!( - logger, - "Ignoring channel {} with disconnected peer", - &channel.channel_id - ); + log_trace!(logger, "Ignoring channel with disconnected peer",); } include_channel diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9eb85173a83..819f5915cfc 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -2289,8 +2289,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { 
nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); let err = format!( "Channel with id {} not found for the passed counterparty node_id {}", - log_bytes!([42; 32]), - node_c_id, + chan_id, node_c_id, ); assert_eq!(unknown_chan_id_err, Err(APIError::ChannelUnavailable { err })); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 74f081b03ae..04a7e9cbea1 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -3102,9 +3102,16 @@ where self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendShutdown { ref node_id, ref msg } => { - log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling Shutdown event in peer_handler for node {} for channel {}", - node_id, - &msg.channel_id); + log_debug!( + WithContext::from( + &self.logger, + Some(*node_id), + Some(msg.channel_id), + None + ), + "Handling Shutdown event in peer_handler for node {}", + node_id + ); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { From f2483eb1d78362964d1fb73493672597addb2499 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 11 Nov 2025 13:44:11 +0100 Subject: [PATCH 5/5] Log structured channel id Since most log messages no longer include the channel ID in their text, this commit updates the test logger to output the structured channel ID instead. The ID is truncated for readability. --- lightning/src/util/logger.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index f53c02748a8..724e407de84 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -107,7 +107,8 @@ pub struct Record<$($args)?> { /// generated. pub peer_id: Option, /// The channel id of the channel pertaining to the logged record. May be a temporary id before - /// the channel has been funded. + /// the channel has been funded. Since channel_id is not repeated in the message body, + /// include it in the log output so entries remain clear. pub channel_id: Option, #[cfg(not(c_bindings))] /// The message body. @@ -156,8 +157,10 @@ impl<$($args)?> Record<$($args)?> { impl<$($args)?> Display for Record<$($args)?> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let chan_id = self.channel_id.map(|id| format!("{}", id)); + let chan_str = chan_id.as_deref().and_then(|s| s.get(..6)).unwrap_or(""); let context = format!("{:<5} [{}:{}]", self.level, self.module_path, self.line); - write!(f, "{:<48} {}", context, self.args) + write!(f, "{:<48} {:<6} {}", context, chan_str, self.args) } } } } @@ -166,9 +169,11 @@ impl_record!('a, ); #[cfg(c_bindings)] impl_record!(, 'a); -/// A trait encapsulating the operations required of a logger. +/// A trait encapsulating the operations required of a logger. Keep in mind that log messages might not be entirely +/// self-explanatory and may need accompanying context fields to be fully understood. pub trait Logger { - /// Logs the [`Record`]. + /// Logs the [`Record`]. Since the record's [`Record::channel_id`] is not embedded in the message body, log + /// implementations should print it alongside the message to keep entries clear. fn log(&self, record: Record); }
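
For illustration, a minimal sketch of how a downstream Logger implementation can lean on
the new Display impl and the structured Record::channel_id field now that the id is no
longer repeated in the message body. The MyLogger type and its id field are hypothetical
names introduced only for this example.

    use lightning::util::logger::{Logger, Record};

    struct MyLogger {
        // Hypothetical per-node identifier, e.g. an alias used in test output.
        id: String,
    }

    impl Logger for MyLogger {
        fn log(&self, record: Record) {
            // Record's Display renders "LEVEL [module:line]", a truncated channel id
            // prefix and the message body, so the logger only needs to prepend its
            // own identifier, mirroring the updated TestLogger.
            println!("{:<6} {}", self.id, record);
        }
    }

An implementation that prefers its own layout can instead read record.channel_id (and
record.peer_id) directly and print them next to record.args, as the updated Logger trait
documentation suggests.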