9 changes: 1 addition & 8 deletions fuzz/src/full_stack.rs
@@ -1608,14 +1608,7 @@ mod tests {
.unwrap()
.entry((record.module_path.to_string(), format!("{}", record.args)))
.or_insert(0) += 1;
-println!(
-"{:<5} [{} : {}, {}] {}",
-record.level.to_string(),
-record.module_path,
-record.file,
-record.line,
-record.args
-);
+println!("{}", record);
}
}

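The change repeated throughout these fuzz and test loggers is to stop formatting individual `Record` fields by hand and instead print the record itself, which relies on `Record` implementing `Display`. That implementation is not shown in this diff; purely as an illustration, a formatter over a hypothetical record type carrying the same fields the removed format string referenced could look like this:

    use std::fmt;

    // Hypothetical stand-in for the logger record; the fields mirror the ones the
    // removed format string referenced (level, module_path, file, line, args).
    struct DemoRecord {
        level: &'static str,
        module_path: &'static str,
        file: &'static str,
        line: u32,
        args: String,
    }

    impl fmt::Display for DemoRecord {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Same shape as the removed println!: "LEVEL [module : file, line] message".
            write!(
                f,
                "{:<5} [{} : {}, {}] {}",
                self.level, self.module_path, self.file, self.line, self.args
            )
        }
    }

    fn main() {
        let record = DemoRecord {
            level: "TRACE",
            module_path: "full_stack",
            file: "full_stack.rs",
            line: 1608,
            args: "example message".to_string(),
        };
        // With Display in place, every call site shrinks to a single argument.
        println!("{}", record);
    }

Centralizing the layout this way keeps the output of all test loggers consistent, and a future format tweak only needs to touch one place.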
15 changes: 6 additions & 9 deletions fuzz/src/onion_message.rs
@@ -102,7 +102,11 @@ impl MessageRouter for TestMessageRouter {
fn find_path(
&self, _sender: PublicKey, _peers: Vec<PublicKey>, destination: Destination,
) -> Result<OnionMessagePath, ()> {
-Ok(OnionMessagePath { intermediate_nodes: vec![], destination, first_node_addresses: vec![] })
+Ok(OnionMessagePath {
+intermediate_nodes: vec![],
+destination,
+first_node_addresses: vec![],
+})
}

fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
@@ -328,14 +332,7 @@ mod tests {
let mut lines_lock = self.lines.lock().unwrap();
let key = (record.module_path.to_string(), format!("{}", record.args));
*lines_lock.entry(key).or_insert(0) += 1;
-println!(
-"{:<5} [{} : {}, {}] {}",
-record.level.to_string(),
-record.module_path,
-record.file,
-record.line,
-record.args
-);
+println!("{}", record);
}
}

11 changes: 1 addition & 10 deletions fuzz/src/utils/test_logger.rs
@@ -59,15 +59,6 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> {

impl<Out: Output> Logger for TestLogger<Out> {
fn log(&self, record: Record) {
-write!(
-LockedWriteAdapter(&self.out),
-"{:<5} {} [{} : {}] {}\n",
-record.level.to_string(),
-self.id,
-record.module_path,
-record.line,
-record.args
-)
-.unwrap();
+write!(LockedWriteAdapter(&self.out), "{:<6} {}", self.id, record).unwrap();
}
}
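The new format string uses `{:<6}` to left-align the logger's `id` in a six-character column so the per-logger prefix lines up from line to line (the DNS-resolver test below applies the same idea with width 8 for `self.node`). A quick self-contained check of how that width specifier pads, with invented values:

    fn main() {
        // Shorter values are padded with trailing spaces up to the field width.
        assert_eq!(format!("{:<6} {}", "node1", "msg"), "node1  msg");
        // Values at or beyond the width are printed as-is.
        assert_eq!(format!("{:<6} {}", "peer_a", "msg"), "peer_a msg");
    }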
2 changes: 1 addition & 1 deletion lightning-dns-resolver/src/lib.rs
@@ -192,7 +192,7 @@ mod test {
}
impl Logger for TestLogger {
fn log(&self, record: lightning::util::logger::Record) {
-eprintln!("{}: {}", self.node, record.args);
+eprintln!("{:<8} {}", self.node, record);
}
}
impl Deref for TestLogger {
9 changes: 1 addition & 8 deletions lightning-net-tokio/src/lib.rs
@@ -629,14 +629,7 @@ mod tests {
pub struct TestLogger();
impl lightning::util::logger::Logger for TestLogger {
fn log(&self, record: lightning::util::logger::Record) {
-println!(
-"{:<5} [{} : {}, {}] {}",
-record.level.to_string(),
-record.module_path,
-record.file,
-record.line,
-record.args
-);
+println!("{}", record);
}
}

1 change: 1 addition & 0 deletions lightning/Cargo.toml
@@ -23,6 +23,7 @@ _externalize_tests = ["inventory", "_test_utils"]
# Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling).
# This is unsafe to use in production because it may result in the counterparty publishing taking our funds.
unsafe_revoked_tx_signing = []
+safe_channels = []

std = []

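The diff only declares the `safe_channels` feature, with no dependent features and no description, so its semantics are defined elsewhere in the crate. Purely as a generic illustration of how such a flag is consumed (the function below is invented), a dependent crate would enable it with `features = ["safe_channels"]` on its `lightning` dependency, and library code can then be gated on it:

    // Illustrative only: conditional compilation on the new feature flag.
    #[cfg(feature = "safe_channels")]
    fn describe_build() -> &'static str {
        "built with safe_channels"
    }

    #[cfg(not(feature = "safe_channels"))]
    fn describe_build() -> &'static str {
        "built without safe_channels"
    }

    fn main() {
        println!("{}", describe_build());
    }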
58 changes: 18 additions & 40 deletions lightning/src/chain/chainmonitor.rs
@@ -580,23 +580,17 @@ where

let has_pending_claims = monitor_state.monitor.has_pending_claims();
if has_pending_claims || get_partition_key(channel_id) % partition_factor == 0 {
-log_trace!(
-logger,
-"Syncing Channel Monitor for channel {}",
-log_funding_info!(monitor)
-);
+log_trace!(logger, "Syncing Channel Monitor");
// Even though we don't track monitor updates from chain-sync as pending, we still want
// updates per-channel to be well-ordered so that users don't see a
// `ChannelMonitorUpdate` after a channel persist for a channel with the same
// `latest_update_id`.
let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
match self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor)
{
-ChannelMonitorUpdateStatus::Completed => log_trace!(
-logger,
-"Finished syncing Channel Monitor for channel {} for block-data",
-log_funding_info!(monitor)
-),
+ChannelMonitorUpdateStatus::Completed => {
+log_trace!(logger, "Finished syncing Channel Monitor for block-data")
+},
ChannelMonitorUpdateStatus::InProgress => {
log_trace!(
logger,
@@ -961,16 +955,12 @@ where
}
if have_monitors_to_prune {
let mut monitors = self.monitors.write().unwrap();
-monitors.retain(|channel_id, monitor_holder| {
+monitors.retain(|_channel_id, monitor_holder| {
let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
let (is_fully_resolved, _) =
monitor_holder.monitor.check_and_update_full_resolution_status(&logger);
if is_fully_resolved {
-log_info!(
-logger,
-"Archiving fully resolved ChannelMonitor for channel ID {}",
-channel_id
-);
+log_info!(logger, "Archiving fully resolved ChannelMonitor");
self.persister
.archive_persisted_channel(monitor_holder.monitor.persistence_key());
false
@@ -1106,11 +1096,7 @@ where
},
hash_map::Entry::Vacant(e) => e,
};
-log_trace!(
-logger,
-"Loaded existing ChannelMonitor for channel {}",
-log_funding_info!(monitor)
-);
+log_trace!(logger, "Loaded existing ChannelMonitor");
if let Some(ref chain_source) = self.chain_source {
monitor.load_outputs_to_watch(chain_source, &self.logger);
}
@@ -1366,25 +1352,17 @@ where
},
hash_map::Entry::Vacant(e) => e,
};
-log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
+log_trace!(logger, "Got new ChannelMonitor");
let update_id = monitor.get_latest_update_id();
let mut pending_monitor_updates = Vec::new();
let persist_res = self.persister.persist_new_channel(monitor.persistence_key(), &monitor);
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
-log_info!(
-logger,
-"Persistence of new ChannelMonitor for channel {} in progress",
-log_funding_info!(monitor)
-);
+log_info!(logger, "Persistence of new ChannelMonitor in progress",);
pending_monitor_updates.push(update_id);
},
ChannelMonitorUpdateStatus::Completed => {
-log_info!(
-logger,
-"Persistence of new ChannelMonitor for channel {} completed",
-log_funding_info!(monitor)
-);
+log_info!(logger, "Persistence of new ChannelMonitor completed",);
},
ChannelMonitorUpdateStatus::UnrecoverableError => {
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
@@ -1428,9 +1406,10 @@ where
let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
log_trace!(
logger,
-"Updating ChannelMonitor to id {} for channel {}",
+"Updating ChannelMonitor to id {} for channel {} with updates {:#?}",
update.update_id,
-log_funding_info!(monitor)
+log_funding_info!(monitor),
+update.updates
);

// We hold a `pending_monitor_updates` lock through `update_monitor` to ensure we
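The expanded trace line above now prints `update.updates` with `{:#?}`, the alternate `Debug` form, which pretty-prints the update contents over multiple indented lines instead of the compact single-line `{:?}` output. A small comparison using an invented type:

    #[derive(Debug)]
    struct DemoUpdateStep {
        step: &'static str,
    }

    fn main() {
        let updates = vec![
            DemoUpdateStep { step: "first step" },
            DemoUpdateStep { step: "second step" },
        ];
        // Compact, single-line form.
        println!("{:?}", updates);
        // Alternate form used by the new log line: one field per line, indented.
        println!("{:#?}", updates);
    }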
@@ -1452,7 +1431,7 @@
// We don't want to persist a `monitor_update` which results in a failure to apply later
// while reading `channel_monitor` with updates from storage. Instead, we should persist
// the entire `channel_monitor` here.
-log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
+log_warn!(logger, "Failed to update ChannelMonitor. Going ahead and persisting the entire ChannelMonitor");
self.persister.update_persisted_channel(
monitor.persistence_key(),
None,
@@ -1468,18 +1447,17 @@
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
pending_monitor_updates.push(update_id);
-log_debug!(logger,
-"Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress",
+log_debug!(
+logger,
+"Persistence of ChannelMonitorUpdate id {:?} in progress",
update_id,
-log_funding_info!(monitor)
);
},
ChannelMonitorUpdateStatus::Completed => {
log_debug!(
logger,
-"Persistence of ChannelMonitorUpdate id {:?} for channel {} completed",
+"Persistence of ChannelMonitorUpdate id {:?} completed",
update_id,
-log_funding_info!(monitor)
);
},
ChannelMonitorUpdateStatus::UnrecoverableError => {
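Throughout this file the messages drop their `for channel {}` / `log_funding_info!(monitor)` suffixes while the surrounding code keeps building its logger through `WithChannelMonitor::from(&self.logger, &monitor, None)`, so the apparent intent is that the wrapper itself attaches the channel context to every record, making the per-message suffix redundant; the wrapper's definition is outside this diff. A generic sketch of that decorator idea, with invented names rather than LDK's actual types:

    // Illustrative only: a wrapper that stamps channel context onto every line so
    // individual call sites no longer repeat it.
    struct ChannelContextLogger {
        sink: fn(&str),
        channel_id: String,
    }

    impl ChannelContextLogger {
        fn log(&self, msg: &str) {
            // The context is added once, centrally, instead of in each format string.
            (self.sink)(&format!("[channel {}] {}", self.channel_id, msg));
        }
    }

    fn main() {
        let logger = ChannelContextLogger {
            sink: |line: &str| println!("{}", line),
            channel_id: "0123abcd".to_string(),
        };
        // The call site now logs just the event itself.
        logger.log("Syncing Channel Monitor");
    }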