diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..7b4f29896c7 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -53,11 +53,8 @@ use lightning::sign::{ }; use lightning::util::logger::Logger; use lightning::util::persist::{ - KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, CHANNEL_MANAGER_NAMESPACE, + NETWORK_GRAPH_KEY, NETWORK_GRAPH_NAMESPACE, SCORER_KEY, SCORER_NAMESPACE, }; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync}; #[cfg(feature = "std")] @@ -942,14 +939,8 @@ where if let Some(duration_since_epoch) = fetch_time() { if update_scorer(scorer, &event, duration_since_epoch) { log_trace!(logger, "Persisting scorer after update"); - if let Err(e) = kv_store - .write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - scorer.encode(), - ) - .await + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()).await { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e); // We opt not to abort early on persistence failure here as persisting @@ -1080,9 +1071,9 @@ where let fut = async { kv_store .write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_NAMESPACE, + "", + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ) .await @@ -1143,9 +1134,9 @@ where let fut = async { if let Err(e) = 
kv_store .write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_NAMESPACE, + "", + NETWORK_GRAPH_KEY, network_graph.encode(), ) .await @@ -1183,14 +1174,8 @@ where log_trace!(logger, "Persisting scorer"); } let fut = async { - if let Err(e) = kv_store - .write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - scorer.encode(), - ) - .await + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()).await { log_error!( logger, @@ -1301,30 +1286,18 @@ where // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. kv_store .write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_NAMESPACE, + "", + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ) .await?; if let Some(ref scorer) = scorer { - kv_store - .write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - scorer.encode(), - ) - .await?; + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()).await?; } if let Some(network_graph) = gossip_sync.network_graph() { kv_store - .write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, - network_graph.encode(), - ) + .write(NETWORK_GRAPH_NAMESPACE, "", NETWORK_GRAPH_KEY, network_graph.encode()) .await?; } Ok(()) @@ -1526,12 +1499,9 @@ impl BackgroundProcessor { .expect("Time should be sometime after 1970"); if update_scorer(scorer, &event, duration_since_epoch) { log_trace!(logger, "Persisting scorer after update"); - if let Err(e) = kv_store.write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - scorer.encode(), - ) { + if let Err(e) = + 
kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()) + { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e) } } @@ -1627,9 +1597,9 @@ impl BackgroundProcessor { if channel_manager.get_cm().get_and_clear_needs_persistence() { log_trace!(logger, "Persisting ChannelManager..."); (kv_store.write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_NAMESPACE, + "", + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), ))?; log_trace!(logger, "Done persisting ChannelManager."); @@ -1666,9 +1636,9 @@ impl BackgroundProcessor { duration_since_epoch.as_secs(), ); if let Err(e) = kv_store.write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_NAMESPACE, + "", + NETWORK_GRAPH_KEY, network_graph.encode(), ) { log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e); @@ -1694,12 +1664,9 @@ impl BackgroundProcessor { .expect("Time should be sometime after 1970"); log_trace!(logger, "Calling time_passed and persisting scorer"); scorer.write_lock().time_passed(duration_since_epoch); - if let Err(e) = kv_store.write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - scorer.encode(), - ) { + if let Err(e) = + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode()) + { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e); } } @@ -1735,24 +1702,19 @@ impl BackgroundProcessor { // some races where users quit while channel updates were in-flight, with // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. 
kv_store.write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_NAMESPACE, + "", + CHANNEL_MANAGER_KEY, channel_manager.get_cm().encode(), )?; if let Some(ref scorer) = scorer { - kv_store.write( - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - scorer.encode(), - )?; + kv_store.write(SCORER_NAMESPACE, "", SCORER_KEY, scorer.encode())?; } if let Some(network_graph) = gossip_sync.network_graph() { kv_store.write( - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_NAMESPACE, + "", + NETWORK_GRAPH_KEY, network_graph.encode(), )?; } @@ -1844,12 +1806,8 @@ mod tests { use lightning::types::payment::PaymentHash; use lightning::util::config::UserConfig; use lightning::util::persist::{ - KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_KEY, CHANNEL_MANAGER_NAMESPACE, + NETWORK_GRAPH_KEY, NETWORK_GRAPH_NAMESPACE, SCORER_KEY, SCORER_NAMESPACE, }; use lightning::util::ser::Writeable; use lightning::util::sweep::{ @@ -2104,19 +2062,15 @@ mod tests { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> lightning::io::Result<()> { - if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE - && key == CHANNEL_MANAGER_PERSISTENCE_KEY - { + if primary_namespace == CHANNEL_MANAGER_NAMESPACE && key == 
CHANNEL_MANAGER_KEY { + assert_eq!(secondary_namespace, ""); if let Some((error, message)) = self.manager_error { return Err(std::io::Error::new(error, message).into()); } } - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { + if primary_namespace == NETWORK_GRAPH_NAMESPACE && key == NETWORK_GRAPH_KEY { + assert_eq!(secondary_namespace, ""); if let Some(sender) = &self.graph_persistence_notifier { match sender.send(()) { Ok(()) => {}, @@ -2131,10 +2085,8 @@ mod tests { } } - if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE - && key == SCORER_PERSISTENCE_KEY - { + if primary_namespace == SCORER_NAMESPACE && key == SCORER_KEY { + assert_eq!(secondary_namespace, ""); if let Some((error, message)) = self.scorer_error { return Err(std::io::Error::new(error, message).into()); } diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 636967a6937..f01ec85ae90 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -5,7 +5,7 @@ use lightning::ln::functional_test_utils::{ }; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, - KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, + NAMESPACE_ALPHABET, NAMESPACE_MAX_LEN, }; use lightning::util::test_utils; use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; @@ -46,8 +46,8 @@ pub(crate) fn do_read_write_remove_list_persist( assert_eq!(listed_keys.len(), 0); // Ensure we have no issue operating with primary_namespace/secondary_namespace/key being - // KVSTORE_NAMESPACE_KEY_MAX_LEN - let max_chars = "A".repeat(KVSTORE_NAMESPACE_KEY_MAX_LEN); + // NAMESPACE_MAX_LEN + let max_chars = "A".repeat(NAMESPACE_MAX_LEN); 
kv_store.write(&max_chars, &max_chars, &max_chars, data.clone()).unwrap(); let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap(); @@ -76,17 +76,16 @@ pub(crate) fn do_test_data_migration let primary_namespace = if i == 0 { String::new() } else { - format!("testspace{}", KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(i).unwrap()) + format!("testspace{}", NAMESPACE_ALPHABET.chars().nth(i).unwrap()) }; for j in 0..num_secondary_namespaces { let secondary_namespace = if i == 0 || j == 0 { String::new() } else { - format!("testsubspace{}", KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(j).unwrap()) + format!("testsubspace{}", NAMESPACE_ALPHABET.chars().nth(j).unwrap()) }; for k in 0..num_keys { - let key = - format!("testkey{}", KVSTORE_NAMESPACE_KEY_ALPHABET.chars().nth(k).unwrap()); + let key = format!("testkey{}", NAMESPACE_ALPHABET.chars().nth(k).unwrap()); source_store .write(&primary_namespace, &secondary_namespace, &key, dummy_data.clone()) .unwrap(); diff --git a/lightning-persister/src/utils.rs b/lightning-persister/src/utils.rs index e8e7be5ce5d..ca543ffab17 100644 --- a/lightning-persister/src/utils.rs +++ b/lightning-persister/src/utils.rs @@ -1,9 +1,8 @@ use lightning::types::string::PrintableString; -use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN}; +use lightning::util::persist::{NAMESPACE_ALPHABET, NAMESPACE_MAX_LEN}; pub(crate) fn is_valid_kvstore_str(key: &str) -> bool { - key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN - && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c)) + key.len() <= NAMESPACE_MAX_LEN && key.chars().all(|c| NAMESPACE_ALPHABET.contains(c)) } pub(crate) fn check_namespace_key_validity( diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..0bb772de5d7 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -28,9 +28,7 @@ use 
crate::ln::types::ChannelId; use crate::sign::NodeSigner; use crate::util::native_async::FutureQueue; use crate::util::persist::{ - MonitorName, MonitorUpdatingPersisterAsync, CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MonitorName, MonitorUpdatingPersisterAsync, CHANNEL_MONITOR_NAMESPACE, MONITOR_UPDATE_NAMESPACE, }; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_channel_signer::TestChannelSigner; @@ -4939,11 +4937,7 @@ fn native_async_persist() { let funding_txo = OutPoint { txid: funding_tx.compute_txid(), index: 0 }; let key = MonitorName::V1Channel(funding_txo).to_string(); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &key, - ); + let pending_writes = kv_store.list_pending_async_writes(CHANNEL_MONITOR_NAMESPACE, "", &key); assert_eq!(pending_writes.len(), 1); // Once we complete the future, the write will still be pending until the future gets `poll`ed. 
@@ -4971,37 +4965,19 @@ fn native_async_persist() { persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "1", - ); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "1"); assert_eq!(pending_writes.len(), 1); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "2", - ); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "2"); assert_eq!(pending_writes.len(), 1); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "1", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "1", usize::MAX); persist_futures.poll_futures(); // While the `ChainMonitor` could return a `MonitorEvent::Completed` here, it currently // doesn't. If that ever changes we should validate that the `Completed` event has the correct // `monitor_update_id` (1). 
assert!(async_chain_monitor.release_pending_monitor_events().is_empty()); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "2", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "2", usize::MAX); persist_futures.poll_futures(); let completed_persist = async_chain_monitor.release_pending_monitor_events(); assert_eq!(completed_persist.len(), 1); @@ -5020,34 +4996,16 @@ fn native_async_persist() { persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "3", - ); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "3"); assert_eq!(pending_writes.len(), 1); - let pending_writes = kv_store.list_pending_async_writes( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "4", - ); + let pending_writes = kv_store.list_pending_async_writes(MONITOR_UPDATE_NAMESPACE, &key, "4"); assert_eq!(pending_writes.len(), 1); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "4", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "4", usize::MAX); persist_futures.poll_futures(); assert_eq!(async_chain_monitor.release_pending_monitor_events().len(), 0); - kv_store.complete_async_writes_through( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - &key, - "3", - usize::MAX, - ); + kv_store.complete_async_writes_through(MONITOR_UPDATE_NAMESPACE, &key, "3", usize::MAX); persist_futures.poll_futures(); let completed_persist = async_chain_monitor.release_pending_monitor_events(); assert_eq!(completed_persist.len(), 1); diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index d00e29e686a..6f1ad74fbe5 100644 --- a/lightning/src/util/persist.rs +++ 
b/lightning/src/util/persist.rs @@ -41,76 +41,59 @@ use crate::util::ser::{Readable, ReadableArgs, Writeable}; use crate::util::wakers::Notifier; /// The alphabet of characters allowed for namespaces and keys. -pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = +pub const NAMESPACE_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"; /// The maximum number of characters namespaces and keys may have. -pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120; +pub const NAMESPACE_MAX_LEN: usize = 120; /// The primary namespace under which the [`ChannelManager`] will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which the [`ChannelManager`] will be persisted. -/// -/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const CHANNEL_MANAGER_NAMESPACE: &str = ""; /// The key under which the [`ChannelManager`] will be persisted. /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager -pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager"; +pub const CHANNEL_MANAGER_KEY: &str = "manager"; /// The primary namespace under which [`ChannelMonitor`]s will be persisted. -pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors"; -/// The secondary namespace under which [`ChannelMonitor`]s will be persisted. -pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const CHANNEL_MONITOR_NAMESPACE: &str = "monitors"; /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted. -pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates"; +/// +/// Note that unlike all other LDK-native persistence calls, monitor updates have a non-empty +/// secondary namespace. 
+pub const MONITOR_UPDATE_NAMESPACE: &str = "monitor_updates"; /// The primary namespace under which archived [`ChannelMonitor`]s will be persisted. -pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "archived_monitors"; -/// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted. -pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const ARCHIVED_MONITOR_NAMESPACE: &str = "archived_monitors"; /// The primary namespace under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which the [`NetworkGraph`] will be persisted. -/// -/// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const NETWORK_GRAPH_NAMESPACE: &str = ""; /// The key under which the [`NetworkGraph`] will be persisted. /// /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph -pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph"; +pub const NETWORK_GRAPH_KEY: &str = "network_graph"; /// The primary namespace under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which the [`WriteableScore`] will be persisted. -/// -/// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const SCORER_NAMESPACE: &str = ""; /// The key under which the [`WriteableScore`] will be persisted. /// /// [`WriteableScore`]: crate::routing::scoring::WriteableScore -pub const SCORER_PERSISTENCE_KEY: &str = "scorer"; +pub const SCORER_KEY: &str = "scorer"; /// The primary namespace under which [`OutputSweeper`] state will be persisted. 
/// /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = ""; -/// The secondary namespace under which [`OutputSweeper`] state will be persisted. -/// -/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; +pub const OUTPUT_SWEEPER_NAMESPACE: &str = ""; /// The key under which [`OutputSweeper`] state will be persisted. /// /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper"; +pub const OUTPUT_SWEEPER_KEY: &str = "output_sweeper"; /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`]. /// @@ -126,15 +109,14 @@ pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2]; /// ways, as long as per-namespace key uniqueness is asserted. /// /// Keys and namespaces are required to be valid ASCII strings in the range of -/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty -/// primary namespaces and secondary namespaces (`""`) are assumed to be a valid, however, if -/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means -/// that concerns should always be separated by primary namespace first, before secondary -/// namespaces are used. While the number of primary namespaces will be relatively small and is -/// determined at compile time, there may be many secondary namespaces per primary namespace. Note -/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given -/// namespace, i.e., conflicts between keys and equally named -/// primary namespaces/secondary namespaces must be avoided. +/// [`NAMESPACE_ALPHABET`] and no longer than [`NAMESPACE_MAX_LEN`]. 
Empty primary namespaces and +/// secondary namespaces (`""`) are assumed to be valid, however, if `primary_namespace` is empty, +/// `secondary_namespace` is required to be empty, too. This means that concerns should always be +/// separated by primary namespace first, before secondary namespaces are used. While the number of +/// primary namespaces will be relatively small and is determined at compile time, there may be many +/// secondary namespaces per primary namespace. Note that per-namespace uniqueness needs to also +/// hold for keys *and* namespaces in any given namespace, i.e., conflicts between keys and equally +/// named primary namespaces/secondary namespaces must be avoided. /// /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to @@ -255,15 +237,14 @@ where /// ways, as long as per-namespace key uniqueness is asserted. /// /// Keys and namespaces are required to be valid ASCII strings in the range of -/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty -/// primary namespaces and secondary namespaces (`""`) are assumed to be a valid, however, if -/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means -/// that concerns should always be separated by primary namespace first, before secondary -/// namespaces are used. While the number of primary namespaces will be relatively small and is -/// determined at compile time, there may be many secondary namespaces per primary namespace. Note -/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given -/// namespace, i.e., conflicts between keys and equally named -/// primary namespaces/secondary namespaces must be avoided. +/// [`NAMESPACE_ALPHABET`] and no longer than [`NAMESPACE_MAX_LEN`]. 
Empty primary namespaces and +/// secondary namespaces (`""`) are assumed to be valid, however, if `primary_namespace` is +/// empty, `secondary_namespace` is required to be empty, too. This means that concerns should +/// always be separated by primary namespace first, before secondary namespaces are used. While the +/// number of primary namespaces will be relatively small and is determined at compile time, there +/// may be many secondary namespaces per primary namespace. Note that per-namespace uniqueness +/// needs to also hold for keys *and* namespaces in any given namespace, i.e., conflicts between +/// keys and equally named primary namespaces/secondary namespaces must be avoided. /// /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to @@ -286,6 +267,9 @@ pub trait KVStore { ) -> AsyncResult<'static, Vec, io::Error>; /// Persists the given data under the given `key`. /// + /// Note that LDK-originating persistence calls will always set `secondary_namespace` to "" + /// unless `primary_namespace` is [`MONITOR_UPDATE_NAMESPACE`]. + /// + /// The order of multiple writes to the same key needs to be retained while persisting 
In other words, if two writes to the same key occur, the state (as seen by /// [`Self::read`]) must either see the first write then the second, or only ever the second, @@ -380,12 +364,8 @@ impl Persist, ) -> chain::ChannelMonitorUpdateStatus { - match self.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &monitor_name.to_string(), - monitor.encode(), - ) { + let key = monitor_name.to_string(); + match self.write(CHANNEL_MONITOR_NAMESPACE, "", &key, monitor.encode()) { Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, } @@ -395,12 +375,8 @@ impl Persist, monitor: &ChannelMonitor, ) -> chain::ChannelMonitorUpdateStatus { - match self.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &monitor_name.to_string(), - monitor.encode(), - ) { + let key = monitor_name.to_string(); + match self.write(CHANNEL_MONITOR_NAMESPACE, "", &key, monitor.encode()) { Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, } @@ -408,29 +384,15 @@ impl Persist monitor, Err(_) => return, }; - match self.write( - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - monitor, - ) { + match self.write(ARCHIVED_MONITOR_NAMESPACE, "", monitor_key.as_str(), monitor) { Ok(()) => {}, Err(_e) => return, }; - let _ = self.remove( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - true, - ); + let _ = self.remove(CHANNEL_MONITOR_NAMESPACE, "", monitor_key.as_str(), true); } } @@ -445,16 +407,9 @@ where { let mut res = Vec::new(); - for stored_key in kv_store.list( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - )? 
{ + for stored_key in kv_store.list(CHANNEL_MONITOR_NAMESPACE, "")? { match ::EcdsaSigner>)>>::read( - &mut io::Cursor::new(kv_store.read( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - )?), + &mut io::Cursor::new(kv_store.read(CHANNEL_MONITOR_NAMESPACE, "", &stored_key)?), (&*entropy_source, &*signer_provider), ) { Ok(Some((block_hash, channel_monitor))) => { @@ -526,13 +481,13 @@ fn poll_sync_future(future: F) -> F::Output { /// - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s. /// - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`] /// -/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`], +/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_NAMESPACE`], /// using the familiar encoding of an [`OutPoint`] (e.g., `[SOME-64-CHAR-HEX-STRING]_1`) for v1 /// channels or a [`ChannelId`] (e.g., `[SOME-64-CHAR-HEX-STRING]`) for v2 channels. 
/// /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows: /// -/// - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`] +/// - primary namespace: [`MONITOR_UPDATE_NAMESPACE`] /// - secondary namespace: [the monitor's encoded outpoint or channel id name] /// /// Under that secondary namespace, each update is stored with a number string, like `21`, which @@ -545,14 +500,14 @@ fn poll_sync_future(future: F) -> F::Output { /// /// Full channel monitors would be stored at a single key: /// -/// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1` +/// `[CHANNEL_MONITOR_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1` /// /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key): /// /// ```text -/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1 -/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2 -/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3 +/// [MONITOR_UPDATE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1 +/// [MONITOR_UPDATE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2 +/// [MONITOR_UPDATE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3 /// ``` /// ... and so on. 
/// @@ -724,8 +679,8 @@ where log_error!( self.0 .0.logger, "Failed to write ChannelMonitor {}/{}/{} reason: {}", - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MONITOR_NAMESPACE, + "", monitor_name, e ); @@ -866,9 +821,7 @@ where Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let monitor_list = self.0.kv_store.list(CHANNEL_MONITOR_NAMESPACE, "").await?; let mut res = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { let result = @@ -1086,9 +1039,7 @@ where Option<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error, > { - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?; + let monitor_bytes = self.kv_store.read(CHANNEL_MONITOR_NAMESPACE, "", monitor_key).await?; let mut monitor_cursor = io::Cursor::new(monitor_bytes); // Discard the sentinel bytes if found. 
if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { @@ -1130,13 +1081,13 @@ where async fn read_monitor_update( &self, monitor_key: &str, update_name: &UpdateName, ) -> Result<ChannelMonitorUpdate, io::Error> { - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_NAMESPACE; let update_bytes = self.kv_store.read(primary, monitor_key, update_name.as_str()).await?; ChannelMonitorUpdate::read(&mut &update_bytes[..]).map_err(|e| { log_error!( self.logger, "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}", - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, monitor_key, update_name.as_str(), e, @@ -1146,9 +1097,7 @@ where } async fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> { - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - let monitor_keys = self.kv_store.list(primary, secondary).await?; + let monitor_keys = self.kv_store.list(CHANNEL_MONITOR_NAMESPACE, "").await?; for monitor_key in monitor_keys { let monitor_name = MonitorName::from_str(&monitor_key)?; let maybe_monitor = self.maybe_read_monitor(&monitor_name, &monitor_key).await?; @@ -1167,13 +1116,14 @@ where async fn cleanup_stale_updates_for_monitor_to( &self, monitor_key: &str, latest_update_id: u64, lazy: bool, ) -> Result<(), io::Error> { - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; - let updates = self.kv_store.list(primary, monitor_key).await?; + let updates = self.kv_store.list(MONITOR_UPDATE_NAMESPACE, monitor_key).await?; for update in updates { let update_name = UpdateName::new(update)?; // if the update_id is lower than the stored monitor, delete if update_name.0 <= latest_update_id { - self.kv_store.remove(primary, monitor_key, update_name.as_str(), lazy).await?; + self.kv_store .remove(MONITOR_UPDATE_NAMESPACE, monitor_key, update_name.as_str(), lazy) .await?; } } Ok(()) @@ 
-1198,9 +1148,7 @@ where // Note that this is NOT an async function, but rather calls the *sync* KVStore write // method, allowing it to do its queueing immediately, and then return a future for the // completion of the write. This ensures monitor persistence ordering is preserved. - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + self.kv_store.write(CHANNEL_MONITOR_NAMESPACE, "", monitor_key.as_str(), monitor_bytes) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1221,13 +1169,12 @@ where if persist_update { let monitor_key = monitor_name.to_string(); let update_name = UpdateName::from(update.update_id); - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; // Note that this is NOT an async function, but rather calls the *sync* KVStore // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. 
res_a = Some(self.kv_store.write( - primary, + MONITOR_UPDATE_NAMESPACE, &monitor_key, update_name.as_str(), update.encode(), @@ -1292,15 +1239,12 @@ where Ok((_block_hash, monitor)) => monitor, Err(_) => return, }; - let primary = ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - match self.kv_store.write(primary, secondary, &monitor_key, monitor.encode()).await { + let primary = ARCHIVED_MONITOR_NAMESPACE; + match self.kv_store.write(primary, "", &monitor_key, monitor.encode()).await { Ok(()) => {}, Err(_e) => return, }; - let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - let _ = self.kv_store.remove(primary, secondary, &monitor_key, true).await; + let _ = self.kv_store.remove(CHANNEL_MONITOR_NAMESPACE, "", &monitor_key, true).await; } // Cleans up monitor updates for given monitor in range `start..=end`. @@ -1308,7 +1252,7 @@ where let monitor_key = monitor_name.to_string(); for update_id in start..=end { let update_name = UpdateName::from(update_id); - let primary = CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE; + let primary = MONITOR_UPDATE_NAMESPACE; let res = self.kv_store.remove(primary, &monitor_key, update_name.as_str(), true).await; if let Err(e) = res { log_error!( @@ -1661,7 +1605,7 @@ mod tests { }; let update_list = KVStoreSync::list( &kv_store_0, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), ); assert_eq!(update_list.unwrap().len() as u64, expected_updates, "persister 0"); @@ -1679,7 +1623,7 @@ mod tests { }; let update_list = KVStoreSync::list( &kv_store_1, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), ); assert_eq!(update_list.unwrap().len() as u64, expected_updates, "persister 1"); @@ -1890,7 +1834,7 @@ mod tests { let monitor_name = 
monitor.persistence_key(); KVStoreSync::write( &kv_store_0, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), UpdateName::from(1).as_str(), vec![0u8; 1], @@ -1903,7 +1847,7 @@ mod tests { // Confirm the stale update is unreadable/gone assert!(KVStoreSync::read( &kv_store_0, - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + MONITOR_UPDATE_NAMESPACE, &monitor_name.to_string(), UpdateName::from(1).as_str() ) diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..10260a803a1 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -22,8 +22,7 @@ use crate::sign::{ use crate::sync::Mutex; use crate::util::logger::Logger; use crate::util::persist::{ - KVStore, KVStoreSync, KVStoreSyncWrapper, OUTPUT_SWEEPER_PERSISTENCE_KEY, - OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStore, KVStoreSync, KVStoreSyncWrapper, OUTPUT_SWEEPER_KEY, OUTPUT_SWEEPER_NAMESPACE, }; use crate::util::ser::{Readable, ReadableArgs, Writeable}; use crate::{impl_writeable_tlv_based, log_debug, log_error}; @@ -611,13 +610,7 @@ where fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { let encoded = sweeper_state.encode(); - - self.kv_store.write( - OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_KEY, - encoded, - ) + self.kv_store.write(OUTPUT_SWEEPER_NAMESPACE, "", OUTPUT_SWEEPER_KEY, encoded) } /// Updates the sweeper state by executing the given callback. Persists the state afterwards if it is marked dirty,