Commit 75a4085

Cache peers in OffersMessageFlow
In upcoming commits, we'll be creating blinded paths during the process of creating a revoke_and_ack message within the Channel struct. These paths will be included in said RAA to be used as reply paths for often-offline senders' held_htlc_available messages.

Because we hold the per-peer lock corresponding to the Channel while creating this RAA, we can't use our typical approach of calling ChannelManager::get_peers_for_blinded_path to create these blinded paths. That method takes each peer's lock in turn to check for usable channels and onion message feature support, and holding multiple peer state locks at the same time is not permitted due to the potential for deadlocks (see the debug_sync module).

To avoid taking other peer state locks while holding a particular Channel's peer state lock, here we cache the set of peers in the OffersMessageFlow, which is the struct that ultimately creates the blinded paths for the RAA.
1 parent f8143cf commit 75a4085

2 files changed (+52, -2 lines)
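
The change rests on the lock-ordering constraint spelled out in the commit message: per-peer state locks must never be nested, so any peer data needed while one peer's lock is already held has to live behind its own independent lock. Below is a minimal standalone sketch of that idea; the types and names (NodeId, PeerState, Manager, blinded_path_candidates) are hypothetical stand-ins, not LDK's actual API.

// Sketch: while one peer's lock is held, candidate hops for a blinded path are
// read from a separate cache instead of iterating every other peer's lock.
use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct NodeId(u64);

struct PeerState; // per-peer data, only reachable under that peer's own lock

struct Manager {
    // One lock per peer; lock-order rules forbid holding two of these at once.
    per_peer_state: HashMap<NodeId, Mutex<PeerState>>,
    // Maintained on connect/disconnect so it can be read while any single
    // peer's lock is already held.
    peers_cache: Mutex<Vec<NodeId>>,
}

impl Manager {
    // Runs while `counterparty`'s peer lock is held (as when building the RAA):
    // it only reads the cache, never another peer's lock.
    fn blinded_path_candidates(&self, counterparty: NodeId) -> Vec<NodeId> {
        let _peer_lock = self.per_peer_state[&counterparty].lock().unwrap();
        let cache = self.peers_cache.lock().unwrap();
        cache.iter().copied().filter(|id| *id != counterparty).collect()
    }
}

fn main() {
    let mut per_peer_state = HashMap::new();
    per_peer_state.insert(NodeId(1), Mutex::new(PeerState));
    per_peer_state.insert(NodeId(2), Mutex::new(PeerState));
    let mgr = Manager { per_peer_state, peers_cache: Mutex::new(vec![NodeId(1), NodeId(2)]) };
    println!("{:?}", mgr.blinded_path_candidates(NodeId(1))); // prints [NodeId(2)]
}

In the actual change below, the cache is a Mutex<Vec<MessageForwardNode>> inside OffersMessageFlow, kept current from ChannelManager's peer_connected and peer_disconnected handlers.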

lightning/src/ln/channelmanager.rs

Lines changed: 10 additions & 2 deletions
@@ -13163,6 +13163,8 @@ where
     for (err, counterparty_node_id) in failed_channels.drain(..) {
         let _ = handle_error!(self, err, counterparty_node_id);
     }
+
+    let _ = self.flow.peer_disconnected(counterparty_node_id);
 }

 #[rustfmt::skip]
@@ -13174,14 +13176,15 @@ where
 }

 let mut res = Ok(());
-
+let mut peer_has_live_channel = false;
 PersistenceNotifierGuard::optionally_notify(self, || {
     // If we have too many peers connected which don't have funded channels, disconnect the
     // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
     // unfunded channels taking up space in memory for disconnected peers, we still let new
     // peers connect, but we'll reject new channels from them.
     let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
     let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+    let best_block_height = self.best_block.read().unwrap().height;

     {
         let mut peer_state_lock = self.per_peer_state.write().unwrap();
@@ -13208,7 +13211,6 @@ where
 let mut peer_state = e.get().lock().unwrap();
 peer_state.latest_features = init_msg.features.clone();

-let best_block_height = self.best_block.read().unwrap().height;
 if inbound_peer_limited &&
     Self::unfunded_channel_count(&*peer_state, best_block_height) ==
         peer_state.channel_by_id.len()
@@ -13260,6 +13262,11 @@ where
         }),
     ReconnectionMsg::None => {},
 }
+if chan.context().is_usable()
+    && chan.context().channel_creation_height <= best_block_height.saturating_sub(6)
+{
+    peer_has_live_channel = true;
+}
 }
 }

@@ -13272,6 +13279,7 @@ where
 // until we have some peer connection(s) to receive onion messages over, so as a minor optimization
 // refresh the cache when a peer connects.
 self.check_refresh_async_receive_offer_cache(false);
+let _ = self.flow.peer_connected(counterparty_node_id, &init_msg.features, peer_has_live_channel);
 res
 }
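
The new block above only sets peer_has_live_channel when a channel is usable and its creation height is at least six blocks below the current tip, which the doc comment added in flow.rs below describes as "at least six onchain confirmations". A tiny sketch of that criterion in isolation, written as a hypothetical free function rather than the Channel accessors used in the diff:

// Hypothetical helper isolating the liveness criterion above; not part of LDK.
fn channel_is_live(is_usable: bool, channel_creation_height: u32, best_block_height: u32) -> bool {
    // saturating_sub mirrors the diff and avoids underflow close to genesis.
    is_usable && channel_creation_height <= best_block_height.saturating_sub(6)
}

fn main() {
    assert!(channel_is_live(true, 100, 106));   // created at height 100, tip at 106
    assert!(!channel_is_live(true, 101, 106));  // not yet six blocks below the tip
    assert!(!channel_is_live(false, 100, 106)); // deep enough but not usable
}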

lightning/src/offers/flow.rs

Lines changed: 42 additions & 0 deletions
@@ -61,6 +61,7 @@ use crate::sign::{EntropySource, NodeSigner, ReceiveAuthKey};

 use crate::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder};
 use crate::sync::{Mutex, RwLock};
+use crate::types::features::InitFeatures;
 use crate::types::payment::{PaymentHash, PaymentSecret};
 use crate::util::logger::Logger;
 use crate::util::ser::Writeable;
@@ -100,6 +101,7 @@ where

     pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
     async_receive_offer_cache: Mutex<AsyncReceiveOfferCache>,
+    peers_cache: Mutex<Vec<MessageForwardNode>>,

     #[cfg(feature = "dnssec")]
     pub(crate) hrn_resolver: OMNameResolver,
@@ -136,6 +138,7 @@ where

     pending_offers_messages: Mutex::new(Vec::new()),
     pending_async_payments_messages: Mutex::new(Vec::new()),
+    peers_cache: Mutex::new(Vec::new()),

     #[cfg(feature = "dnssec")]
     hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height),
@@ -1683,4 +1686,43 @@ where
     pub fn writeable_async_receive_offer_cache(&self) -> Vec<u8> {
         self.async_receive_offer_cache.encode()
     }
+
+    /// Indicates that a peer was connected to our node. Useful for the [`OffersMessageFlow`] to keep
+    /// track of which peers are connected, which allows for methods that can create blinded paths
+    /// without requiring a fresh set of [`MessageForwardNode`]s to be passed in.
+    ///
+    /// `live_channel` should be set if we have an open, usable channel with this peer that has at
+    /// least six onchain confirmations.
+    ///
+    /// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline
+    /// senders.
+    ///
+    /// Errors if the peer does not support onion messages or we don't have a channel with them.
+    pub fn peer_connected(
+        &self, peer_node_id: PublicKey, features: &InitFeatures, live_channel: bool,
+    ) -> Result<(), ()> {
+        if !features.supports_onion_messages() || !live_channel {
+            return Err(());
+        }
+
+        let mut peers_cache = self.peers_cache.lock().unwrap();
+        let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None };
+        peers_cache.push(peer);
+
+        Ok(())
+    }
+
+    /// Indicates that a peer was disconnected from our node. See [`Self::peer_connected`].
+    ///
+    /// Errors if the peer is unknown.
+    pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> {
+        let mut peers_cache = self.peers_cache.lock().unwrap();
+        let peer_idx = match peers_cache.iter().position(|peer| peer.node_id == peer_node_id) {
+            Some(idx) => idx,
+            None => return Err(()),
+        };
+        peers_cache.swap_remove(peer_idx);
+
+        Ok(())
+    }
 }
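
To make the gating and error semantics of the two new methods concrete, here is a standalone sketch with a hypothetical PeersCache type and plain booleans standing in for the InitFeatures check; it mirrors the logic above but is not LDK's API.

use std::sync::Mutex;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct NodeId(u64);

#[derive(Default)]
struct PeersCache {
    peers: Mutex<Vec<NodeId>>,
}

impl PeersCache {
    fn peer_connected(
        &self, node_id: NodeId, supports_onion_messages: bool, live_channel: bool,
    ) -> Result<(), ()> {
        // Peers that can't forward onion messages, or that we have no live
        // channel with, are not useful as blinded-path hops.
        if !supports_onion_messages || !live_channel {
            return Err(());
        }
        self.peers.lock().unwrap().push(node_id);
        Ok(())
    }

    fn peer_disconnected(&self, node_id: NodeId) -> Result<(), ()> {
        let mut peers = self.peers.lock().unwrap();
        let idx = peers.iter().position(|id| *id == node_id).ok_or(())?;
        // swap_remove keeps removal O(1); the cache is an unordered set of candidates.
        peers.swap_remove(idx);
        Ok(())
    }
}

fn main() {
    let cache = PeersCache::default();
    assert_eq!(cache.peer_connected(NodeId(7), true, true), Ok(()));
    assert_eq!(cache.peer_connected(NodeId(8), false, true), Err(())); // no onion message support
    assert_eq!(cache.peer_connected(NodeId(9), true, false), Err(())); // no live channel
    assert_eq!(cache.peer_disconnected(NodeId(7)), Ok(()));
    assert_eq!(cache.peer_disconnected(NodeId(7)), Err(())); // already removed / unknown
}

Using swap_remove keeps removal O(1) at the cost of ordering, which seems acceptable here since the cache is just an unordered set of candidate forwarding nodes for blinded paths.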
