
Commit 57d21ec

Cache peers in OffersMessageFlow
In upcoming commits, we'll be creating blinded paths during the process of creating a revoke_and_ack message within the Channel struct. These paths will be included in said RAA to be used as reply paths for often-offline senders' held_htlc_available messages.

Because we hold the per-peer lock corresponding to the Channel while creating this RAA, we can't use our typical approach of calling ChannelManager::get_peers_for_blinded_path to create these blinded paths. That method takes each peer's lock in turn in order to check for usable channels and onion message feature support, and holding multiple peer state locks at the same time is not permitted due to the potential for deadlocks (see the debug_sync module).

To avoid taking other peer state locks while holding a particular Channel's peer state lock, here we cache the set of peers in the OffersMessageFlow, which is the struct that ultimately creates the blinded paths for the RAA.
1 parent 6aa52e9 commit 57d21ec
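As background for the motivation above, a minimal sketch of the locking constraint (simplified names and types, not LDK's actual API): per-peer state sits behind individual locks, so a blinded-path builder that may already hold one peer's lock reads candidates from a separately-locked cache instead of walking the peer map.

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];

struct PeerState { /* per-peer channel state */ }

struct Sketch {
    // One lock per peer; taking two of these at once risks deadlock, so code
    // that already holds one must not iterate and lock the others.
    per_peer_state: RwLock<HashMap<NodeId, Mutex<PeerState>>>,
    // Maintained on peer connect/disconnect. Blinded-path candidates can be
    // read from here while a single peer's lock is held, without touching any
    // other peer's lock.
    cached_peers: Mutex<Vec<NodeId>>,
}

impl Sketch {
    fn blinded_path_candidates(&self) -> Vec<NodeId> {
        // Only the cache's own lock is taken here.
        self.cached_peers.lock().unwrap().clone()
    }
}
```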

2 files changed (+52 lines, -2 lines)

lightning/src/ln/channelmanager.rs

Lines changed: 10 additions & 2 deletions
@@ -13156,6 +13156,8 @@ where
         for (err, counterparty_node_id) in failed_channels.drain(..) {
             let _ = handle_error!(self, err, counterparty_node_id);
         }
+
+        let _ = self.flow.peer_disconnected(counterparty_node_id);
     }

     #[rustfmt::skip]
@@ -13167,14 +13169,15 @@ where
         }

         let mut res = Ok(());
-
+        let mut peer_has_live_channel = false;
         PersistenceNotifierGuard::optionally_notify(self, || {
             // If we have too many peers connected which don't have funded channels, disconnect the
             // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
             // unfunded channels taking up space in memory for disconnected peers, we still let new
             // peers connect, but we'll reject new channels from them.
             let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
             let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+            let best_block_height = self.best_block.read().unwrap().height;

             {
                 let mut peer_state_lock = self.per_peer_state.write().unwrap();
@@ -13201,7 +13204,6 @@ where
                 let mut peer_state = e.get().lock().unwrap();
                 peer_state.latest_features = init_msg.features.clone();

-                let best_block_height = self.best_block.read().unwrap().height;
                 if inbound_peer_limited &&
                     Self::unfunded_channel_count(&*peer_state, best_block_height) ==
                         peer_state.channel_by_id.len()
@@ -13253,6 +13255,11 @@ where
                     }),
                     ReconnectionMsg::None => {},
                 }
+                if chan.context().is_usable()
+                    && chan.context().channel_creation_height <= best_block_height.saturating_sub(6)
+                {
+                    peer_has_live_channel = true;
+                }
             }
         }
@@ -13265,6 +13272,7 @@ where
         // until we have some peer connection(s) to receive onion messages over, so as a minor optimization
         // refresh the cache when a peer connects.
         self.check_refresh_async_receive_offer_cache(false);
+        let _ = self.flow.peer_connected(counterparty_node_id, &init_msg.features, peer_has_live_channel);
         res
     }
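The `peer_has_live_channel` flag above is set only for channels that are usable and whose creation height is at least six blocks behind the current tip. A standalone restatement of that height check, with made-up names (this is not an LDK helper):

```rust
// `saturating_sub` keeps the comparison from underflowing for very low tips.
fn deep_enough(channel_creation_height: u32, best_block_height: u32) -> bool {
    channel_creation_height <= best_block_height.saturating_sub(6)
}

// For example, a channel created at height 800_000 passes once the tip reaches
// 800_006, but not at 800_004.
```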

lightning/src/offers/flow.rs

Lines changed: 42 additions & 0 deletions
@@ -61,6 +61,7 @@ use crate::sign::{EntropySource, NodeSigner, ReceiveAuthKey};

 use crate::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder};
 use crate::sync::{Mutex, RwLock};
+use crate::types::features::InitFeatures;
 use crate::types::payment::{PaymentHash, PaymentSecret};
 use crate::util::logger::Logger;
 use crate::util::ser::Writeable;
@@ -100,6 +101,7 @@ where

     pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
     async_receive_offer_cache: Mutex<AsyncReceiveOfferCache>,
+    peers_cache: Mutex<Vec<MessageForwardNode>>,

     #[cfg(feature = "dnssec")]
     pub(crate) hrn_resolver: OMNameResolver,
@@ -136,6 +138,7 @@ where

             pending_offers_messages: Mutex::new(Vec::new()),
             pending_async_payments_messages: Mutex::new(Vec::new()),
+            peers_cache: Mutex::new(Vec::new()),

             #[cfg(feature = "dnssec")]
             hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height),
@@ -1683,4 +1686,43 @@ where
     pub fn writeable_async_receive_offer_cache(&self) -> Vec<u8> {
         self.async_receive_offer_cache.encode()
     }
+
+    /// Indicates that a peer was connected to our node. Useful for the [`OffersMessageFlow`] to keep
+    /// track of which peers are connected, which allows for methods that can create blinded paths
+    /// without requiring a fresh set of [`MessageForwardNode`]s to be passed in.
+    ///
+    /// `live_channel` should be set if we have an open, usable channel with this peer that has at
+    /// least six onchain confirmations.
+    ///
+    /// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline
+    /// senders.
+    ///
+    /// Errors if the peer does not support onion messages or we don't have a channel with them.
+    pub fn peer_connected(
+        &self, peer_node_id: PublicKey, features: &InitFeatures, live_channel: bool,
+    ) -> Result<(), ()> {
+        if !features.supports_onion_messages() || !live_channel {
+            return Err(());
+        }
+
+        let mut peers_cache = self.peers_cache.lock().unwrap();
+        let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None };
+        peers_cache.push(peer);
+
+        Ok(())
+    }
+
+    /// Indicates that a peer was disconnected from our node. See [`Self::peer_connected`].
+    ///
+    /// Errors if the peer is unknown.
+    pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> {
+        let mut peers_cache = self.peers_cache.lock().unwrap();
+        let peer_idx = match peers_cache.iter().position(|peer| peer.node_id == peer_node_id) {
+            Some(idx) => idx,
+            None => return Err(()),
+        };
+        peers_cache.swap_remove(peer_idx);
+
+        Ok(())
+    }
 }
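Taken together with the ChannelManager changes above, the intended call pattern for the two new hooks looks roughly like this (a usage sketch mirroring the diff's variable names, assuming the surrounding connect/disconnect handler context; not additional code from the commit):

```rust
// After a successful `init` exchange: cache the peer if it supports onion
// messages and has a live channel. An Err here just means the peer won't be
// offered as a blinded-path hop, so the result is ignored.
let _ = flow.peer_connected(counterparty_node_id, &init_msg.features, peer_has_live_channel);

// On disconnect: drop the peer from the cache (Err if it was never cached).
let _ = flow.peer_disconnected(counterparty_node_id);
```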
