diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index ab2f483a1..3af7ab547 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -13,6 +13,8 @@ dictionary Config {
     u64 probing_liquidity_limit_multiplier;
     AnchorChannelsConfig? anchor_channels_config;
     RouteParametersConfig? route_parameters;
+    sequence<PublicKey> blocked_peers;
+    u32? max_channels_per_peer;
 };
 
 dictionary AnchorChannelsConfig {
@@ -98,6 +100,10 @@ interface Builder {
     [Throws=BuildError]
     void set_async_payments_role(AsyncPaymentsRole? role);
     [Throws=BuildError]
+    void set_blocked_peers(sequence<PublicKey> blocked_peers);
+    [Throws=BuildError]
+    void set_max_channels_per_peer(u32? max_channels_per_peer);
+    [Throws=BuildError]
     Node build();
     [Throws=BuildError]
     Node build_with_fs_store();
diff --git a/src/builder.rs b/src/builder.rs
index c0e39af7a..0915a2291 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -582,6 +582,25 @@ impl NodeBuilder {
 		Ok(self)
 	}
 
+	/// Sets the list of peers from which we will not accept inbound channels.
+	pub fn set_blocked_peers(
+		&mut self, blocked_peers: Vec<PublicKey>,
+	) -> Result<&mut Self, BuildError> {
+		self.config.blocked_peers = blocked_peers;
+		Ok(self)
+	}
+
+	/// Sets the maximum number of channels we'll accept from any single peer.
+	///
+	/// If set, we will reject inbound channel requests from peers that already have this many
+	/// channels open with us. If set to `None`, no limit is enforced.
+	pub fn set_max_channels_per_peer(
+		&mut self, max_channels_per_peer: Option<u32>,
+	) -> Result<&mut Self, BuildError> {
+		self.config.max_channels_per_peer = max_channels_per_peer;
+		Ok(self)
+	}
+
 	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self) -> Result<Node, BuildError> {
@@ -1045,6 +1064,21 @@ impl ArcedNodeBuilder {
 		self.inner.write().unwrap().set_async_payments_role(role).map(|_| ())
 	}
 
+	/// Sets the list of peers from which we will not accept inbound channels.
+	pub fn set_blocked_peers(&self, blocked_peers: Vec<PublicKey>) -> Result<(), BuildError> {
+		self.inner.write().unwrap().set_blocked_peers(blocked_peers).map(|_| ())
+	}
+
+	/// Sets the maximum number of channels we'll accept from any single peer.
+	///
+	/// If set, we will reject inbound channel requests from peers that already have this many
+	/// channels open with us. If set to `None`, no limit is enforced.
+	pub fn set_max_channels_per_peer(
+		&self, max_channels_per_peer: Option<u32>,
+	) -> Result<(), BuildError> {
+		self.inner.write().unwrap().set_max_channels_per_peer(max_channels_per_peer).map(|_| ())
+	}
+
 	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self) -> Result<Arc<Node>, BuildError> {
diff --git a/src/config.rs b/src/config.rs
index ce361c45a..e4d02d266 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -119,7 +119,9 @@ pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS: u64 = 5;
 /// | `probing_liquidity_limit_multiplier` | 3 |
 /// | `log_level` | Debug |
 /// | `anchor_channels_config` | Some(..) |
-/// | `route_parameters` | None |
+/// | `route_parameters` | None |
+/// | `blocked_peers` | [] |
+/// | `max_channels_per_peer` | None |
 ///
 /// See [`AnchorChannelsConfig`] and [`RouteParametersConfig`] for more information regarding their
 /// respective default values.
@@ -184,6 +186,15 @@ pub struct Config {
 	/// **Note:** If unset, default parameters will be used, and you will be able to override the
 	/// parameters on a per-payment basis in the corresponding method calls.
 	pub route_parameters: Option<RouteParametersConfig>,
+	/// A list of peers from which we will not accept inbound channels.
+	///
+	/// Channels requested by peers in this list will be automatically rejected.
+	pub blocked_peers: Vec<PublicKey>,
+	/// The maximum number of channels we'll accept from any single peer.
+	///
+	/// If set, we will reject inbound channel requests from peers that already have this many
+	/// channels open with us. If set to `None`, no limit is enforced.
+	pub max_channels_per_peer: Option<u32>,
 }
 
 impl Default for Config {
@@ -198,6 +209,8 @@
 			anchor_channels_config: Some(AnchorChannelsConfig::default()),
 			route_parameters: None,
 			node_alias: None,
+			blocked_peers: Vec::new(),
+			max_channels_per_peer: None,
 		}
 	}
 }
diff --git a/src/event.rs b/src/event.rs
index 1946350a3..71ede1586 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -1076,6 +1076,49 @@ where
 				}
 			}
 
+			if self.config.blocked_peers.contains(&counterparty_node_id) {
+				log_error!(
+					self.logger,
+					"Rejecting inbound channel from blocked peer {}",
+					counterparty_node_id,
+				);
+
+				self.channel_manager
+					.force_close_broadcasting_latest_txn(
+						&temporary_channel_id,
+						&counterparty_node_id,
+						"Channel request rejected".to_string(),
+					)
+					.unwrap_or_else(|e| {
+						log_error!(self.logger, "Failed to reject channel: {:?}", e)
+					});
+				return Ok(());
+			}
+
+			if let Some(max_channels_per_peer) = self.config.max_channels_per_peer {
+				let open_channels =
+					self.channel_manager.list_channels_with_counterparty(&counterparty_node_id);
+				if open_channels.len() >= max_channels_per_peer.try_into().unwrap() {
+					log_error!(
+						self.logger,
+						"Rejecting inbound channel from peer {} due to reaching the maximum number of channels per peer ({}).",
+						counterparty_node_id,
+						max_channels_per_peer,
+					);
+
+					self.channel_manager
+						.force_close_broadcasting_latest_txn(
+							&temporary_channel_id,
+							&counterparty_node_id,
+							"Channel request rejected".to_string(),
+						)
+						.unwrap_or_else(|e| {
+							log_error!(self.logger, "Failed to reject channel: {:?}", e)
+						});
+					return Ok(());
+				}
+			}
+
 			let anchor_channel = channel_type.requires_anchors_zero_fee_htlc_tx();
 			if anchor_channel {
 				if let Some(anchor_channels_config) =
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index e2d4207cd..a1f95f9ec 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -1860,3 +1860,141 @@ async fn drop_in_async_context() {
 	let node = setup_node(&chain_source, config, Some(seed_bytes));
 	node.stop().unwrap();
 }
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_blocked_peers_channel_rejection() {
+	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+	let chain_source = TestChainSource::Esplora(&electrsd);
+
+	// Setup two nodes
+	let mut config_a = random_config(true);
+	let config_b = random_config(true);
+
+	let node_b = setup_node(&chain_source, config_b, None);
+
+	// Start node_a with node_b blocked
+	config_a.node_config.blocked_peers.push(node_b.node_id());
+	let node_a = setup_node(&chain_source, config_a, None);
+
+	// Fund node_b
+	let addr_b = node_b.onchain_payment().new_address().unwrap();
+	premine_and_distribute_funds(
+		&bitcoind.client,
+		&electrsd.client,
+		vec![addr_b],
+		Amount::from_sat(5_000_000),
+	)
+	.await;
+	node_b.sync_wallets().unwrap();
+
+	// Attempt to open channel from node_b to node_a (should be rejected)
+	node_b
+		.open_channel(
+			node_a.node_id(),
+			node_a.listening_addresses().unwrap().first().unwrap().clone(),
+			1_000_000,
+			None,
+			None,
+		)
+		.unwrap();
+
+	// Expect rejection via ChannelClosed event
+	match node_b.next_event_async().await {
+		Event::ChannelClosed { reason, .. } => {
+			assert!(matches!(
+				reason,
+				Some(lightning::events::ClosureReason::CounterpartyForceClosed { .. })
+			));
+			node_b.event_handled().unwrap();
+		},
+		e => panic!("Expected ChannelClosed event, got: {:?}", e),
+	}
+
+	if let Some(_event) = node_a.next_event() {
+		node_a.event_handled().unwrap();
+	}
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_max_channels_per_peer() {
+	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+	let chain_source = TestChainSource::Esplora(&electrsd);
+
+	let mut config_a = random_config(true);
+	config_a.node_config.max_channels_per_peer = Some(2);
+
+	let config_b = random_config(true);
+
+	let node_a = setup_node(&chain_source, config_a, None);
+	let node_b = setup_node(&chain_source, config_b, None);
+
+	// Fund node_b
+	let addr_b = node_b.onchain_payment().new_address().unwrap();
+	premine_and_distribute_funds(
+		&bitcoind.client,
+		&electrsd.client,
+		vec![addr_b],
+		Amount::from_sat(10_000_000),
+	)
+	.await;
+	node_b.sync_wallets().unwrap();
+
+	// Open first channel - should succeed
+	open_channel(&node_b, &node_a, 1_000_000, false, &electrsd).await;
+	generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+	node_a.sync_wallets().unwrap();
+	node_b.sync_wallets().unwrap();
+	expect_channel_ready_event!(node_a, node_b.node_id());
+	expect_channel_ready_event!(node_b, node_a.node_id());
+
+	// Open second channel - should succeed
+	node_b.sync_wallets().unwrap();
+	open_channel(&node_b, &node_a, 1_000_000, false, &electrsd).await;
+	generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+	node_a.sync_wallets().unwrap();
+	node_b.sync_wallets().unwrap();
+	expect_channel_ready_event!(node_a, node_b.node_id());
+	expect_channel_ready_event!(node_b, node_a.node_id());
+
+	// Verify we have 2 channels
+	assert_eq!(
+		node_a
+			.list_channels()
+			.iter()
+			.filter(|c| c.counterparty_node_id == node_b.node_id())
+			.count(),
+		2
+	);
+
+	// Try to open third channel - should be rejected
+	node_b
+		.open_channel(
+			node_a.node_id(),
+			node_a.listening_addresses().unwrap().first().unwrap().clone(),
+			1_000_000,
+			None,
+			None,
+		)
+		.unwrap();
+
+	match node_b.next_event_async().await {
+		Event::ChannelClosed { reason, .. } => {
+			assert!(matches!(
+				reason,
+				Some(lightning::events::ClosureReason::CounterpartyForceClosed { .. })
+			));
+			node_b.event_handled().unwrap();
+		},
+		e => panic!("Expected ChannelClosed event, got: {:?}", e),
+	}
+
+	// Still should have only 2 channels
+	assert_eq!(
+		node_a
+			.list_channels()
+			.iter()
+			.filter(|c| c.counterparty_node_id == node_b.node_id())
+			.count(),
+		2
+	);
+}