From ab900bd4afbadaf7bd4778ff3f22a161e64cecd6 Mon Sep 17 00:00:00 2001 From: elnosh Date: Wed, 12 Nov 2025 10:21:48 -0500 Subject: [PATCH 1/2] Add docs to `commitment_signed_dance_return_raa` --- lightning/src/ln/chanmon_update_fail_tests.rs | 11 ++--------- lightning/src/ln/functional_test_utils.rs | 4 ++++ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 891451075e4..444be01595b 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3527,15 +3527,8 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode // Note that when completing as a side effect of a reload we completed the CS dance in // `reconnect_nodes` above. if completion_mode != BlockedUpdateComplMode::AtReload { - nodes[1] - .node - .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); - check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); - assert!(a.is_none()); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); - check_added_monitors(&nodes[1], 0); + let commitment = &as_htlc_fulfill.commitment_signed; + do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, true); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e31630a4926..140eaf3419a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2668,6 +2668,10 @@ pub fn do_main_commitment_signed_dance( (extra_msg_option, bs_revoke_and_ack) } +/// Runs the commitment_signed dance by delivering the given `commitment_signed` messages to +/// `node_a` and handling the responding `revoke_and_ack` and `commitment_signed`. +/// +/// Returns `node_b`'s final `revoke_and_ack` without delivering it to `node_a`. pub fn commitment_signed_dance_return_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &Vec<msgs::CommitmentSigned>, fail_backwards: bool, From 51de9f29b01c3082e261d853925c9765096bff41 Mon Sep 17 00:00:00 2001 From: elnosh Date: Fri, 14 Nov 2025 09:01:36 -0500 Subject: [PATCH 2/2] Remove `check_added_monitors` macro Replace calls to the `check_added_monitors` macro with the identically-named function. 
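The change at each call site is mechanical; a representative example taken from this diff (the free function takes the node by reference, so call sites that handed the macro the node expression directly gain a `&`):

    // Before: macro invocation, which accepted the node expression directly.
    check_added_monitors!(nodes[1], 1);
    // After: the identically-named free function, which takes the node by reference.
    check_added_monitors(&nodes[1], 1);

Call sites that already passed a reference, e.g. `check_added_monitors!(&nodes[1], 1)`, simply drop the `!`.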
--- .../tests/lsps2_integration_tests.rs | 22 +- lightning-persister/src/test_utils.rs | 11 +- lightning/src/chain/chainmonitor.rs | 21 +- lightning/src/chain/channelmonitor.rs | 7 +- lightning/src/ln/async_payments_tests.rs | 30 +- lightning/src/ln/async_signer_tests.rs | 30 +- lightning/src/ln/blinded_payment_tests.rs | 66 +-- lightning/src/ln/chanmon_update_fail_tests.rs | 408 +++++++++--------- lightning/src/ln/channelmanager.rs | 60 +-- lightning/src/ln/functional_test_utils.rs | 96 ++--- .../src/ln/max_payment_path_len_tests.rs | 4 +- lightning/src/ln/monitor_tests.rs | 66 +-- lightning/src/ln/offers_tests.rs | 6 +- lightning/src/ln/onion_route_tests.rs | 48 +-- lightning/src/ln/payment_tests.rs | 228 +++++----- lightning/src/ln/priv_short_conf_tests.rs | 40 +- lightning/src/ln/quiescence_tests.rs | 6 +- lightning/src/ln/reload_tests.rs | 34 +- lightning/src/ln/reorg_tests.rs | 30 +- lightning/src/ln/shutdown_tests.rs | 42 +- lightning/src/ln/update_fee_tests.rs | 2 +- lightning/src/ln/zero_fee_commitment_tests.rs | 6 +- lightning/src/util/persist.rs | 6 +- 23 files changed, 622 insertions(+), 647 deletions(-) diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 82f93b5990c..e4ace27b715 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -7,19 +7,11 @@ use common::{ get_lsps_message, LSPSNodes, LSPSNodesWithPayer, LiquidityNode, }; -use lightning::check_added_monitors; use lightning::events::{ClosureReason, Event}; use lightning::get_event_msg; use lightning::ln::channelmanager::PaymentId; use lightning::ln::channelmanager::Retry; -use lightning::ln::functional_test_utils::create_funding_transaction; -use lightning::ln::functional_test_utils::do_commitment_signed_dance; -use lightning::ln::functional_test_utils::expect_channel_pending_event; -use lightning::ln::functional_test_utils::expect_channel_ready_event; -use lightning::ln::functional_test_utils::expect_payment_sent; -use lightning::ln::functional_test_utils::test_default_channel_config; -use lightning::ln::functional_test_utils::SendEvent; -use lightning::ln::functional_test_utils::{connect_blocks, create_chan_between_nodes_with_value}; +use lightning::ln::functional_test_utils::*; use lightning::ln::msgs::BaseMessageHandler; use lightning::ln::msgs::ChannelMessageHandler; use lightning::ln::msgs::MessageSendEvent; @@ -1226,7 +1218,7 @@ fn client_trusts_lsp_end_to_end_test() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1566,7 +1558,7 @@ fn create_channel_with_manual_broadcast( let funding_created = get_event_msg!(service_node, MessageSendEvent::SendFundingCreated, *client_node_id); client_node.node.handle_funding_created(*service_node_id, &funding_created); - check_added_monitors!(client_node.inner, 1); + check_added_monitors(&client_node.inner, 1); let bs_signed_locked = client_node.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); @@ -1602,7 +1594,7 @@ fn create_channel_with_manual_broadcast( _ => panic!("Unexpected event"), } expect_channel_pending_event(&client_node, &service_node_id); - check_added_monitors!(service_node.inner, 1); + check_added_monitors(&service_node.inner, 1); as_channel_ready = 
get_event_msg!(service_node, MessageSendEvent::SendChannelReady, *client_node_id); @@ -1699,7 +1691,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1890,7 +1882,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -2227,7 +2219,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 1de51f44cb2..55208c61491 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -1,14 +1,11 @@ +use lightning::check_closed_broadcast; use lightning::events::ClosureReason; -use lightning::ln::functional_test_utils::{ - check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, - create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, -}; +use lightning::ln::functional_test_utils::*; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast}; use std::panic::RefUnwindSafe; @@ -190,7 +187,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -206,7 +203,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. 
check_persisted_data!(11); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index f4a1edff038..9fd6383cf7e 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1568,7 +1568,6 @@ where mod tests { use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::{ChannelMonitorUpdateStatus, Watch}; - use crate::check_added_monitors; use crate::events::{ClosureReason, Event}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -1601,9 +1600,9 @@ mod tests { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); @@ -1666,14 +1665,14 @@ mod tests { nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0] @@ -1683,21 +1682,21 @@ mod tests { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_second_raa, as_second_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[test] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 10e5049682e..515a3dc5f1d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -6911,10 +6911,7 @@ mod tests { use crate::util::logger::Logger; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger}; - use crate::{ - check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, - 
get_route_and_payment_hash, - }; + use crate::{check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; #[allow(unused_imports)] use crate::prelude::*; @@ -6973,7 +6970,7 @@ mod tests { nodes[1].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update // and provides the claim preimages for the two pending HTLCs. The first update generates diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 8e7fbdf94fd..1f1bb70714d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -981,7 +981,7 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[always_online_node, async_recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -1060,7 +1060,7 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) .without_clearing_recipient_events(); @@ -1129,7 +1129,7 @@ fn async_receive_flow_success() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Receiving a duplicate release_htlc message doesn't result in duplicate payment. nodes[0] @@ -1519,7 +1519,7 @@ fn amount_doesnt_match_invreq() { let mut ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. } if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[2], &nodes[3]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); @@ -1723,7 +1723,7 @@ fn invalid_async_receive_with_retry( &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); // Trigger a retry and make sure it fails after calling the closure that induces recipient @@ -1735,7 +1735,7 @@ fn invalid_async_receive_with_retry( let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. 
} if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() @@ -1749,7 +1749,7 @@ fn invalid_async_receive_with_retry( let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); @@ -1915,7 +1915,7 @@ fn expired_static_invoice_payment_path() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) @@ -2360,7 +2360,7 @@ fn refresh_static_invoices_for_used_offers() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&server.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[server, recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -2694,7 +2694,7 @@ fn invoice_server_is_not_channel_peer() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&forwarding_node.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[forwarding_node, recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -2933,7 +2933,7 @@ fn async_payment_e2e() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); @@ -3170,7 +3170,7 @@ fn intercepted_hold_htlc() { let mut events = lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - check_added_monitors!(lsp, 1); + check_added_monitors(&lsp, 1); let path: &[&Node] = &[recipient]; let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev); @@ -3271,7 +3271,7 @@ fn async_payment_mpp() { let expected_path: &[&Node] = &[recipient]; lsp_a.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_a, 1); + check_added_monitors(&lsp_a, 1); let mut events = lsp_a.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); @@ -3280,7 +3280,7 @@ fn async_payment_mpp() { do_pass_along_path(args); lsp_b.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_b, 1); + check_added_monitors(&lsp_b, 1); let mut 
events = lsp_b.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); @@ -3417,7 +3417,7 @@ fn release_htlc_races_htlc_onion_decode() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 0c7a467fde7..f38afc41fcc 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -301,7 +301,7 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -528,7 +528,7 @@ fn do_test_async_raa_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -593,7 +593,7 @@ fn do_test_async_raa_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -677,7 +677,7 @@ fn do_test_async_commitment_signature_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -743,7 +743,7 @@ fn do_test_async_commitment_signature_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -813,14 +813,14 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { .node .send_payment_with_route(route, payment_hash_2, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); get_htlc_update_msgs(&nodes[0], &node_b_id); // Send back update_fulfill_htlc + commitment_signed for the first payment. nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the // commitment_signed. 
@@ -844,7 +844,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); } // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -893,7 +893,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } // Make sure that on signer_unblocked we have the same behavior (even though RAA is ready, @@ -946,18 +946,18 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); let (bs_revoke_and_ack, bs_second_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); // The rest of this is boilerplate for resolving the previous state. nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); let as_commitment_signed = get_htlc_update_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -965,15 +965,15 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[1], false); diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 3dd62748582..6cd38b28b60 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -428,11 +428,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { } nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if intro_fails { let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -466,7 +466,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { cause_error!(2, 3, 
update_add); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -478,7 +478,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), core::slice::from_ref(&failed_destination) ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -524,10 +524,10 @@ fn failed_backwards_to_intro_node() { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -537,7 +537,7 @@ fn failed_backwards_to_intro_node() { // Ensure the final node fails to handle the HTLC. payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -608,7 +608,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); macro_rules! 
cause_error { @@ -632,7 +632,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, _ => panic!("Unexpected event {:?}", events), } check_closed_broadcast(&$curr_node, 1, true); - check_added_monitors!($curr_node, 1); + check_added_monitors(&$curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), @@ -644,22 +644,22 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, if intro_fails { cause_error!(nodes[0], nodes[1], nodes[2], chan_id_1_2, chan_upd_1_2.short_channel_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_1_2 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -736,7 +736,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } @@ -842,7 +842,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2], &nodes[3]], false); } @@ -943,10 +943,10 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut payment_event_1_2 = { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -958,7 +958,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { match check { ReceiveCheckFail::RecipientFail => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); 
expect_and_process_pending_htlcs(&nodes[2], false); check_payment_claimable( @@ -970,7 +970,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::OnionDecodeFail => { let session_priv = SecretKey::from_slice(&session_priv).unwrap(); @@ -994,7 +994,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { &payment_hash ).unwrap(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); @@ -1004,7 +1004,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { let update_add = &mut payment_event_1_2.msgs[0]; update_add.amount_msat -= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1018,7 +1018,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[2], &nodes[1], false, false).is_none()); @@ -1029,15 +1029,15 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ReceiveCheckFail::ProcessPendingHTLCsCheck => { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], true); expect_htlc_failure_conditions(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1132,7 +1132,7 @@ fn 
blinded_path_retries() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let updates = get_htlc_update_msgs(&nodes[3], &$intro_node.node.get_our_node_id()); assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); @@ -1163,7 +1163,7 @@ fn blinded_path_retries() { fail_payment_back!(nodes[1]); // Pass the retry along. - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None); @@ -1242,7 +1242,7 @@ fn min_htlc() { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); expect_htlc_handling_failed_destinations!( @@ -1438,7 +1438,7 @@ fn fails_receive_tlvs_authentication() { do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); let mut update_fail = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -2070,7 +2070,7 @@ fn test_trampoline_forward_payload_encoded_as_receive() { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let replacement_onion = { // create a substitute onion where the last Trampoline hop is a forward @@ -2234,7 +2234,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt_msat, payment_hash, payment_secret); if success { @@ -2374,7 +2374,7 @@ fn do_test_trampoline_unblinded_receive(success: bool) { outer_packet }; - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2491,7 +2491,7 @@ fn test_trampoline_forward_rejection() { nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 444be01595b..15f18a70d91 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -122,7 +122,7 @@ fn 
test_monitor_and_persister_update_fail() { // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -168,7 +168,7 @@ fn test_monitor_and_persister_update_fail() { } } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_sent(&nodes[0], preimage, None, false, false); } @@ -194,7 +194,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -212,7 +212,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -261,7 +261,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -280,7 +280,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { message: message.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); // TODO: Once we hit the chain with the failure transaction we should check that we get a @@ -337,7 +337,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -346,7 +346,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] // but nodes[0] won't respond since it is frozen. 
nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -386,7 +386,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -404,7 +404,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => {{ @@ -453,10 +453,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -500,7 +500,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_resp.1 = Some(as_resp_raa); bs_resp.2 = None; @@ -543,7 +543,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if disconnect_count & !disconnect_flags > 2 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); @@ -567,7 +567,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_commitment_update.update_fail_htlcs.is_empty()); assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }; } @@ -580,7 +580,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }; } @@ -644,7 +644,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -652,15 +652,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); expect_and_process_pending_htlcs(&nodes[1], false); @@ -742,7 +742,7 @@ fn test_monitor_update_fail_cs() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -751,13 +751,13 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -765,7 +765,7 @@ fn test_monitor_update_fail_cs() { MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -783,7 +783,7 @@ fn test_monitor_update_fail_cs() { .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); }, _ => panic!("Unexpected event"), @@ -792,11 +792,11 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); - check_added_monitors!(nodes[1], 1); + 
check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); @@ -850,7 +850,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -863,13 +863,13 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs(&nodes[1], false); let events = nodes[1].node.get_and_clear_pending_events(); @@ -905,7 +905,7 @@ fn test_monitor_update_raa_while_paused() { let id = PaymentId(our_payment_hash_1.0); nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -915,13 +915,13 @@ fn test_monitor_update_raa_while_paused() { let id_2 = PaymentId(our_payment_hash_2.0); nodes[1].node.send_payment_with_route(route, our_payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event_1.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -929,37 +929,37 @@ fn test_monitor_update_raa_while_paused() { nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let as_update_raa = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, 
&as_update_raa.0); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); @@ -993,7 +993,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1006,7 +1006,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let commitment = updates.commitment_signed; let bs_revoke_and_ack = commitment_signed_dance_return_raa(&nodes[1], &nodes[2], &commitment, false); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. @@ -1015,7 +1015,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -1023,7 +1023,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. @@ -1032,7 +1032,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. 
@@ -1041,18 +1041,18 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, true); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell // and not forwarded. expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { @@ -1062,13 +1062,13 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_4 = RecipientOnionFields::secret_only(payment_secret_4); let id_4 = PaymentId(payment_hash_4.0); nodes[2].node.send_payment_with_route(route, payment_hash_4, onion_4, id_4).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) } else { @@ -1080,12 +1080,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); if test_ignore_second_cs { @@ -1137,11 +1137,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); @@ -1150,14 +1150,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { 
assert!(bs_cs.update_fee.is_none()); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); as_cs = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); // As both messages are for nodes[1], they're in order. @@ -1166,7 +1166,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1184,7 +1184,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1199,23 +1199,23 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); expect_and_process_pending_htlcs(&nodes[2], false); @@ -1237,7 +1237,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { if test_ignore_second_cs { expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); assert_eq!(send_event.node_id, node_a_id); @@ -1291,7 +1291,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -1302,7 +1302,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); 
nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -1327,7 +1327,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.peer_disconnected(node_b_id); @@ -1345,7 +1345,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // The "disabled" bit should be unset as we just reconnected let bs_channel_upd = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); @@ -1354,7 +1354,7 @@ fn test_monitor_update_fail_reestablish() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1398,28 +1398,28 @@ fn raa_no_response_awaiting_raa_state() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from @@ -1430,17 +1430,17 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].node.handle_update_add_htlc(node_a_id, 
&payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1451,39 +1451,39 @@ fn raa_no_response_awaiting_raa_state() { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); @@ -1518,7 +1518,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.peer_disconnected(node_a_id); nodes[1].node.claim_funds(payment_preimage_1); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let init_msg = msgs::Init { @@ -1543,7 +1543,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a second payment from A to B, resulting in a commitment update that gets swallowed with @@ -1553,12 +1553,12 @@ fn claim_while_disconnected_monitor_update_fail() { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC // until we've channel_monitor_update'd and updated for the new commitment transaction. @@ -1568,7 +1568,7 @@ fn claim_while_disconnected_monitor_update_fail() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msgs.len(), 2); @@ -1582,11 +1582,11 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1595,7 +1595,7 @@ fn claim_while_disconnected_monitor_update_fail() { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -1604,20 +1604,20 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_commitment = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); 
nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); @@ -1660,7 +1660,7 @@ fn monitor_failed_no_reestablish_response() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1669,7 +1669,7 @@ fn monitor_failed_no_reestablish_response() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1] // is still failing to update monitors. @@ -1697,17 +1697,17 @@ fn monitor_failed_no_reestablish_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1744,7 +1744,7 @@ fn first_message_on_recv_ordering() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1752,13 +1752,13 @@ fn first_message_on_recv_ordering() { assert_eq!(payment_event.node_id, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + 
check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); @@ -1769,7 +1769,7 @@ fn first_message_on_recv_ordering() { let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1782,20 +1782,20 @@ fn first_message_on_recv_ordering() { // to the next message also tests resetting the delivery order. nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1803,13 +1803,13 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -1849,7 +1849,7 @@ fn test_monitor_update_fail_claim() { nodes[1].node.claim_funds(payment_preimage_1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Note that at this point there is a pending commitment transaction update for A being held by // B. 
Even when we go to send the payment from C through B to A, B will not update this @@ -1861,7 +1861,7 @@ fn test_monitor_update_fail_claim() { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). @@ -1880,7 +1880,7 @@ fn test_monitor_update_fail_claim() { let id_3 = PaymentId(payment_hash_3.0); let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1895,7 +1895,7 @@ fn test_monitor_update_fail_claim() { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_fulfill.update_fulfill_htlcs.remove(0)); @@ -1904,7 +1904,7 @@ fn test_monitor_update_fail_claim() { // Get the payment forwards, note that they were batched into one commitment update. nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_forward_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); @@ -1993,7 +1993,7 @@ fn test_monitor_update_on_pending_forwards() { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_fail_update = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); @@ -2005,7 +2005,7 @@ fn test_monitor_update_on_pending_forwards() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2018,12 +2018,12 @@ fn test_monitor_update_on_pending_forwards() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, 
&bs_updates.update_fail_htlcs[0]); @@ -2076,7 +2076,7 @@ fn monitor_update_claim_fail_no_response() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2087,7 +2087,7 @@ fn monitor_update_claim_fail_no_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2095,11 +2095,11 @@ fn monitor_update_claim_fail_no_response() { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -2143,7 +2143,7 @@ fn do_during_funding_monitor_fail( .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = @@ -2153,20 +2153,20 @@ fn do_during_funding_monitor_fail( funding_created_msg.funding_output_index, ); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_channel_pending_event(&nodes[0], &node_b_id); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2221,7 +2221,7 @@ fn do_during_funding_monitor_fail( chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { if !restore_b_before_lock { @@ -2325,7 +2325,7 @@ fn test_path_paused_mpp() { let onion = 
RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. @@ -2381,7 +2381,7 @@ fn test_pending_update_fee_ack_on_reconnect() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect @@ -2390,7 +2390,7 @@ fn test_pending_update_fee_ack_on_reconnect() { *feerate_lock *= 2; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_update_fee_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); @@ -2398,7 +2398,7 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_update_fee_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // bs_first_raa is not delivered until it is re-generated after reconnect @@ -2440,33 +2440,33 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_initial_send_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id).commitment_signed; nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( node_a_id, &get_htlc_update_msgs(&nodes[0], &node_b_id).commitment_signed, ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000); @@ -2503,13 +2503,13 @@ fn test_fail_htlc_on_broadcast_after_claim() { assert_eq!(bs_txn.len(), 1); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); 
nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); @@ -2517,7 +2517,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }], @@ -2549,7 +2549,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { @@ -2601,38 +2601,38 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &update_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_fee(node_a_id, as_second_update.update_fee.as_ref().unwrap()); nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { let commitment = &update_msgs.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, false); @@ -2696,29 +2696,29 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); assert_eq!(send.msgs.len(), 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if disconnect { // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just @@ -2750,7 +2750,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); @@ -2791,14 +2791,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // New outbound messages should be generated immediately upon a call to // get_and_clear_pending_msg_events (but not before). 
- check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(events.len(), 1); // Deliver the pending in-flight CS nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let commitment_msg = match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } => { @@ -2818,13 +2818,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { }; nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); let events = nodes[1].node.get_and_clear_pending_events(); @@ -2884,19 +2884,19 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let onion_2 = RecipientOnionFields::secret_only(second_payment_secret); let id_2 = PaymentId(second_payment_hash.0); nodes[0].node.send_payment_with_route(route, second_payment_hash, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } @@ -2913,13 +2913,13 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &node_b_id); // Note that we don't populate fulfill_msg.attribution_data here, which will lead to hold times being // unavailable. 
} else { nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -2936,7 +2936,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f } nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { @@ -2975,7 +2975,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if htlc_status == HTLCStatusAtDupClaim::HoldingCell { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); // We finally receive the second payment, but don't claim it bs_updates = Some(get_htlc_update_msgs(&nodes[1], &node_a_id)); @@ -3028,13 +3028,13 @@ fn test_temporary_error_during_shutdown() { node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_shutdown( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -3096,20 +3096,20 @@ fn double_temp_error() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_2); // Complete the first HTLC. 
Note that as a side-effect we handle the monitor update completions @@ -3159,18 +3159,18 @@ fn double_temp_error() { }; assert_eq!(node_id, node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_1); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Complete the second HTLC. let ((update_fulfill_2, commitment_signed_b2), raa_b2) = { @@ -3199,11 +3199,11 @@ fn double_temp_error() { ) }; nodes[0].node.handle_revoke_and_ack(node_b_id, &raa_b2); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_2); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment_signed_b2, false, false); expect_payment_sent!(nodes[0], payment_preimage_2); @@ -3266,12 +3266,12 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); @@ -3281,7 +3281,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -3376,13 +3376,13 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding @@ 
-3391,7 +3391,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &node_b_id); let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4037,7 +4037,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { message: msg.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4484,7 +4484,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4494,20 +4494,20 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim // the payment on C and give B the preimage for it. nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); // At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for @@ -4522,13 +4522,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate` // will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release // the `Event::PaymentForwarded`. 
- check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(!get_monitor!(nodes[1], chan_b.2) .get_all_current_outbound_htlcs() .iter() @@ -4561,7 +4561,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4571,7 +4571,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); @@ -4580,7 +4580,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // `Event::PaymentClaimed` from being generated. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0a13d2312b2..2e605e9129e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18632,7 +18632,7 @@ mod tests { RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap(); nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); @@ -18642,19 +18642,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash: 
our_payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18668,7 +18668,7 @@ mod tests { // Send the second half of the original MPP payment. nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None); @@ -18679,34 +18679,34 @@ mod tests { // lightning messages manually. nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); let mut bs_1st_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_2nd_updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); let as_second_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Note that successful MPP payments will generate a single PaymentSent event upon the first // path's success and a PaymentPathSuccessful event for each path's success. @@ -18760,13 +18760,13 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward @@ -18774,7 +18774,7 @@ mod tests { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18798,7 +18798,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18809,19 +18809,19 @@ mod tests { let payment_secret = PaymentSecret([43; 32]); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18841,7 +18841,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1, route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = 
nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18858,19 +18858,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params, Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18916,7 +18916,7 @@ mod tests { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); @@ -18984,7 +18984,7 @@ mod tests { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 100000); @@ -19048,7 +19048,7 @@ mod tests { .node .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -19250,13 +19250,13 @@ mod tests { let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); } open_channel_msg.common_fields.temporary_channel_id = 
ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 140eaf3419a..64f08db3c89 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1265,16 +1265,6 @@ pub fn check_added_monitors<CM: AChannelManager, H: NodeHolder<CM = CM>>(node: & } } -/// Check whether N channel monitor(s) have been added. -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules! check_added_monitors { - ($node: expr, $count: expr) => { - $crate::ln::functional_test_utils::check_added_monitors(&$node, $count); - }; -} - fn claimed_htlc_matches_path<'a, 'b, 'c>( origin_node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], htlc: &ClaimedHTLC, ) -> bool { @@ -1353,7 +1343,7 @@ pub fn _reload_node<'a, 'b, 'c>( node.chain_monitor.load_existing_monitor(channel_id, monitor), Ok(ChannelMonitorUpdateStatus::Completed), ); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } node_deserialized @@ -1509,7 +1499,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .node .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_ok()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); @@ -1552,7 +1542,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_err()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); tx } @@ -1634,7 +1624,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver_node_id); receiver.node.handle_funding_created(initiator_node_id, &funding_created); - check_added_monitors!(receiver, 1); + check_added_monitors(&receiver, 1); let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); let as_channel_ready; @@ -1644,7 +1634,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( initiator.node.handle_funding_signed(receiver_node_id, &msg); expect_channel_pending_event(&initiator, &receiver_node_id); expect_channel_pending_event(&receiver, &initiator_node_id); - check_added_monitors!(initiator, 1); + check_added_monitors(&initiator, 1); assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!( @@ -1838,11 +1828,11 @@ pub fn create_channel_manual_funding<'a, 'b, 'c: 'd, 'd>( funding_tx.clone(), ) .unwrap(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); node_b.node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let channel_id_b = expect_channel_pending_event(node_b, &node_a_id); if zero_conf { @@ -2008,7 +1998,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let as_funding_created = get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id); nodes[b].node.handle_funding_created(node_a_id, &as_funding_created); - check_added_monitors!(nodes[b], 1); + check_added_monitors(&nodes[b], 1); let cs_funding_signed = get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, node_a_id); @@ -2016,7 +2006,7 @@ pub fn
create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( nodes[a].node.handle_funding_signed(node_b_id, &cs_funding_signed); expect_channel_pending_event(&nodes[a], &node_b_id); - check_added_monitors!(nodes[a], 1); + check_added_monitors(&nodes[a], 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); @@ -2639,11 +2629,11 @@ pub fn do_main_commitment_signed_dance( let node_b_id = node_b.node.get_our_node_id(); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs(node_a, &node_b_id); - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); @@ -2660,7 +2650,7 @@ pub fn do_main_commitment_signed_dance( events.get(0).map(|e| e.clone()), ) }; - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); if fail_backwards { assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); @@ -2699,10 +2689,10 @@ pub fn do_commitment_signed_dance( ) { let node_b_id = node_b.node.get_our_node_id(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); node_a.node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. let channel_id = commitment_signed[0].channel_id; @@ -2726,7 +2716,7 @@ pub fn do_commitment_signed_dance( channel_id, }], ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); let mut number_of_msg_events = 0; @@ -3413,7 +3403,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( Retry::Attempts(0), ) .unwrap(); - check_added_monitors!(origin_node, expected_paths.len()); + check_added_monitors(&origin_node, expected_paths.len()); pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret); payment_id } @@ -3427,7 +3417,7 @@ fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { prev_node .node .handle_update_fail_htlc(node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); - check_added_monitors!(prev_node, 0); + check_added_monitors(&prev_node, 0); let is_first_hop = origin_node_id == prev_node.node.get_our_node_id(); // We do not want to fail backwards on the first hop. All other hops should fail backwards. 
@@ -3535,7 +3525,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option<Event> assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(node, 0); + check_added_monitors(&node, 0); if is_last_hop && is_probe { do_commitment_signed_dance(node, prev_node, &payment_event.commitment_msg, true, true); @@ -3637,14 +3627,14 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option<Event> assert!(events_2.len() == 1); expect_htlc_handling_failed_destinations!(events_2, &[failure]); node.node.process_pending_htlc_forwards(); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } else { assert!(events_2.is_empty()); } } else if !is_last_hop { let mut events_2 = node.node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -3679,7 +3669,7 @@ pub fn send_probe_along_route<'a, 'b, 'c>( let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); - check_added_monitors!(origin_node, expected_route.len()); + check_added_monitors(&origin_node, expected_route.len()); for (path, payment_hash) in expected_route.iter() { let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events); @@ -3866,7 +3856,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { _ => panic!(), } - check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); + check_added_monitors(&expected_paths[0].last().unwrap(), expected_paths.len()); let mut expected_total_fee_msat = 0; @@ -3935,7 +3925,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { $prev_node.node.get_our_node_id(), next_msgs.as_ref().unwrap().0.clone(), ); - check_added_monitors!($node, 0); + check_added_monitors(&$node, 0); assert!($node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance($node, $prev_node, commitment, false, false); @@ -4000,7 +3990,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { ); expected_total_fee_msat += actual_fee.unwrap(); fwd_amt_msat += actual_fee.unwrap(); - check_added_monitors!($node, 1); + check_added_monitors(&$node, 1); let new_next_msgs = if $new_msgs { let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4049,7 +4039,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { // Ensure that claim_funds is idempotent.
expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); expected_total_fee_msat } @@ -4144,7 +4134,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( our_payment_hash: PaymentHash, expected_fail_reason: PaymentFailureReason, ) { let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); - check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); + check_added_monitors(&expected_paths[0].last().unwrap(), expected_paths.len()); let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, Vec<msgs::CommitmentSigned>), PublicKey)> = Vec::with_capacity(expected_paths.len()); @@ -4256,7 +4246,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0, ); - check_added_monitors!(origin_node, 0); + check_added_monitors(&origin_node, 0); assert!(origin_node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance(origin_node, prev_node, commitment, false, false); @@ -4319,7 +4309,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( pending_events ); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); } pub fn fail_payment<'a, 'b, 'c>( @@ -5175,9 +5165,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.0 != 0 || expect_renegotiated_funding_locked_monitor_update.1 { - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); } let mut resp_2 = Vec::new(); @@ -5189,9 +5179,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.1 != 0 || expect_renegotiated_funding_locked_monitor_update.0 { - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); } // We don't yet support both needing updates, as that would require a different commitment dance: @@ -5266,7 +5256,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5306,15 +5296,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_b, - if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 } + check_added_monitors( + &node_b, + if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); } }
else { @@ -5380,7 +5370,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap()); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5420,15 +5410,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_a, - if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 } + check_added_monitors( + &node_a, + if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); } } else { @@ -5511,7 +5501,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( tx.clone(), ) .is_ok()); - check_added_monitors!(funding_node, 0); + check_added_monitors(&funding_node, 0); let events = funding_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), params.len()); for (other_node, ..) in params { diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index f67ad442c29..fa7e8d8f132 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -92,7 +92,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash, max_sized_onion.clone(), id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1]]; @@ -174,7 +174,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash_2, onion_allowing_2_hops, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 34064ebb484..04915affa20 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -68,7 +68,7 @@ fn chanmon_fail_from_stale_commitment() { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2); @@ -78,19 +78,19 @@ fn chanmon_fail_from_stale_commitment() { expect_and_process_pending_htlcs(&nodes[1], false); get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment // transaction for nodes[1]. 
mine_transaction(&nodes[1], &bs_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let fail_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); @@ -140,7 +140,7 @@ fn revoked_output_htlc_resolution_timing() { // Confirm the revoked commitment transaction, closing the channel. mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Two justice transactions will be broadcast, one on the unpinnable, revoked to_self output, @@ -185,7 +185,7 @@ fn archive_fully_resolved_monitors() { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -565,18 +565,18 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 3_000_100); let mut b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); // We claim the dust payment here as well, but it won't impact our claimable balances as its // dust and thus doesn't appear on chain at all. 
nodes[1].node.claim_funds(dust_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], dust_payment_hash, 3_000); nodes[1].node.claim_funds(timeout_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], timeout_payment_hash, 4_000_200); if prev_commitment_tx { @@ -585,14 +585,14 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_fulfill); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); let _htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); let _bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } // Once B has received the payment preimage, it includes the value of the HTLC in its @@ -681,11 +681,11 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c assert_eq!(remote_txn[0].output[b_broadcast_txn[1].input[0].previous_output.vout as usize].value.to_sat(), 4_000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -885,7 +885,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -897,7 +897,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 20_000_000); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = 
get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -907,7 +907,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); nodes[1].node.claim_funds(payment_preimage_2); get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; @@ -918,7 +918,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let message = "Channel force-closed".to_owned(); let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); @@ -980,7 +980,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_claim_txn.len(), 1); @@ -1210,7 +1210,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[0], &as_txn[0]); nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, @@ -1218,7 +1218,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[1], &as_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -1427,12 +1427,12 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc nodes[1].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 5); @@ -1461,7 +1461,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc connect_blocks(&nodes[1], 
htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_events(&nodes[1], &[ExpectedCloseEvent { channel_capacity_sats: Some(1_000_000), channel_id: Some(chan_id), @@ -1723,7 +1723,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // B will generate an HTLC-Success from its revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { handle_bump_htlc_event(&nodes[1], 1); @@ -1767,7 +1767,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2020,7 +2020,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho nodes[0].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let _a_htlc_msgs = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { @@ -2049,7 +2049,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut claim_txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(claim_txn.len(), 2); @@ -2635,9 +2635,9 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { } mine_transactions(&nodes[0], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); mine_transactions(&nodes[1], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if !have_htlcs { // If we don't have any HTLCs, we're done, the rest of the test is about HTLC transactions @@ -2828,7 +2828,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { } } check_closed_broadcast(&nodes[0], 2, true); - check_added_monitors!(&nodes[0], 2); + check_added_monitors(&nodes[0], 2); check_closed_event(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id(); 2], 1000000); // Alice should detect the confirmed revoked commitments, and attempt to claim all of the @@ -3167,13 +3167,13 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor mine_transaction(closing_node, anchor_tx.as_ref().unwrap()); } check_closed_broadcast!(closing_node, true); - check_added_monitors!(closing_node, 1); + check_added_monitors(&closing_node, 1); let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string(); 
check_closed_event(&closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, &[other_node.node.get_our_node_id()], 1_000_000); mine_transaction(other_node, &commitment_tx); check_closed_broadcast!(other_node, true); - check_added_monitors!(other_node, 1); + check_added_monitors(&other_node, 1); check_closed_event(&other_node, 1, ClosureReason::CommitmentTxConfirmed, &[closing_node.node.get_our_node_id()], 1_000_000); // If we update the best block to the new height before providing the confirmed transactions, diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 4c53aefe58d..906d9e247ce 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2414,7 +2414,7 @@ fn rejects_keysend_to_non_static_invoice_path() { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), keysend_payment_id, route_params, Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); @@ -2482,7 +2482,7 @@ fn no_double_pay_with_stale_channelmanager() { let expected_route: &[&[&Node]] = &[&[&nodes[1]], &[&nodes[1]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) @@ -2507,7 +2507,7 @@ fn no_double_pay_with_stale_channelmanager() { reload_node!(nodes[0], &alice_chan_manager_serialized, &[&monitor_0, &monitor_1], persister, chain_monitor, alice_deserialized); // The stale manager results in closing the channels. check_closed_event(&nodes[0], 2, ClosureReason::OutdatedChannelManager, &[bob_id, bob_id], 10_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); // Alice receives a duplicate invoice, but the payment should be transitioned to Retryable by now. 
nodes[0].onion_messenger.handle_onion_message(bob_id, &invoice_om); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index b097eac5dfd..d76d709abbb 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -133,7 +133,7 @@ fn run_onion_failure_test_with_fail_intercept( .node .send_payment_with_route(route.clone(), *payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); // temper update_add (0 => 1) let mut update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -170,7 +170,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[1]); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert_eq!(update_1.update_add_htlcs.len(), 1); // tamper update_add (1 => 2) let mut update_add_1 = update_1.update_add_htlcs[0].clone(); @@ -202,7 +202,7 @@ fn run_onion_failure_test_with_fail_intercept( }, _ => {}, } - check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert!(update_2_1.update_fail_htlcs.len() == 1); @@ -405,7 +405,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route.clone(), payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -456,7 +456,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route, payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -1548,7 +1548,7 @@ fn test_overshoot_final_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); @@ -1567,7 +1567,7 @@ fn test_overshoot_final_cltv() { } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); @@ -2285,7 +2285,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -2300,7 +2300,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { &nodes[1], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); 
assert_eq!(events.len(), 1); @@ -2435,7 +2435,7 @@ fn test_phantom_onion_hmac_failure() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2470,7 +2470,7 @@ fn test_phantom_onion_hmac_failure() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2508,7 +2508,7 @@ fn test_phantom_invalid_onion_payload() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2571,7 +2571,7 @@ fn test_phantom_invalid_onion_payload() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2607,7 +2607,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2637,7 +2637,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2676,7 +2676,7 @@ fn test_phantom_failure_too_low_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2691,7 +2691,7 @@ fn test_phantom_failure_too_low_cltv() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2729,7 +2729,7 @@ fn test_phantom_failure_modified_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, 
PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2784,7 +2784,7 @@ fn test_phantom_failure_expires_too_soon() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2834,7 +2834,7 @@ fn test_phantom_failure_too_low_recv_amt() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2851,7 +2851,7 @@ fn test_phantom_failure_too_low_recv_amt() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2904,7 +2904,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2954,7 +2954,7 @@ fn test_phantom_failure_reject_payment() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2981,7 +2981,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index dc91efa9da4..1877743d81e 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -144,7 +144,7 @@ fn mpp_retry() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -169,7 +169,7 @@ fn mpp_retry() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); 
assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -191,7 +191,7 @@ fn mpp_retry() { route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); expect_and_process_pending_htlcs(&nodes[0], false); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -262,7 +262,7 @@ fn mpp_retry_overpay() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -288,7 +288,7 @@ fn mpp_retry_overpay() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -314,7 +314,7 @@ fn mpp_retry_overpay() { nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -362,7 +362,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Initiate the MPP payment. 
let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -384,7 +384,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let htlc_fail_updates = get_htlc_update_msgs(&nodes[3], &node_b_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[3], commitment, false, false); @@ -397,7 +397,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, false); @@ -461,7 +461,7 @@ fn do_test_keysend_payments(public_node: bool) { nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); @@ -510,7 +510,7 @@ fn test_mpp_keysend() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -553,7 +553,7 @@ fn test_fulfill_hold_times() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -621,7 +621,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let onion = RecipientOnionFields::spontaneous_empty(); let retry = Retry::Attempts(0); nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -629,7 +629,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { do_commitment_signed_dance(&nodes[1], &nodes[0], &update_0.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); @@ -670,7 +670,7 @@ fn 
test_reject_mpp_keysend_htlc_mismatching_secret() {
 	let params = route.route_params.clone().unwrap();
 	let retry = Retry::Attempts(0);
 	nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let update_2 = get_htlc_update_msgs(&nodes[0], &node_c_id);
 	let update_add_2 = update_2.update_add_htlcs[0].clone();
@@ -678,7 +678,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	do_commitment_signed_dance(&nodes[2], &nodes[0], &update_2.commitment_signed, false, true);
 
 	expect_and_process_pending_htlcs(&nodes[2], false);
-	check_added_monitors!(&nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	let update_3 = get_htlc_update_msgs(&nodes[2], &node_d_id);
 	let update_add_3 = update_3.update_add_htlcs[0].clone();
 	nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3);
@@ -710,7 +710,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	nodes[3].node.process_pending_htlc_forwards();
 	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[3], &[fail_type]);
-	check_added_monitors!(nodes[3], 1);
+	check_added_monitors(&nodes[3], 1);
 
 	// Fail back along nodes[2]
 	let update_fail_0 = get_htlc_update_msgs(&nodes[3], &node_c_id);
@@ -721,7 +721,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	let fail_type =
 		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let update_fail_1 = get_htlc_update_msgs(&nodes[2], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_c_id, &update_fail_1.update_fail_htlcs[0]);
@@ -806,7 +806,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -862,7 +862,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	} else {
 		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 	}
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.peer_disconnected(node_a_id);
@@ -890,7 +890,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 			nodes[1].node.handle_error(node_a_id, msg);
 			check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }, &[node_a_id], 100000);
-			check_added_monitors!(nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
 			nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 		},
@@ -901,13 +901,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
 	// we close in a moment.
 	nodes[2].node.claim_funds(payment_preimage_1);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
 
 	let mut htlc_fulfill = get_htlc_update_msgs(&nodes[2], &node_b_id);
 	let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0);
 	nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	do_commitment_signed_dance(&nodes[1], &nodes[2], &htlc_fulfill.commitment_signed, false, false);
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
@@ -990,7 +990,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	let id = PaymentId(payment_hash.0);
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -1071,7 +1071,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let init_msg = msgs::Init {
 		features: nodes[1].node.init_features(),
@@ -1102,7 +1102,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 			);
 			let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) };
 			check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
-			check_added_monitors!(nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		},
 		_ => panic!("Unexpected event"),
@@ -1115,7 +1115,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[2].node.fail_htlc_backwards(&hash);
 	let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let htlc_fulfill_updates = get_htlc_update_msgs(&nodes[2], &node_b_id);
 	nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]);
@@ -1197,7 +1197,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// the payment is not (spuriously) listed as still pending.
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt, hash, payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
@@ -1271,7 +1271,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(
 		.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone())
 		.unwrap();
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000);
@@ -1289,12 +1289,12 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(
 	};
 
 	nodes[1].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
 	mine_transaction(&nodes[1], &commitment_tx);
 	check_closed_broadcast(&nodes[1], 1, false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
 	let htlc_success_tx = {
@@ -1450,7 +1450,7 @@ fn test_fulfill_restart_failure() {
 	let mon_ser = get_monitor!(nodes[1], chan_id).encode();
 
 	nodes[1].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash, 100_000);
 
 	let mut htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id);
@@ -1467,7 +1467,7 @@ fn test_fulfill_restart_failure() {
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
 	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]);
@@ -1517,7 +1517,7 @@ fn get_ldk_payment_preimage() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	// Make sure to use `get_payment_preimage`
 	let preimage = Some(nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap());
@@ -1560,7 +1560,7 @@ fn sent_probe_is_probe_of_sending_node() {
 	}
 
 	get_htlc_update_msgs(&nodes[0], &node_b_id);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 }
 
 #[test]
@@ -1607,20 +1607,20 @@ fn failed_probe_yields_event() {
 	let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
 	// node[0] -- update_add_htlcs -> node[1]
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false);
 
 	expect_and_process_pending_htlcs(&nodes[1], true);
 
 	// node[0] <- update_fail_htlcs -- node[1]
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	let _events = nodes[1].node.get_and_clear_pending_events();
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]);
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 	do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
 
 	let mut events = nodes[0].node.get_and_clear_pending_events();
@@ -1658,15 +1658,15 @@ fn onchain_failed_probe_yields_event() {
 	let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
 	// node[0] -- update_add_htlcs -> node[1]
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false);
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let _ = get_htlc_update_msgs(&nodes[1], &node_c_id);
 
 	// Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on
@@ -1674,7 +1674,7 @@ fn onchain_failed_probe_yields_event() {
 	let bs_txn = get_local_commitment_txn!(nodes[1], chan_id);
 	confirm_transaction(&nodes[0], &bs_txn[0]);
 	check_closed_broadcast!(&nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_added_monitors(&nodes[0], 0);
 
 	let mut events = nodes[0].node.get_and_clear_pending_events();
@@ -1925,7 +1925,7 @@ fn claimed_send_payment_idempotent() {
 
 	let onion = RecipientOnionFields::secret_only(second_payment_secret);
 	nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1]], preimage_b);
 }
@@ -1994,7 +1994,7 @@ fn abandoned_send_payment_idempotent() {
 	// failed payment back.
 	let onion = RecipientOnionFields::secret_only(second_payment_secret);
 	nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
 }
@@ -2163,12 +2163,12 @@ fn test_holding_cell_inflight_htlcs() {
 		let onion = RecipientOnionFields::secret_only(payment_secret_1);
 		let id = PaymentId(payment_hash_1.0);
 		nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 
 		let onion = RecipientOnionFields::secret_only(payment_secret_2);
 		let id = PaymentId(payment_hash_2.0);
 		nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 0);
+		check_added_monitors(&nodes[0], 0);
 	}
 
 	let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
@@ -2309,7 +2309,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 		expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[fail]);
 		nodes[1].node.process_pending_htlc_forwards();
 		let update_fail = get_htlc_update_msgs(&nodes[1], &node_a_id);
-		check_added_monitors!(&nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		assert!(update_fail.update_fail_htlcs.len() == 1);
 		let fail_msg = update_fail.update_fail_htlcs[0].clone();
 		nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg);
@@ -2394,7 +2394,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 		let fail_type =
 			HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid };
 		expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 
 		let htlc_fail = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		assert!(htlc_fail.update_add_htlcs.is_empty());
@@ -2490,7 +2490,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap();
-	check_added_monitors!(nodes[0], num_mpp_parts); // one monitor per path
+	check_added_monitors(&nodes[0], num_mpp_parts); // one monitor per path
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), num_mpp_parts);
@@ -2647,7 +2647,7 @@ fn do_automatic_retries(test: AutoRetry) {
 	macro_rules! pass_failed_attempt_with_retry_along_path {
 		($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => {
 			// Send a payment attempt that fails due to lack of liquidity on the second hop
-			check_added_monitors!(nodes[0], 1);
+			check_added_monitors(&nodes[0], 1);
 			let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id);
 			let mut update_add = update_0.update_add_htlcs[0].clone();
 			nodes[1].node.handle_update_add_htlc(node_a_id, &update_add);
@@ -2664,7 +2664,7 @@ fn do_automatic_retries(test: AutoRetry) {
 			);
 			nodes[1].node.process_pending_htlc_forwards();
 			let update_1 = get_htlc_update_msgs(&nodes[1], &node_a_id);
-			check_added_monitors!(&nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 			assert!(update_1.update_fail_htlcs.len() == 1);
 			let fail_msg = update_1.update_fail_htlcs[0].clone();
 			nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg);
@@ -2710,7 +2710,7 @@ fn do_automatic_retries(test: AutoRetry) {
 
 		// We retry payments in `process_pending_htlc_forwards`
 		nodes[0].node.process_pending_htlc_forwards();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
@@ -2738,7 +2738,7 @@ fn do_automatic_retries(test: AutoRetry) {
 
 		// We retry payments in `process_pending_htlc_forwards`
 		nodes[0].node.process_pending_htlc_forwards();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
@@ -3008,7 +3008,7 @@ fn auto_retry_partial_failure() {
 	}
 
 	// Pass the first part of the payment along the path.
-	check_added_monitors!(nodes[0], 1); // only one HTLC actually made it out
+	check_added_monitors(&nodes[0], 1); // only one HTLC actually made it out
 	let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 
 	// Only one HTLC/channel update actually made it out
@@ -3017,35 +3017,35 @@ fn auto_retry_partial_failure() {
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_2nd_htlcs = SendEvent::from_node(&nodes[0]);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[0]);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[1]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_2nd_htlcs.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]);
 	nodes[1].node.process_pending_htlc_forwards();
@@ -3058,19 +3058,19 @@ fn auto_retry_partial_failure() {
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_claim.update_fulfill_htlcs.remove(0));
 	expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim.commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let (as_third_raa, as_third_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa);
-	check_added_monitors!(nodes[1], 4);
+	check_added_monitors(&nodes[1], 4);
 	let mut bs_2nd_claim = get_htlc_update_msgs(&nodes[1], &node_a_id);
 
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	expect_payment_path_successful!(nodes[0]);
 
 	let bs_second_fulfill_a = bs_2nd_claim.update_fulfill_htlcs.remove(0);
@@ -3078,18 +3078,18 @@ fn auto_retry_partial_failure() {
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_a);
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_b);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_fourth_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_fourth_cs);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	if let Event::PaymentPathSuccessful { .. } = events[0] {
@@ -3167,7 +3167,7 @@ fn auto_retry_zero_attempts_send_error() {
 	} else {
 		panic!();
 	}
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 }
 
 #[test]
@@ -3203,12 +3203,12 @@ fn fails_paying_after_rejected_by_payee() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
 	expect_and_process_pending_htlcs(&nodes[1], false);
 	expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat);
@@ -3336,7 +3336,7 @@ fn retry_multi_path_single_failed_payment() {
 	}
 	let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(htlc_msgs.len(), 2);
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 }
 
 #[test]
@@ -3417,7 +3417,7 @@ fn immediate_retry_on_failure() {
 	}
 	let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(htlc_msgs.len(), 2);
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
}
 
 #[test]
@@ -3541,40 +3541,40 @@ fn no_extra_retries_on_back_to_back_fail() {
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
 	let first_htlc = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	assert_eq!(first_htlc.msgs.len(), 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let second_htlc = SendEvent::from_node(&nodes[0]);
 	assert_eq!(second_htlc.msgs.len(), 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_and_process_pending_htlcs(&nodes[1], false);
 	let next_hop_failure =
@@ -3631,7 +3631,7 @@ fn no_extra_retries_on_back_to_back_fail() {
 	nodes[0].node.process_pending_htlc_forwards();
 	let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]);
 	let commitment = &retry_htlc_updates.commitment_msg;
@@ -3785,26 +3785,26 @@ fn test_simple_partial_retry() {
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
 	let first_htlc = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	assert_eq!(first_htlc.msgs.len(), 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let second_htlc_updates = SendEvent::from_node(&nodes[0]);
 	assert_eq!(second_htlc_updates.msgs.len(), 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]);
 	let commitment = &second_htlc_updates.commitment_msg;
@@ -3860,14 +3860,14 @@ fn test_simple_partial_retry() {
 	nodes[0].node.process_pending_htlc_forwards();
 	let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]);
 	let commitment = &retry_htlc_updates.commitment_msg;
 	do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, true);
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_second_forward = get_htlc_update_msgs(&nodes[1], &node_c_id);
 
 	nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]);
@@ -3987,7 +3987,7 @@ fn test_threaded_payment_retries() {
 	let id = PaymentId(payment_hash.0);
 	let retry = Retry::Attempts(0xdeadbeef);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params.clone(), retry).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 	let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_msg_events.len(), 2);
 	send_msg_events.retain(|msg| {
@@ -4086,7 +4086,7 @@ fn test_threaded_payment_retries() {
 
 		nodes[0].node.process_pending_htlc_forwards();
 		send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
-		check_added_monitors!(nodes[0], 2);
+		check_added_monitors(&nodes[0], 2);
 
 		if cur_time > end_time {
 			break;
@@ -4124,14 +4124,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint:
 	}
 
 	nodes[1].node.claim_funds(our_payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
 
 	if at_midpoint {
 		let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0));
 		nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed);
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 	} else {
 		let mut fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill.update_fulfill_htlcs.remove(0));
@@ -4466,7 +4466,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) {
 	let mut payment_event = SendEvent::from_event(ev);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	check_added_monitors!(&nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
 	expect_and_process_pending_htlcs(&nodes[1], false);
@@ -4536,7 +4536,7 @@ fn test_retry_custom_tlvs() {
 	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
 
 	nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap();
-	check_added_monitors!(nodes[0], 1); // one monitor per path
+	check_added_monitors(&nodes[0], 1); // one monitor per path
 
 	// Add the HTLC along the first hop.
 	let htlc_updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
@@ -4550,7 +4550,7 @@ fn test_retry_custom_tlvs() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id };
 	expect_htlc_failure_conditions(events, &[fail]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;
@@ -4571,7 +4571,7 @@ fn test_retry_custom_tlvs() {
 	route.route_params = Some(route_params.clone());
 	nodes[0].router.expect_find_route(route_params, Ok(route));
 	nodes[0].node.process_pending_htlc_forwards();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[1], &nodes[2]];
@@ -4673,7 +4673,7 @@ fn do_test_custom_tlvs_consistency(
 		.node
 		.test_send_payment_along_path(path_a, &hash, onion, amt_msat, cur_height, id, &None, priv_a)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -4695,7 +4695,7 @@ fn do_test_custom_tlvs_consistency(
 		.node
 		.test_send_payment_along_path(path_b, &hash, onion, amt_msat, cur_height, id, &None, priv_b)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	{
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -4707,14 +4707,14 @@ fn do_test_custom_tlvs_consistency(
 		do_commitment_signed_dance(&nodes[2], &nodes[0], commitment, false, false);
 
 		expect_and_process_pending_htlcs(&nodes[2], false);
-		check_added_monitors!(nodes[2], 1);
+		check_added_monitors(&nodes[2], 1);
 
 		let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 		let payment_event = SendEvent::from_event(events.pop().unwrap());
 
 		nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
-		check_added_monitors!(nodes[3], 0);
+		check_added_monitors(&nodes[3], 0);
 		do_commitment_signed_dance(&nodes[3], &nodes[2], &payment_event.commitment_msg, true, true);
 	}
 	expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]);
@@ -4743,7 +4743,7 @@ fn do_test_custom_tlvs_consistency(
 			&nodes[3],
 			&expected_destinations,
 		);
-		check_added_monitors!(nodes[3], 1);
+		check_added_monitors(&nodes[3], 1);
 
 		let fail_updates_1 = get_htlc_update_msgs(&nodes[3], &node_c_id);
 		nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]);
@@ -4753,7 +4753,7 @@ fn do_test_custom_tlvs_consistency(
 		let fail =
 			HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 };
 		expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail]);
-		check_added_monitors!(nodes[2], 1);
+		check_added_monitors(&nodes[2], 1);
 
 		let fail_updates_2 = get_htlc_update_msgs(&nodes[2], &node_a_id);
 		nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]);
@@ -4815,7 +4815,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 	};
 	let retry = Retry::Attempts(1);
 	nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_events.len(), 2);
@@ -5009,7 +5009,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -5169,7 +5169,7 @@ fn test_non_strict_forwarding() {
 		let onion = RecipientOnionFields::secret_only(payment_secret);
 		let id = PaymentId(payment_hash.0);
 		nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
 		let mut send_event = SendEvent::from_event(msg_events.remove(0));
@@ -5177,7 +5177,7 @@ fn test_non_strict_forwarding() {
 		do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false);
 
 		expect_and_process_pending_htlcs(&nodes[1], false);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
 		send_event = SendEvent::from_event(msg_events.remove(0));
@@ -5209,7 +5209,7 @@ fn test_non_strict_forwarding() {
 
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 1);
 	let mut send_event = SendEvent::from_event(msg_events.remove(0));
@@ -5217,7 +5217,7 @@ fn test_non_strict_forwarding() {
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false);
 
 	expect_and_process_pending_htlcs(&nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let routed_scid = route.paths[0].hops[1].short_channel_id;
 	let routed_chan_id = match routed_scid {
 		scid if scid == chan_update_1.contents.short_channel_id => channel_id_1,
@@ -5346,7 +5346,7 @@ fn pay_route_without_params() {
 
 	let id = PaymentId(hash.0);
 	nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events);
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index ea34e88f619..e0a43f408a1 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -82,7 +82,7 @@ fn test_priv_forwarding_rejection() {
 	let onion = RecipientOnionFields::secret_only(our_payment_secret);
 	let id = PaymentId(our_payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let payment_event =
 		SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
@@ -165,7 +165,7 @@ fn test_priv_forwarding_rejection() {
 	let onion = RecipientOnionFields::secret_only(our_payment_secret);
 	let id = PaymentId(our_payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(
 		&nodes[0],
 		&[&[&nodes[1], &nodes[2]]],
@@ -349,7 +349,7 @@ fn test_routed_scid_alias() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
@@ -513,7 +513,7 @@ fn test_inbound_scid_privacy() {
 		node_b_id,
 		&get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_c_id),
 	);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let cs_funding_signed =
 		get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_b_id);
@@ -521,7 +521,7 @@ fn test_inbound_scid_privacy() {
 
 	nodes[1].node.handle_funding_signed(node_c_id, &cs_funding_signed);
 	expect_channel_pending_event(&nodes[1], &node_c_id);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let conf_height =
 		core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
@@ -579,7 +579,7 @@ fn test_inbound_scid_privacy() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
@@ -600,7 +600,7 @@ fn test_inbound_scid_privacy() {
 	let onion = RecipientOnionFields::secret_only(payment_secret_2);
 	let id = PaymentId(payment_hash_2.0);
 	nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let payment_event = SendEvent::from_node(&nodes[0]);
 	assert_eq!(node_b_id, payment_event.node_id);
@@ -697,7 +697,7 @@ fn test_scid_alias_returned() {
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true);
@@ -709,7 +709,7 @@ fn test_scid_alias_returned() {
 		channel_id: chan.0.channel_id,
 	}];
 	expect_htlc_failure_conditions(events, &expected_failures);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]);
@@ -734,7 +734,7 @@ fn test_scid_alias_returned() {
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true);
@@ -844,7 +844,7 @@ fn test_0conf_channel_with_async_monitor() {
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
 	let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
@@ -859,7 +859,7 @@ fn test_0conf_channel_with_async_monitor() {
 		MessageSendEvent::SendFundingSigned { node_id, msg } => {
 			assert_eq!(*node_id, node_a_id);
 			nodes[0].node.handle_funding_signed(node_b_id, &msg);
-			check_added_monitors!(nodes[0], 1);
+			check_added_monitors(&nodes[0], 1);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -937,26 +937,26 @@ fn test_0conf_channel_with_async_monitor() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_send = SendEvent::from_node(&nodes[0]);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_send.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_send.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_revoke_and_ack(
 		node_a_id,
 		&get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id),
 	);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -970,10 +970,10 @@ fn test_0conf_channel_with_async_monitor() {
 		.chain_monitor
 		.channel_monitor_updated(bs_raa.channel_id, latest_update)
 		.unwrap();
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_send = SendEvent::from_node(&nodes[1]);
 
 	nodes[2].node.handle_update_add_htlc(node_b_id, &bs_send.msgs[0]);
@@ -1010,7 +1010,7 @@ fn test_0conf_close_no_early_chan_update() {
 	send_payment(&nodes[0], &[&nodes[1]], 100_000);
 
 	nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone());
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000);
 	let _ = get_err_msg(&nodes[0], &node_b_id);
diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs
index 6daf4d65b9d..a2b14a798c4 100644
--- a/lightning/src/ln/quiescence_tests.rs
+++ b/lightning/src/ln/quiescence_tests.rs
@@ -101,7 +101,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let payment_id = PaymentId(payment_hash.0);
 	local_node.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap();
-	check_added_monitors!(local_node, 1);
+	check_added_monitors(&local_node, 1);
 
 	// Attempt to send an HTLC, but don't fully commit it yet.
 	let update_add = get_htlc_update_msgs(&local_node, &remote_node_id);
@@ -373,7 +373,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
 	let onion1 = RecipientOnionFields::secret_only(payment_secret1);
 	let payment_id1 = PaymentId(payment_hash1.0);
 	nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap();
-	check_added_monitors!(&nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	// Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's
@@ -383,7 +383,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
 	let onion2 = RecipientOnionFields::secret_only(payment_secret2);
 	let payment_id2 = PaymentId(payment_hash2.0);
 	nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap();
-	check_added_monitors!(&nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let update_add = get_htlc_update_msgs(&nodes[0], &node_id_1);
 	nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]);
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 6389f1da786..09c40c2c46c 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -259,7 +259,7 @@ fn test_manager_serialize_deserialize_events() {
 	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42);
 
 	node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).unwrap();
-	check_added_monitors!(node_a, 0);
+	check_added_monitors(&node_a, 0);
 
 	let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id());
 	let channel_id = ChannelId::v1_from_funding_txid(
@@ -462,7 +462,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	for monitor in node_0_monitors.drain(..) {
 		assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.channel_id(), monitor), Ok(ChannelMonitorUpdateStatus::Completed));
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 	}
 
 	nodes[0].node = &nodes_0_deserialized;
@@ -474,7 +474,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		check_spends!(txn[0], funding_tx);
 		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.compute_txid());
 	}
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
@@ -647,7 +647,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
 		.node
 		.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone())
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000);
@@ -697,7 +697,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
 	assert_eq!(err_msgs_0.len(), 1);
 	nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
 	assert!(nodes[1].node.list_usable_channels().is_empty());
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } , &[nodes[0].node.get_our_node_id()], 1000000);
 	check_closed_broadcast!(nodes[1], false);
@@ -754,7 +754,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 	nodes[0].node.send_payment_with_route(route, payment_hash,
 		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -785,7 +785,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 	expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
 
 	nodes[3].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[3], 2);
+	check_added_monitors(&nodes[3], 2);
 	expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
 
 	// Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
@@ -881,7 +881,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 	// Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
 	// claim should fly.
 	let mut ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
-	check_added_monitors!(nodes[3], 1);
+	check_added_monitors(&nodes[3], 1);
 	assert_eq!(ds_msgs.len(), 2);
 	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
@@ -889,7 +889,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 		MessageSendEvent::UpdateHTLCs { mut updates, .. } => {
 			let mut fulfill = updates.update_fulfill_htlcs.remove(0);
 			nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), fulfill);
-			check_added_monitors!(nodes[2], 1);
+			check_added_monitors(&nodes[2], 1);
 			let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
 			expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
 			do_commitment_signed_dance(&nodes[2], &nodes[3], &updates.commitment_signed, false, true);
@@ -951,7 +951,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
 	nodes[0].node.send_payment_with_route(route, payment_hash,
 		RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let payment_event = SendEvent::from_node(&nodes[0]);
 	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
@@ -985,7 +985,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	let payment_event = SendEvent::from_node(&nodes[1]);
 	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
 	nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	if claim_htlc {
 		get_monitor!(nodes[2], chan_id_2).provide_payment_preimage_unsafe_legacy(
@@ -1005,7 +1005,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
 
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[2], 1, reason, &[nodes[1].node.get_our_node_id()], 100000);
 	check_closed_broadcast!(nodes[2], true);
@@ -1031,7 +1031,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	nodes[1].node.timer_tick_occurred();
 	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_commitment_tx.len(), 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
@@ -1064,7 +1064,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	} else {
 		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
 	}
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let mut update = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	if claim_htlc {
@@ -1124,7 +1124,7 @@ fn removed_payment_no_manager_persistence() {
 		&nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }]
 	);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	match &events[0] {
@@ -1159,7 +1159,7 @@ fn removed_payment_no_manager_persistence() {
 		&nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]
 	);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	match &events[0] {
@@ -1266,7 +1266,7 @@ fn test_htlc_localremoved_persistence() {
 		RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
 	nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 97e4429fbd6..e16a43e2279 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -65,7 +65,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 	// Provide preimage to node 2 by claiming payment
 	nodes[2].node.claim_funds(our_payment_preimage);
 	expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
 
 	let claim_txn = if local_commitment {
@@ -79,7 +79,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 		// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
 		connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone()));
 		check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
-		check_added_monitors!(nodes[2], 1);
+		check_added_monitors(&nodes[2], 1);
 		check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 100000);
 		let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim
@@ -113,11 +113,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 		vec![node_2_commitment_txn.pop().unwrap()]
 	};
 	check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000);
 
 	// Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
 
 	if claim {
@@ -139,7 +139,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 		);
 	}
 
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	// Which should result in an immediate claim/fail of the HTLC:
 	let mut htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	if claim {
@@ -199,7 +199,7 @@ fn test_counterparty_revoked_reorg() {
 	nodes[0].node.claim_funds(payment_preimage_3);
 	let _ = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000);
 
 	let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
@@ -211,7 +211,7 @@ fn test_counterparty_revoked_reorg() {
 	// on any of the HTLCs, at least until we get six confirmations (which we won't get).
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
 	check_closed_broadcast!(nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000);
 
 	// Connect up to one block before the revoked transaction would be considered final, then do a
@@ -313,7 +313,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 			assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
 		}
 
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 	}
 
 	if reload_node {
@@ -380,7 +380,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		// we were already running.
 		nodes[0].node.test_process_background_events();
 	}
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(txn.len(), 1);
@@ -389,7 +389,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 	let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
 	if reorg_after_reload || !reload_node {
 		handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) };
 		check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000);
 	}
@@ -477,14 +477,14 @@ fn test_set_outpoints_partial_claiming() {
 	expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000);
 	nodes[0].node.claim_funds(payment_preimage_2);
 	expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000);
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 	nodes[0].node.get_and_clear_pending_msg_events();
 
 	// Connect blocks on node A commitment transaction
 	mine_transaction(&nodes[0], &remote_txn[0]);
 	check_closed_broadcast!(nodes[0], true);
 	check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	// Verify node A broadcast tx claiming both HTLCs
 	{
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -508,7 +508,7 @@ fn test_set_outpoints_partial_claiming() {
 		channel_funding_txo: None,
 		user_channel_id: None,
 	}]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	// Verify node B broadcast 2 HTLC-timeout txn
 	let partial_claim_tx = {
 		let mut node_txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
@@ -583,11 +583,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
 	check_closed_broadcast!(nodes[0], true);
 	assert!(nodes[0].node.list_channels().is_empty());
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000);
 	check_closed_broadcast!(nodes[1], true);
 	assert!(nodes[1].node.list_channels().is_empty());
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000);
 
 	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 5e7c7d9fd35..192bc6399e4 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -175,7 +175,7 @@ fn expect_channel_shutdown_state_with_htlc() {
 	// Claim Funds on Node2
 	nodes[2].node.claim_funds(payment_preimage_0);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
 
 	// Fulfil HTLCs on node1 and node0
@@ -187,7 +187,7 @@ fn expect_channel_shutdown_state_with_htlc() {
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0));
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false);
 
@@ -361,7 +361,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
 		.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone())
 		.unwrap();
 	check_closed_broadcast!(nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
 
 	assert!(nodes[1].node.list_channels().is_empty());
@@ -371,7 +371,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
 	check_spends!(node_txn[0], chan_1.3);
 	mine_transaction(&nodes[0], &node_txn[0]);
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[1].node.list_channels().is_empty());
@@ -452,7 +452,7 @@ fn updates_shutdown_wait() {
 	unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {});
 
 	nodes[2].node.claim_funds(payment_preimage_0);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
 
 	let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id);
@@ -463,7 +463,7 @@ fn updates_shutdown_wait() {
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0));
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false);
 
@@ -549,7 +549,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
 		.node
 		.send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0))
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	assert_eq!(updates.update_add_htlcs.len(), 1);
 	assert!(updates.update_fulfill_htlcs.is_empty());
@@ -564,7 +564,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	nodes[1].node.handle_shutdown(node_a_id, &node_0_shutdown);
 	assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none());
 	expect_and_process_pending_htlcs(&nodes[1], false);
@@ -718,7 +718,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	nodes[2].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash, 100_000);
 
 	let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id);
@@ -729,7 +729,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0));
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false);
 
@@ -834,7 +834,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 		// checks it, but in this case nodes[1] didn't ever get a chance to receive a
 		// closing_signed so we do it ourselves
 		check_closed_broadcast!(nodes[1], false);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) };
 		check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
 	}
@@ -920,7 +920,7 @@ fn test_upfront_shutdown_script() {
 	nodes[0].node.close_channel(&chan.2, &node_b_id).unwrap();
 	let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id);
 	nodes[1].node.handle_shutdown(node_a_id, &node_1_shutdown);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
@@ -935,7 +935,7 @@ fn test_upfront_shutdown_script() {
 	*nodes[0].override_init_features.borrow_mut() = None;
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000);
 	nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
 	nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown);
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -951,7 +951,7 @@ fn test_upfront_shutdown_script() {
 	//// channel smoothly
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 	nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
 	nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown);
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -1088,7 +1088,7 @@ fn test_segwit_v0_shutdown_script() {
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 	nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// Use a segwit v0 script supported even without option_shutdown_anysegwit
 	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
@@ -1127,7 +1127,7 @@ fn test_anysegwit_shutdown_script() {
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 	nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// Use a non-v0 segwit script supported by option_shutdown_anysegwit
 	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
@@ -1188,7 +1188,7 @@ fn test_unsupported_anysegwit_shutdown_script() {
 		Ok(_) => panic!("Expected error"),
 	}
 	nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
 	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
@@ -1217,7 +1217,7 @@ fn test_invalid_shutdown_script() {
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 	nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// Use a segwit v0 script with an unsupported witness program
 	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
@@ -1253,7 +1253,7 @@ fn test_user_shutdown_script() {
 		.node
 		.close_channel_with_feerate_and_script(&chan.2, &node_a_id, None, Some(shutdown_script))
 		.unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id);
@@ -1390,7 +1390,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
 			&& txn[0].output[0].script_pubkey.is_p2wsh())
 	);
 	check_closed_broadcast!(nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::ProcessingError {
 		err: "closing_signed negotiation failed to finish within two timer ticks".to_string(),
 	};
@@ -1819,7 +1819,7 @@ fn test_force_closure_on_low_stale_fee() {
 
 	// Finally, connect one more block and check the force-close happened.
 	connect_blocks(&nodes[1], 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_broadcast(&nodes[1], 1, true);
 	let reason = ClosureReason::PeerFeerateTooLow {
 		peer_feerate_sat_per_kw: 253,
diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs
index 060496d3bee..67a07325ad6 100644
--- a/lightning/src/ln/update_fee_tests.rs
+++ b/lightning/src/ln/update_fee_tests.rs
@@ -1124,7 +1124,7 @@ pub fn do_cannot_afford_on_holding_cell_release(
 	if let MessageSendEvent::SendRevokeAndACK { node_id, msg } = events.pop().unwrap() {
 		assert_eq!(node_id, node_a_id);
 		nodes[0].node.handle_revoke_and_ack(node_b_id, &msg);
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 	} else {
 		panic!();
 	}
diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs
index f94066789c1..2503ad81cde 100644
--- a/lightning/src/ln/zero_fee_commitment_tests.rs
+++ b/lightning/src/ln/zero_fee_commitment_tests.rs
@@ -158,7 +158,7 @@ fn test_htlc_claim_chunking() {
 
 	for (preimage, payment_hash) in node_1_preimages {
 		nodes[1].node.claim_funds(preimage);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		expect_payment_claimed!(nodes[1], payment_hash, NONDUST_HTLC_AMT_MSAT);
 	}
 	nodes[0].node.get_and_clear_pending_msg_events();
@@ -188,12 +188,12 @@ fn test_htlc_claim_chunking() {
 	assert_eq!(htlc_claims[1].output.len(), 24);
 
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], CHAN_CAPACITY);
 	assert!(nodes[0].node.list_channels().is_empty());
 	check_closed_broadcast!(nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], CHAN_CAPACITY);
 	assert!(nodes[1].node.list_channels().is_empty());
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index bcee29bcf2b..cf77e2fb486 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -1506,13 +1506,13 @@ impl From<u64> for UpdateName {
 mod tests {
 	use super::*;
 	use crate::chain::ChannelMonitorUpdateStatus;
+	use crate::check_closed_broadcast;
 	use crate::events::ClosureReason;
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs::BaseMessageHandler;
 	use crate::sync::Arc;
 	use crate::util::test_channel_signer::TestChannelSigner;
 	use crate::util::test_utils::{self, TestStore};
-	use crate::{check_added_monitors,
check_closed_broadcast}; use bitcoin::hashes::hex::FromHex; use core::cmp; @@ -1728,7 +1728,7 @@ mod tests { ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); @@ -1740,7 +1740,7 @@ mod tests { let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. // We always send at least two payments, and loop up to max_pending_updates_0 * 2.