@@ -19,11 +19,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
-use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
+use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields, Retry};
 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
 use crate::ln::msgs;
 use crate::ln::types::ChannelId;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use crate::routing::router::{PaymentParameters, RouteParameters};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::test_utils::TestBroadcaster;
@@ -3030,7 +3031,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
 		assert!(a.is_none());
 
 		nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
-		check_added_monitors(&nodes[1], 0);
+		check_added_monitors(&nodes[1], 1);
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	}
 
@@ -3040,8 +3041,8 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
 	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
 	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
 
-	// The event processing should release the last RAA updates on both channels.
-	check_added_monitors(&nodes[1], 2);
+	// The event processing should release the last RAA update.
+	check_added_monitors(&nodes[1], 1);
 
 	// When we fetch the next update the message getter will generate the next update for nodes[2],
 	// generating a further monitor update.
@@ -4271,3 +4272,121 @@ fn test_single_channel_multiple_mpp() {
 	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
 	check_added_monitors(&nodes[7], 1);
 }
+
+#[test]
+fn test_mpp_claim_to_holding_cell() {
+	// Previously, if an MPP payment was claimed while one channel was AwaitingRAA (causing the
+	// HTLC claim to go into the holding cell), and the RAA came in before the async monitor
+	// update with the preimage completed, the channel could hang waiting on itself.
+	// This tests that behavior.
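+	// The flow below builds a diamond A -> {B, C} -> D, sends a 500k msat MPP payment to D,
+	// drives the C <-> D channel into AwaitingRaa, then claims the payment with both of D's
+	// monitor updates completing asynchronously and checks that D can still release both
+	// fulfills.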
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+	let node_d_id = nodes[3].node.get_our_node_id();
+
+	// First open channels in a diamond and deliver the MPP payment.
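+	// The diamond topology used below:
+	//            nodes[0]
+	//    chan_1 /        \ chan_2
+	//     nodes[1]      nodes[2]
+	//    chan_3 \        / chan_4
+	//            nodes[3]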
+	let chan_1_scid = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+	let chan_2_scid = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+	let (chan_3_update, _, chan_3_id, ..) = create_announced_chan_between_nodes(&nodes, 1, 3);
+	let chan_3_scid = chan_3_update.contents.short_channel_id;
+	let (chan_4_update, _, chan_4_id, ..) = create_announced_chan_between_nodes(&nodes, 2, 3);
+	let chan_4_scid = chan_4_update.contents.short_channel_id;
+
+	let (mut route, payment_hash, preimage_1, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], nodes[3], 500_000);
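+	// Duplicate the single returned path and rewrite the copies so that one 250k msat part
+	// is routed through nodes[1] and the other through nodes[2].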
+	let path = route.paths[0].clone();
+	route.paths.push(path);
+	route.paths[0].hops[0].pubkey = node_b_id;
+	route.paths[0].hops[0].short_channel_id = chan_1_scid;
+	route.paths[0].hops[1].short_channel_id = chan_3_scid;
+	route.paths[0].hops[1].fee_msat = 250_000;
+	route.paths[1].hops[0].pubkey = node_c_id;
+	route.paths[1].hops[0].short_channel_id = chan_2_scid;
+	route.paths[1].hops[1].short_channel_id = chan_4_scid;
+	route.paths[1].hops[1].fee_msat = 250_000;
+	let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	send_along_route_with_secret(&nodes[0], route, paths, 500_000, payment_hash, payment_secret);
+
+	// Put the C <-> D channel into AwaitingRaa
+	let (preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[3]);
+	let onion = RecipientOnionFields::secret_only(payment_secret_2);
+	let id = PaymentId([42; 32]);
+	let pay_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV);
+	let route_params = RouteParameters::from_payment_params_and_value(pay_params, 400_000);
+	nodes[2].node.send_payment(payment_hash_2, onion, id, route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[2], 1);
+
+	let mut payment_event = SendEvent::from_node(&nodes[2]);
+	nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
+	nodes[3].node.handle_commitment_signed(node_c_id, &payment_event.commitment_msg);
+	check_added_monitors(&nodes[3], 1);
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[3], &node_c_id);
+	nodes[2].node.handle_revoke_and_ack(node_d_id, &raa);
+	check_added_monitors(&nodes[2], 1);
+
+	nodes[2].node.handle_commitment_signed(node_d_id, &cs);
+	check_added_monitors(&nodes[2], 1);
+
+	let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_d_id);
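+	// Hold C's RAA back for now: it is only delivered to D after the claim below, so D's
+	// fulfill on the C <-> D channel has to sit in the holding cell in the meantime.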
+
+	// Now claim the payment, completing both channel monitor updates async.
+	// In the current code, the C <-> D channel happens to be the `durable_preimage_channel`,
+	// which improves coverage somewhat but isn't strictly critical to the test.
+	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
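+	// Each `set_update_ret` call queues one `InProgress` result, so the next two monitor
+	// updates (one per channel being claimed over) are left pending.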
+	nodes[3].node.claim_funds(preimage_1);
+	check_added_monitors(&nodes[3], 2);
+
+	// Complete the B <-> D monitor update, freeing the first fulfill.
+	let (outpoint, latest_id, _) = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_3_id).unwrap().clone();
+	nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_id).unwrap();
+
+	let mut b_claim = get_htlc_update_msgs(&nodes[3], &node_b_id);
+
+	// When we deliver the pre-claim RAA, node D will shove the monitor update into the blocked
+	// state since we have a pending MPP payment which is blocking RAA monitor updates.
+	nodes[3].node.handle_revoke_and_ack(node_c_id, &cs_raa);
+	check_added_monitors(&nodes[3], 0);
+
+	// Finally, complete the C <-> D monitor update. Previously, this unlock failed to be processed
+	// due to the existence of the blocked RAA update above.
+	let (outpoint, latest_id, _) = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_4_id).unwrap().clone();
+	nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_id).unwrap();
+	// Once we process monitor events (in this case by checking for the `PaymentClaimed` event),
+	// the RAA monitor update blocked above will be released.
+	let events = nodes[3].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	if let Event::PaymentClaimed { .. } = &events[0] {
+	} else {
+		panic!("Unexpected event: {events:?}");
+	}
+	if let Event::PendingHTLCsForwardable { .. } = &events[1] {
+	} else {
+		panic!("Unexpected event: {events:?}");
+	}
+	check_added_monitors(&nodes[3], 1);
+
+	// After the RAA monitor update completes, the C <-> D channel will be able to generate its
+	// fulfill updates as well.
+	let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id);
+	check_added_monitors(&nodes[3], 1);
+
+	// Finally, clear all the pending payments.
+	let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	let mut args = ClaimAlongRouteArgs::new(&nodes[0], &path[..], preimage_1);
+	let b_claim_msgs = (b_claim.update_fulfill_htlcs.pop().unwrap(), b_claim.commitment_signed);
+	let c_claim_msgs = (c_claim.update_fulfill_htlcs.pop().unwrap(), c_claim.commitment_signed);
+	let claims = vec![(b_claim_msgs, node_b_id), (c_claim_msgs, node_c_id)];
+	pass_claimed_payment_along_route_from_ev(250_000, claims, args);
+
+	expect_payment_sent(&nodes[0], preimage_1, None, true, true);
+
+	nodes[3].node.process_pending_htlc_forwards();
+	expect_payment_claimable!(nodes[3], payment_hash_2, payment_secret_2, 400_000);
+	claim_payment(&nodes[2], &[&nodes[3]], preimage_2);
+}