diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index d4ffb12d0b4..55169755227 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1138,7 +1138,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), &new_msg); } } - for update_fulfill in update_fulfill_htlcs.iter() { + let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || + !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); + for update_fulfill in update_fulfill_htlcs { out.locked_write(format!("Delivering update_fulfill_htlc from node {} to node {}.\n", $node, idx).as_bytes()); dest.handle_update_fulfill_htlc(nodes[$node].get_our_node_id(), update_fulfill); } @@ -1154,8 +1156,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { out.locked_write(format!("Delivering update_fee from node {} to node {}.\n", $node, idx).as_bytes()); dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg); } - let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || - !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); if $limit_events != ProcessMessages::AllMessages && processed_change { // If we only want to process some messages, don't deliver the CS until later. extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 68f25ada2d7..6ee0dfc3a8d 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -458,8 +458,8 @@ mod test { } check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], payer_id); - nodes[0].node.handle_update_fulfill_htlc(payee_id, &updates.update_fulfill_htlcs[0]); + let mut updates = get_htlc_update_msgs!(nodes[1], payer_id); + nodes[0].node.handle_update_fulfill_htlc(payee_id, updates.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index e7d29912be7..1d1d3c4654b 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -721,7 +721,7 @@ mod tests { #[cfg(simple_close)] fn handle_closing_sig(&self, _their_node_id: PublicKey, _msg: ClosingSig) {} fn handle_update_add_htlc(&self, _their_node_id: PublicKey, _msg: &UpdateAddHTLC) {} - fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, _msg: &UpdateFulfillHTLC) {} + fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, _msg: UpdateFulfillHTLC) {} fn handle_update_fail_htlc(&self, _their_node_id: PublicKey, _msg: &UpdateFailHTLC) {} fn handle_update_fail_malformed_htlc( &self, _their_node_id: PublicKey, _msg: &UpdateFailMalformedHTLC, diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 98fd7e718e6..0de372813b3 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1394,8 +1394,8 @@ mod tests { // Now manually walk the commitment signed dance - because we claimed two payments // back-to-back it doesn't fit into the neat walk commitment_signed_dance does. 
- let updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -1403,18 +1403,18 @@ mod tests { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - let bs_second_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0] .node - .handle_update_fulfill_htlc(node_b_id, &bs_second_updates.update_fulfill_htlcs[0]); + .handle_update_fulfill_htlc(node_b_id, bs_2nd_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); nodes[0] .node - .handle_commitment_signed_batch_test(node_b_id, &bs_second_updates.commitment_signed); + .handle_commitment_signed_batch_test(node_b_id, &bs_2nd_updates.commitment_signed); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); expect_payment_path_successful!(nodes[0]); diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 39b6d16d318..e4fd7ad2ef8 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1102,11 +1102,22 @@ pub enum Event { /// /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain. path: Path, - /// The hold times as reported by each hop. The unit in which the hold times are expressed are 100's of - /// milliseconds. So a hop reporting 2 is a hold time that corresponds to roughly 200 milliseconds. As earlier - /// hops hold on to an HTLC for longer, the hold times in the list are expected to decrease. When our peer - /// didn't provide attribution data, the list is empty. The same applies to HTLCs that were resolved onchain. - /// Because of unavailability of hold times, the list may be shorter than the number of hops in the path. + /// The time that each hop indicated it held the HTLC. + /// + /// The unit in which the hold times are expressed are 100's of milliseconds. So a hop + /// reporting 2 is a hold time that corresponds to between 200 and 299 milliseconds. + /// + /// We expect that at each hop the actual hold time will be strictly greater than the hold + /// time of the following hops, as a node along the path shouldn't have completed the HTLC + /// until the next node has completed it. Note that because hold times are in 100's of ms, + /// hold times as reported are likely to often be equal across hops. + /// + /// If our peer didn't provide attribution data or the HTLC resolved on chain, the list + /// will be empty. + /// + /// Each entry will correspond with one entry in [`Path::hops`], or, thereafter, the + /// [`BlindedTail::trampoline_hops`] in [`Path::blinded_tail`]. Because not all nodes + /// support hold times, the list may be shorter than the number of hops in the path. 
hold_times: Vec<u32>,
},
/// Indicates an outbound HTLC we sent failed, likely due to an intermediary node being unable to
@@ -1159,11 +1170,22 @@ pub enum Event {
error_code: Option<u16>,
#[cfg(any(test, feature = "_test_utils"))]
error_data: Option<Vec<u8>>,
- /// The hold times as reported by each hop. The unit in which the hold times are expressed are 100's of
- /// milliseconds. So a hop reporting 2 is a hold time that corresponds to roughly 200 milliseconds. As earlier
- /// hops hold on to an HTLC for longer, the hold times in the list are expected to decrease. When our peer
- /// didn't provide attribution data, the list is empty. The same applies to HTLCs that were resolved onchain.
- /// Because of unavailability of hold times, the list may be shorter than the number of hops in the path.
+ /// The time that each hop indicated it held the HTLC.
+ ///
+ /// The unit in which the hold times are expressed are 100's of milliseconds. So a hop
+ /// reporting 2 is a hold time that corresponds to between 200 and 299 milliseconds.
+ ///
+ /// We expect that at each hop the actual hold time will be strictly greater than the hold
+ /// time of the following hops, as a node along the path shouldn't have completed the HTLC
+ /// until the next node has completed it. Note that because hold times are in 100's of ms,
+ /// hold times as reported are likely to often be equal across hops.
+ ///
+ /// If our peer didn't provide attribution data or the HTLC resolved on chain, the list
+ /// will be empty.
+ ///
+ /// Each entry will correspond with one entry in [`Path::hops`], or, thereafter, the
+ /// [`BlindedTail::trampoline_hops`] in [`Path::blinded_tail`]. Because not all nodes
+ /// support hold times, the list may be shorter than the number of hops in the path.
hold_times: Vec<u32>,
},
/// Indicates that a probe payment we sent returned successful, i.e., only failed at the destination.
diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs
index 69abfbf3312..efabf528441 100644
--- a/lightning/src/ln/async_signer_tests.rs
+++ b/lightning/src/ln/async_signer_tests.rs
@@ -825,20 +825,20 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) {
// Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the
// commitment_signed.
- let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
- match events_2[0] {
+ match events_2.remove(0) {
MessageSendEvent::UpdateHTLCs {
node_id: _,
channel_id: _,
- updates: msgs::CommitmentUpdate { ref update_fulfill_htlcs, ref commitment_signed, .. },
+ updates: msgs::CommitmentUpdate { mut update_fulfill_htlcs, commitment_signed, ..
}, } => { - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); if monitor_update_failure { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } else { @@ -1401,8 +1401,8 @@ fn test_no_disconnect_while_async_commitment_signed_expecting_remote_revoke_and_ // After processing the `update_fulfill`, they'll only be able to send `revoke_and_ack` until // the `commitment_signed` is no longer pending. - let update = get_htlc_update_msgs!(&nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update.update_fulfill_htlcs[0]); + let mut update = get_htlc_update_msgs!(&nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &update.commitment_signed); check_added_monitors(&nodes[0], 1); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 843b6331ece..5827538fae8 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -114,9 +114,9 @@ fn test_monitor_and_persister_update_fail() { expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); { let mut per_peer_lock; @@ -360,7 +360,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(update_fee.is_none()); if (disconnect_count & 16) == 0 { - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); + let fulfill_msg = update_fulfill_htlcs[0].clone(); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill_msg); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -467,7 +468,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0].node.handle_update_fulfill_htlc( node_b_id, - &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0], + bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0].clone(), ); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); @@ -1286,7 +1287,7 @@ fn test_monitor_update_fail_reestablish() { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1348,7 +1349,7 @@ fn 
test_monitor_update_fail_reestablish() { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage); } @@ -1556,13 +1557,14 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); - let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + let mut bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msgs.len(), 2); - match bs_msgs[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + match bs_msgs.remove(0) { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } => { + assert_eq!(node_id, node_a_id); + let update_fulfill = updates.update_fulfill_htlcs.remove(0); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0] .node @@ -1576,7 +1578,7 @@ fn claim_while_disconnected_monitor_update_fail() { _ => panic!("Unexpected event"), } - match bs_msgs[1] { + match bs_msgs[0] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, msg); @@ -1881,9 +1883,9 @@ fn test_monitor_update_fail_claim() { expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); - let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_fulfill_update.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); + let mut bs_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_fulfill.update_fulfill_htlcs.remove(0)); + commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); // Get the payment forwards, note that they were batched into one commitment update. 
@@ -2090,8 +2092,8 @@ fn monitor_update_claim_fail_no_response() { expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); + let mut bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_updates.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -2493,9 +2495,9 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); - let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); + let mut bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); @@ -2509,7 +2511,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true); expect_payment_path_successful!(nodes[0]); @@ -2786,13 +2788,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { check_added_monitors!(nodes[0], 1); let commitment_msg = match events.pop().unwrap() { - MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } => { assert_eq!(node_id, node_b_id); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(node_a_id, &updates.update_fulfill_htlcs[0]); + let update_fulfill = updates.update_fulfill_htlcs.remove(0); + nodes[1].node.handle_update_fulfill_htlc(node_a_id, update_fulfill); expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -2923,7 +2926,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f ); fulfill_msg.attribution_data = cs_updates.update_fulfill_htlcs[0].attribution_data.clone(); } - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &fulfill_msg); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); @@ -2933,7 +2936,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); 
nodes[0].node.handle_update_fulfill_htlc( node_b_id, - &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0], + bs_updates.as_mut().unwrap().update_fulfill_htlcs.remove(0), ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); if htlc_status == HTLCStatusAtDupClaim::Cleared { @@ -2975,7 +2978,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( node_b_id, - &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0], + bs_updates.as_mut().unwrap().update_fulfill_htlcs.remove(0), ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); } @@ -3155,7 +3158,7 @@ fn double_temp_error() { } }; assert_eq!(node_id, node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_1); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_1); check_added_monitors!(nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); @@ -3199,7 +3202,7 @@ fn double_temp_error() { check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_2); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_2); check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false); @@ -3465,11 +3468,11 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut cs_htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id); nodes[1] .node - .handle_update_fulfill_htlc(node_c_id, &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - let commitment = cs_htlc_fulfill_updates.commitment_signed; + .handle_update_fulfill_htlc(node_c_id, cs_htlc_fulfill.update_fulfill_htlcs.remove(0)); + let commitment = cs_htlc_fulfill.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[2], &commitment, false, false); check_added_monitors(&nodes[1], 0); @@ -3480,7 +3483,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); - let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let mut as_htlc_fulfill = get_htlc_update_msgs!(nodes[0], node_b_id); if completion_mode != BlockedUpdateComplMode::Sync { // We use to incorrectly handle monitor update completion in cases where we completed a // monitor update async or after reload. We test both based on the `completion_mode`. 
@@ -3488,7 +3491,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode } nodes[1] .node - .handle_update_fulfill_htlc(node_a_id, &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); + .handle_update_fulfill_htlc(node_a_id, as_htlc_fulfill.update_fulfill_htlcs.remove(0)); check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2)); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -3524,10 +3527,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode // Note that when completing as a side effect of a reload we completed the CS dance in // `reconnect_nodes` above. if completion_mode != BlockedUpdateComplMode::AtReload { - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_htlc_fulfill_updates.commitment_signed, - ); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); @@ -3557,13 +3559,13 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode // When we fetch the next update the message getter will generate the next update for nodes[2], // generating a further monitor update. - let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_c_id); + let mut bs_htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_c_id); check_added_monitors(&nodes[1], 1); nodes[2] .node - .handle_update_fulfill_htlc(node_b_id, &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - let commitment = bs_htlc_fulfill_updates.commitment_signed; + .handle_update_fulfill_htlc(node_b_id, bs_htlc_fulfill.update_fulfill_htlcs.remove(0)); + let commitment = bs_htlc_fulfill.commitment_signed; do_commitment_signed_dance(&nodes[2], &nodes[1], &commitment, false, false); expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } @@ -3620,8 +3622,8 @@ fn do_test_inverted_mon_completion_order( expect_payment_claimed!(nodes[2], payment_hash, 100_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); + let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -3739,10 +3741,10 @@ fn do_test_inverted_mon_completion_order( // node A. 
} - let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_updates.update_fulfill_htlcs.remove(0)); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false); expect_payment_forwarded!( @@ -3808,8 +3810,8 @@ fn do_test_durable_preimages_on_closed_channel( expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); + let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -4002,8 +4004,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); + let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -4118,17 +4120,18 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); if hold_chan_a { // The first update will be on the A <-> B channel, which we optionally allow to complete. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); check_added_monitors(&nodes[1], 1); if !hold_chan_a { - let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); + let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let mut update_fulfill = bs_updates.update_fulfill_htlcs.remove(0); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(&nodes[0], payment_preimage); } @@ -4196,7 +4199,8 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { assert_eq!(a_update.len(), 1); assert_eq!(c_update.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &a_update[0].update_fulfill_htlcs[0]); + let update_fulfill = a_update[0].update_fulfill_htlcs[0].clone(); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill); commitment_signed_dance!(nodes[0], nodes[1], a_update[0].commitment_signed, false); expect_payment_sent(&nodes[0], payment_preimage, None, true, true); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); @@ -4268,9 +4272,9 @@ fn test_partial_claim_mon_update_compl_actions() { // blocks. nodes[3].chain_monitor.complete_sole_pending_chan_update(&chan_3_id); expect_payment_claimed!(&nodes[3], payment_hash, 200_000); - let updates = get_htlc_update_msgs(&nodes[3], &node_b_id); + let mut updates = get_htlc_update_msgs(&nodes[3], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_d_id, updates.update_fulfill_htlcs.remove(0)); check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, false); let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &node_a_id); @@ -4306,8 +4310,9 @@ fn test_partial_claim_mon_update_compl_actions() { } match remove_first_msg_event_to_node(&node_c_id, &mut ds_msgs) { - MessageSendEvent::UpdateHTLCs { updates, .. } => { - nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); + MessageSendEvent::UpdateHTLCs { mut updates, .. 
} => { + let update_fulfill = updates.update_fulfill_htlcs.remove(0); + nodes[2].node.handle_update_fulfill_htlc(node_d_id, update_fulfill); check_added_monitors(&nodes[2], 1); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); let _cs_updates_for_a = get_htlc_update_msgs(&nodes[2], &node_a_id); @@ -4392,9 +4397,9 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); @@ -4603,7 +4608,7 @@ fn test_single_channel_multiple_mpp() { do_a_write_background.send(()).unwrap(); }); block_thrd2.store(false, Ordering::Release); - let first_updates = get_htlc_update_msgs(&nodes[8], &node_h_id); + let mut first_updates = get_htlc_update_msgs(&nodes[8], &node_h_id); thrd2.join().unwrap(); // Disconnect node 6 from all its peers so it doesn't bother to fail the HTLCs back @@ -4614,7 +4619,8 @@ fn test_single_channel_multiple_mpp() { nodes[7].node.peer_disconnected(node_f_id); nodes[7].node.peer_disconnected(node_g_id); - nodes[7].node.handle_update_fulfill_htlc(node_i_id, &first_updates.update_fulfill_htlcs[0]); + let first_update_fulfill = first_updates.update_fulfill_htlcs.remove(0); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, first_update_fulfill); check_added_monitors(&nodes[7], 1); expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false); nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &first_updates.commitment_signed); @@ -4661,22 +4667,22 @@ fn test_single_channel_multiple_mpp() { nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs); check_added_monitors(&nodes[8], 1); - let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id); + let (mut updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id); - nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[0]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false); - nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[1]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false); let mut next_source = 4; - if let Some(update) = updates.update_fulfill_htlcs.get(2) { - nodes[7].node.handle_update_fulfill_htlc(node_i_id, update); + if let Some(update) = updates.update_fulfill_htlcs.get(0) { + nodes[7].node.handle_update_fulfill_htlc(node_i_id, update.clone()); expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false); next_source += 1; } nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &updates.commitment_signed); nodes[7].node.handle_revoke_and_ack(node_i_id, &raa); - if updates.update_fulfill_htlcs.get(2).is_some() { + if updates.update_fulfill_htlcs.get(0).is_some() { check_added_monitors(&nodes[7], 5); } 
else { check_added_monitors(&nodes[7], 4); @@ -4688,22 +4694,22 @@ fn test_single_channel_multiple_mpp() { nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs); check_added_monitors(&nodes[8], 2); - let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id); + let (mut updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id); - nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[0]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); next_source += 1; - nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[1]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); next_source += 1; - if let Some(update) = updates.update_fulfill_htlcs.get(2) { - nodes[7].node.handle_update_fulfill_htlc(node_i_id, update); + if let Some(update) = updates.update_fulfill_htlcs.get(0) { + nodes[7].node.handle_update_fulfill_htlc(node_i_id, update.clone()); expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); } nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &updates.commitment_signed); nodes[7].node.handle_revoke_and_ack(node_i_id, &raa); - if updates.update_fulfill_htlcs.get(2).is_some() { + if updates.update_fulfill_htlcs.get(0).is_some() { check_added_monitors(&nodes[7], 5); } else { check_added_monitors(&nodes[7], 4); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ab61cee7036..26d5314cd2b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7500,7 +7500,6 @@ where true } }); - let now = duration_since_epoch(); pending_outbound_htlcs.retain(|htlc| { if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state { log_trace!( @@ -7511,7 +7510,7 @@ where // We really want take() here, but, again, non-mut ref :( match outcome.clone() { OutboundHTLCOutcome::Failure(mut reason) => { - hold_time(htlc.send_timestamp, now).map(|hold_time| { + hold_time_since(htlc.send_timestamp).map(|hold_time| { reason.set_hold_time(hold_time); }); revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); @@ -12841,7 +12840,7 @@ where (58, self.interactive_tx_signing_session, option), // Added in 0.2 (59, self.funding.minimum_depth_override, option), // Added in 0.2 (60, self.context.historical_scids, optional_vec), // Added in 0.2 - (61, fulfill_attribution_data, optional_vec), + (61, fulfill_attribution_data, optional_vec), // Added in 0.2 }); Ok(()) @@ -13238,12 +13237,12 @@ where (51, is_manual_broadcast, option), (53, funding_tx_broadcast_safe_event_emitted, option), (54, pending_funding, optional_vec), // Added in 0.2 - (55, removed_htlc_attribution_data, optional_vec), - (57, holding_cell_attribution_data, optional_vec), + (55, removed_htlc_attribution_data, optional_vec), // Added in 0.2 + (57, holding_cell_attribution_data, optional_vec), // Added in 0.2 (58, interactive_tx_signing_session, option), // Added in 0.2 (59, minimum_depth_override, option), // Added in 0.2 (60, historical_scids, optional_vec), // Added in 0.2 - (61, fulfill_attribution_data, optional_vec), + (61, fulfill_attribution_data, optional_vec), // Added in 0.2 }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -13598,7 +13597,7 @@ 
where
}
}

-pub(crate) fn duration_since_epoch() -> Option<Duration> {
+fn duration_since_epoch() -> Option<Duration> {
#[cfg(not(feature = "std"))]
let now = None;
@@ -13614,9 +13613,9 @@ pub(crate) fn duration_since_epoch() -> Option<Duration> {
/// Returns the time expressed in hold time units (1 unit = 100 ms) that has elapsed between send_timestamp and now. If
/// any of the arguments are `None`, returns `None`.
-pub(crate) fn hold_time(send_timestamp: Option<Duration>, now: Option<Duration>) -> Option<u32> {
+pub(crate) fn hold_time_since(send_timestamp: Option<Duration>) -> Option<u32> {
send_timestamp.and_then(|t| {
- now.map(|now| {
+ duration_since_epoch().map(|now| {
let elapsed = now.saturating_sub(t).as_millis() / HOLD_TIME_UNIT_MILLIS;
u32::try_from(elapsed).unwrap_or(u32::MAX)
})
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7f77f20e088..76c972fe8a0 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -60,11 +60,10 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight;
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::channel::{
- self, hold_time, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel,
- OutboundV1Channel, ReconnectionMsg, ShutdownResult, UpdateFulfillCommitFetch,
- WithChannelContext,
+ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, FundedChannel,
+ InboundV1Channel, OutboundV1Channel, PendingV2Channel, ReconnectionMsg, ShutdownResult,
+ UpdateFulfillCommitFetch, WithChannelContext,
};
-use crate::ln::channel::{duration_since_epoch, PendingV2Channel};
use crate::ln::channel_state::ChannelDetails;
use crate::ln::inbound_payment;
use crate::ln::msgs;
@@ -7899,7 +7898,7 @@ where
};
let attribution_data = process_fulfill_attribution_data(
- attribution_data.as_ref(),
+ attribution_data,
&htlc.prev_hop.incoming_packet_shared_secret,
0,
);
@@ -8264,7 +8263,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
startup_replay: bool, next_channel_counterparty_node_id: PublicKey,
next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
- next_user_channel_id: Option<u128>, attribution_data: Option<&AttributionData>,
+ next_user_channel_id: Option<u128>, attribution_data: Option<AttributionData>,
send_timestamp: Option<Duration>,
) {
match source {
@@ -8299,8 +8298,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);

// Obtain hold time, if available.
- let now = duration_since_epoch();
- let hold_time = hold_time(send_timestamp, now).unwrap_or(0);
+ let hold_time = hold_time_since(send_timestamp).unwrap_or(0);

// If attribution data was received from downstream, we shift it and get it ready for adding our hold
// time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA
@@ -9870,7 +9868,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
}

fn internal_update_fulfill_htlc(
- &self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC,
+ &self, counterparty_node_id: &PublicKey, msg: msgs::UpdateFulfillHTLC,
) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
let next_user_channel_id;
@@ -9929,7 +9927,7 @@ This indicates a bug inside LDK.
Please report this error at https://github.com/ funding_txo, msg.channel_id, Some(next_user_channel_id), - msg.attribution_data.as_ref(), + msg.attribution_data, send_timestamp, ); @@ -13488,7 +13486,7 @@ where } fn handle_update_fulfill_htlc( - &self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC, + &self, counterparty_node_id: PublicKey, msg: msgs::UpdateFulfillHTLC, ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_update_fulfill_htlc(&counterparty_node_id, msg); @@ -17048,20 +17046,20 @@ mod tests { expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); check_added_monitors!(nodes[1], 2); - let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); + let mut bs_1st_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); check_added_monitors!(nodes[1], 1); - let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_2nd_updates.update_fulfill_htlcs.remove(0)); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); @@ -18319,9 +18317,10 @@ pub mod bench { expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000); match $node_b.get_and_clear_pending_msg_events().pop().unwrap() { - MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { + MessageSendEvent::UpdateHTLCs { node_id, mut updates, .. 
} => { assert_eq!(node_id, $node_a.get_our_node_id()); - $node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + let fulfill = updates.update_fulfill_htlcs.remove(0); + $node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), fulfill); $node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &updates.commitment_signed); }, _ => panic!("Failed to generate claim event"), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c906a65cec4..9529e49ccfc 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3813,7 +3813,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { ($node: expr, $prev_node: expr) => {{ $node.node.handle_update_fulfill_htlc( $prev_node.node.get_our_node_id(), - &next_msgs.as_ref().unwrap().0, + next_msgs.as_ref().unwrap().0.clone(), ); check_added_monitors!($node, 0); assert!($node.node.get_and_clear_pending_msg_events().is_empty()); @@ -3824,7 +3824,7 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { ($idx: expr, $node: expr, $prev_node: expr, $next_node: expr, $new_msgs: expr) => {{ $node.node.handle_update_fulfill_htlc( $prev_node.node.get_our_node_id(), - &next_msgs.as_ref().unwrap().0, + next_msgs.as_ref().unwrap().0.clone(), ); let mut fee = { let (base_fee, prop_fee) = { @@ -5003,7 +5003,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a.node.handle_update_add_htlc(node_b_id, &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { - node_a.node.handle_update_fulfill_htlc(node_b_id, &update_fulfill); + node_a.node.handle_update_fulfill_htlc(node_b_id, update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { node_a.node.handle_update_fail_htlc(node_b_id, &update_fail); @@ -5082,7 +5082,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b.node.handle_update_add_htlc(node_a_id, &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { - node_b.node.handle_update_fulfill_htlc(node_a_id, &update_fulfill); + node_b.node.handle_update_fulfill_htlc(node_a_id, update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { node_b.node.handle_update_fail_htlc(node_a_id, &update_fail); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 217057da1ce..64232d50029 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -938,10 +938,10 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); check_added_monitors(&nodes[2], 1); - let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); - let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); + let mut commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); + let update_fulfill = commitment_update.update_fulfill_htlcs.remove(0); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &update_fulfill); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, update_fulfill); let err_msg = get_err_msg(&nodes[1], &node_c_id); assert_eq!(err_msg.channel_id, chan_2.2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1735,9 +1735,9 @@ pub fn test_multiple_package_conflicts() { // // Because two 
update_fulfill_htlc messages are created at once, the commitment_signed_dance // macro doesn't work properly and we must process the first update_fulfill_htlc manually. - let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors(&nodes[0], 1); @@ -1746,21 +1746,21 @@ pub fn test_multiple_package_conflicts() { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commit_signed); check_added_monitors(&nodes[1], 4); - let events = nodes[1].node.get_and_clear_pending_msg_events(); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - let revoke_ack = match &events[1] { + let revoke_ack = match events.remove(1) { MessageSendEvent::SendRevokeAndACK { node_id: _, msg } => msg, _ => panic!("Unexpected event"), }; - nodes[0].node.handle_revoke_and_ack(node_b_id, revoke_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_ack); expect_payment_sent!(nodes[0], preimage_1); - let updates = match &events[0] { + let mut updates = match events.remove(0) { MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates } => updates, _ => panic!("Unexpected event"), }; assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], preimage_2); @@ -2885,8 +2885,8 @@ pub fn test_dup_events_on_peer_disconnect() { nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); check_added_monitors(&nodes[1], 1); - let claim_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &claim_msgs.update_fulfill_htlcs[0]); + let mut claim_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, claim_msgs.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.peer_disconnected(node_b_id); @@ -3226,7 +3226,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken }; if messages_delivered >= 1 { - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlc); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_htlc); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); @@ -3474,15 +3474,15 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors(&nodes[1], 1); - let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); + let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - match events_2[0] { + match events_2.remove(0) { MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, - ref update_fulfill_htlcs, + mut update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref 
update_fee, @@ -3497,7 +3497,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_htlcs.remove(0)); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -4516,8 +4516,8 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { nodes[4].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[4], dup_payment_hash, 800_000); check_added_monitors(&nodes[4], 1); - let updates = get_htlc_update_msgs!(nodes[4], node_c_id); - nodes[2].node.handle_update_fulfill_htlc(node_e_id, &updates.update_fulfill_htlcs[0]); + let mut updates = get_htlc_update_msgs!(nodes[4], node_c_id); + nodes[2].node.handle_update_fulfill_htlc(node_e_id, updates.update_fulfill_htlcs.remove(0)); let _cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); expect_payment_forwarded!(nodes[2], nodes[1], nodes[4], Some(196), false, false); check_added_monitors(&nodes[2], 1); @@ -4589,7 +4589,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // provide to node A. mine_transaction(&nodes[1], htlc_success_tx_to_confirm); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(392), true, true); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -4597,7 +4597,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert!(updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); } @@ -5310,8 +5310,8 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); - let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); + let mut bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); @@ -5799,8 +5799,8 @@ pub fn test_free_and_fail_holding_cell_htlcs() { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, amt_1); - let update_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msgs.update_fulfill_htlcs[0]); + let mut update_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_msgs.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); expect_payment_sent!(nodes[0], payment_preimage_1); } @@ -8127,9 +8127,9 @@ pub fn 
test_update_err_monitor_lockdown() { check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); { let mut per_peer_lock; let mut peer_state_lock; @@ -8543,14 +8543,15 @@ fn do_test_onchain_htlc_settlement_after_close( check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - let carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + let mut carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(carol_updates.update_add_htlcs.is_empty()); assert!(carol_updates.update_fail_htlcs.is_empty()); assert!(carol_updates.update_fail_malformed_htlcs.is_empty()); assert!(carol_updates.update_fee.is_none()); assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &carol_updates.update_fulfill_htlcs[0]); + let carol_fulfill = carol_updates.update_fulfill_htlcs.remove(0); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, carol_fulfill); let went_onchain = go_onchain_before_fulfill || force_closing_node == 1; let fee = if went_onchain { None } else { Some(1000) }; expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], fee, went_onchain, false); @@ -11201,11 +11202,11 @@ fn do_test_multi_post_event_actions(do_reload: bool) { expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); for dest in &[1, 2] { - let htlc_fulfill = get_htlc_update_msgs!(nodes[*dest], node_a_id); + let mut htlc_fulfill = get_htlc_update_msgs!(nodes[*dest], node_a_id); let dest_node_id = nodes[*dest].node.get_our_node_id(); nodes[0] .node - .handle_update_fulfill_htlc(dest_node_id, &htlc_fulfill.update_fulfill_htlcs[0]); + .handle_update_fulfill_htlc(dest_node_id, htlc_fulfill.update_fulfill_htlcs.remove(0)); commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill.commitment_signed, false); check_added_monitors(&nodes[0], 0); } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 19539af0e1a..f54baea5c2f 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -505,7 +505,7 @@ pub fn channel_reserve_in_flight_removes() { nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); check_added_monitors(&nodes[1], 1); - let bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); + let mut bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not // remove the second HTLC when we send the HTLC back from B to A. 
@@ -514,7 +514,7 @@ pub fn channel_reserve_in_flight_removes() {
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_removes.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_removes.update_fulfill_htlcs.remove(0));
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_removes.commitment_signed);
check_added_monitors(&nodes[0], 1);
let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
@@ -528,7 +528,7 @@ pub fn channel_reserve_in_flight_removes() {
nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
check_added_monitors(&nodes[1], 1);
- let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let mut bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id);
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
check_added_monitors(&nodes[0], 1);
@@ -543,7 +543,7 @@ pub fn channel_reserve_in_flight_removes() {
// However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
// can no longer broadcast a commitment transaction with it and B has the preimage so can go
// on-chain as necessary).
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_cs.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_cs.update_fulfill_htlcs.remove(0));
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed);
check_added_monitors(&nodes[0], 1);
let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
@@ -1816,7 +1816,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
attribution_data: None,
};
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_msg);
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
@@ -1966,7 +1966,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
update_fulfill_msg.htlc_id = 1;
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_msg);
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
@@ -2025,7 +2025,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_msg);
assert!(nodes[0].node.list_channels().is_empty());
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index fc35739c18c..6d0091fc325 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -552,7 +552,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 3_000_100);
- let b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+ let mut b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
// We claim the dust payment here as well, but it won't impact our claimable balances as its
// dust and thus doesn't appear on chain at all.
nodes[1].node.claim_funds(dust_payment_preimage);
@@ -565,7 +565,8 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
if prev_commitment_tx {
// To build a previous commitment transaction, deliver one round of commitment messages.
- nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]);
+ let bs_fulfill = b_htlc_msgs.update_fulfill_htlcs.remove(0);
+ nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_fulfill);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed);
check_added_monitors!(nodes[0], 1);
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 03389c09609..e0219a5523f 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -2021,7 +2021,7 @@ pub trait ChannelMessageHandler: BaseMessageHandler {
/// Handle an incoming `update_add_htlc` message from the given peer.
fn handle_update_add_htlc(&self, their_node_id: PublicKey, msg: &UpdateAddHTLC);
/// Handle an incoming `update_fulfill_htlc` message from the given peer.
- fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: &UpdateFulfillHTLC);
+ fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: UpdateFulfillHTLC);
/// Handle an incoming `update_fail_htlc` message from the given peer.
fn handle_update_fail_htlc(&self, their_node_id: PublicKey, msg: &UpdateFailHTLC);
/// Handle an incoming `update_fail_malformed_htlc` message from the given peer.
diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs
index 47f8c388340..d45860b0e26 100644
--- a/lightning/src/ln/onion_utils.rs
+++ b/lightning/src/ln/onion_utils.rs
@@ -2871,12 +2871,10 @@ fn process_failure_packet(
/// attribution data is passed in, a new `AttributionData` field is instantiated. It is needless to say that in that
/// case the sender won't receive any hold times from nodes downstream of the current node.
pub(crate) fn process_fulfill_attribution_data(
- attribution_data: Option<&AttributionData>, shared_secret: &[u8], hold_time: u32,
+ attribution_data: Option<AttributionData>, shared_secret: &[u8], hold_time: u32,
) -> AttributionData {
let mut attribution_data =
- attribution_data.map_or(AttributionData::new(), |attribution_data| {
- let mut attribution_data = attribution_data.clone();
-
+ attribution_data.map_or(AttributionData::new(), |mut attribution_data| {
// Shift the existing attribution data to the right to make space for the new hold time and HMACs.
attribution_data.shift_right();
diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs
index 51937d675ba..a6410b26f84 100644
--- a/lightning/src/ln/outbound_payment.rs
+++ b/lightning/src/ln/outbound_payment.rs
@@ -110,7 +110,7 @@ pub(crate) enum PendingOutboundPayment {
// The deadline as duration since the Unix epoch for the async recipient to come online,
// after which we'll fail the payment.
//
- // Defaults to [`ASYNC_PAYMENT_TIMEOUT_RELATIVE_EXPIRY`].
+ // Defaults to creation time + [`ASYNC_PAYMENT_TIMEOUT_RELATIVE_EXPIRY`].
expiry_time: Duration,
},
Retryable {
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index f500c889dc3..a486d8c8b56 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -907,8 +907,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
check_added_monitors!(nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
- let htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id);
- nodes[1].node.handle_update_fulfill_htlc(node_c_id, &htlc_fulfill.update_fulfill_htlcs[0]);
+ let mut htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id);
+ let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0);
+ nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg);
check_added_monitors!(nodes[1], 1);
commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill.commitment_signed, false);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
@@ -1412,8 +1413,9 @@ fn test_fulfill_restart_failure() {
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 100_000);
- let htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill.update_fulfill_htlcs[0]);
+ let mut htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill_msg);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
// Now reload nodes[1]...
@@ -3003,10 +3005,10 @@ fn auto_retry_partial_failure() {
expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat);
nodes[1].node.claim_funds(payment_preimage);
expect_payment_claimed!(nodes[1], payment_hash, amt_msat);
- let bs_claim = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let mut bs_claim = get_htlc_update_msgs!(nodes[1], node_a_id);
assert_eq!(bs_claim.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_claim.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_claim.update_fulfill_htlcs.remove(0));
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim.commitment_signed);
check_added_monitors!(nodes[0], 1);
@@ -3014,7 +3016,7 @@ fn auto_retry_partial_failure() {
nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa);
check_added_monitors!(nodes[1], 4);
- let bs_2nd_claim = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let mut bs_2nd_claim = get_htlc_update_msgs!(nodes[1], node_a_id);
nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs);
check_added_monitors!(nodes[1], 1);
@@ -3024,8 +3026,10 @@ fn auto_retry_partial_failure() {
check_added_monitors!(nodes[0], 1);
expect_payment_path_successful!(nodes[0]);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_2nd_claim.update_fulfill_htlcs[0]);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_2nd_claim.update_fulfill_htlcs[1]);
+ let bs_second_fulfill_a = bs_2nd_claim.update_fulfill_htlcs.remove(0);
+ let bs_second_fulfill_b = bs_2nd_claim.update_fulfill_htlcs.remove(0);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_a);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_b);
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id);
@@ -4074,14 +4078,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint:
expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
if at_midpoint {
- let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]);
+ let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0));
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
} else {
- let htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill.update_fulfill_htlcs[0]);
- commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill.commitment_signed, false);
+ let mut fulfill = get_htlc_update_msgs!(nodes[1], node_a_id);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill.update_fulfill_htlcs.remove(0));
+ commitment_signed_dance!(nodes[0], nodes[1], fulfill.commitment_signed, false);
// Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd
// be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we
// expect to get the PaymentSent again later.
@@ -4293,11 +4297,12 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
check_added_monitors(&nodes[1], 1);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, true);
- let bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
+ let mut bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
check_added_monitors(&nodes[1], 1);
assert_eq!(bs_claims.len(), 1);
- if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] {
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]);
+ if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = bs_claims.remove(0) {
+ let fulfill = updates.update_fulfill_htlcs.remove(0);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
} else {
panic!();
@@ -4305,11 +4310,13 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
expect_payment_sent!(nodes[0], payment_preimage);
- let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+ let mut ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
assert_eq!(ds_claim_msgs.len(), 1);
- let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0]
+ let mut cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { mut updates, .. } =
+ ds_claim_msgs.remove(0)
{
- nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]);
+ let fulfill = updates.update_fulfill_htlcs.remove(0);
+ nodes[2].node.handle_update_fulfill_htlc(node_d_id, fulfill);
let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events();
check_added_monitors(&nodes[2], 1);
commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
@@ -4320,8 +4327,9 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
assert_eq!(cs_claim_msgs.len(), 1);
- if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] {
- nodes[0].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]);
+ if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = cs_claim_msgs.remove(0) {
+ let fulfill = updates.update_fulfill_htlcs.remove(0);
+ nodes[0].node.handle_update_fulfill_htlc(node_c_id, fulfill);
commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true);
} else {
panic!();
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index ef79fa4b30d..98e54eec925 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -463,7 +463,7 @@ impl ChannelMessageHandler for ErroringMessageHandler {
fn handle_update_add_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
- fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
+ fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: msgs::UpdateFulfillHTLC) {
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id);
}
fn handle_update_fail_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
@@ -2544,7 +2544,7 @@ where
self.message_handler.chan_handler.handle_update_add_htlc(their_node_id, &msg);
},
wire::Message::UpdateFulfillHTLC(msg) => {
- self.message_handler.chan_handler.handle_update_fulfill_htlc(their_node_id, &msg);
+ self.message_handler.chan_handler.handle_update_fulfill_htlc(their_node_id, msg);
},
wire::Message::UpdateFailHTLC(msg) => {
self.message_handler.chan_handler.handle_update_fail_htlc(their_node_id, &msg);
diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs
index c2ab17e7269..6c33d3156a8 100644
--- a/lightning/src/ln/quiescence_tests.rs
+++ b/lightning/src/ln/quiescence_tests.rs
@@ -198,8 +198,8 @@ fn test_quiescence_waits_for_async_signer_and_monitor_update() {
nodes[1].node.claim_funds(preimage);
check_added_monitors(&nodes[1], 1);
- let update = get_htlc_update_msgs!(&nodes[1], node_id_0);
- nodes[0].node.handle_update_fulfill_htlc(node_id_1, &update.update_fulfill_htlcs[0]);
+ let mut update = get_htlc_update_msgs!(&nodes[1], node_id_0);
+ nodes[0].node.handle_update_fulfill_htlc(node_id_1, update.update_fulfill_htlcs.remove(0));
nodes[0].node.handle_commitment_signed_batch_test(node_id_1, &update.commitment_signed);
check_added_monitors(&nodes[0], 1);
@@ -412,13 +412,13 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
// Now that quiescence is over, nodes are allowed to make updates again. nodes[1] will have its
// outbound HTLC finally go out, along with the fail/claim of nodes[0]'s payment.
- let update = get_htlc_update_msgs!(&nodes[1], node_id_0);
+ let mut update = get_htlc_update_msgs!(&nodes[1], node_id_0);
check_added_monitors(&nodes[1], 1);
nodes[0].node.handle_update_add_htlc(node_id_1, &update.update_add_htlcs[0]);
if fail_htlc {
nodes[0].node.handle_update_fail_htlc(node_id_1, &update.update_fail_htlcs[0]);
} else {
- nodes[0].node.handle_update_fulfill_htlc(node_id_1, &update.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_id_1, update.update_fulfill_htlcs.remove(0));
}
commitment_signed_dance!(&nodes[0], &nodes[1], update.commitment_signed, false);
@@ -451,11 +451,11 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
}
check_added_monitors(&nodes[0], 1);
- let update = get_htlc_update_msgs!(&nodes[0], node_id_1);
+ let mut update = get_htlc_update_msgs!(&nodes[0], node_id_1);
if fail_htlc {
nodes[1].node.handle_update_fail_htlc(node_id_0, &update.update_fail_htlcs[0]);
} else {
- nodes[1].node.handle_update_fulfill_htlc(node_id_0, &update.update_fulfill_htlcs[0]);
+ nodes[1].node.handle_update_fulfill_htlc(node_id_0, update.update_fulfill_htlcs.remove(0));
}
commitment_signed_dance!(&nodes[1], &nodes[0], update.commitment_signed, false);
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 4d07442acfd..23e828b93a5 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -956,14 +956,15 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
// Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
// claim should fly.
- let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+ let mut ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
check_added_monitors!(nodes[3], 1);
assert_eq!(ds_msgs.len(), 2);
if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
- let cs_updates = match ds_msgs[1] {
- MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
- nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ let mut cs_updates = match ds_msgs.remove(1) {
+ MessageSendEvent::UpdateHTLCs { mut updates, .. } => {
+ let mut fulfill = updates.update_fulfill_htlcs.remove(0);
+ nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), fulfill);
check_added_monitors!(nodes[2], 1);
let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
@@ -973,7 +974,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
_ => panic!(),
};
- nodes[0].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+ let fulfill = cs_updates.update_fulfill_htlcs.remove(0);
+ nodes[0].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), fulfill);
commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
expect_payment_sent!(nodes[0], payment_preimage);
@@ -1138,19 +1140,13 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
}
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match &events[0] {
- MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
- if claim_htlc {
- nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
- } else {
- nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
- }
- commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
- },
- _ => panic!("Unexpected event"),
+ let mut update = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+ if claim_htlc {
+ nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), update.update_fulfill_htlcs.remove(0));
+ } else {
+ nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update.update_fail_htlcs[0]);
}
+ commitment_signed_dance!(nodes[0], nodes[1], update.commitment_signed, false);
if claim_htlc {
expect_payment_sent!(nodes[0], payment_preimage);
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index a472cea7ed2..4a2a98815c6 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -138,10 +138,10 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
check_added_monitors!(nodes[1], 1);
// Which should result in an immediate claim/fail of the HTLC:
- let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ let mut htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
if claim {
assert_eq!(htlc_updates.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &htlc_updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), htlc_updates.update_fulfill_htlcs.remove(0));
} else {
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index ba36b5ab144..5a08144949f 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -179,16 +179,16 @@ fn expect_channel_shutdown_state_with_htlc() {
expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
// Fulfil HTLCs on node1 and node0
- let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
+ let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id);
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
- nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]);
+ nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0));
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
check_added_monitors!(nodes[1], 1);
- let updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let mut updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id);
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
// Still in "resolvingHTLCs" on chan1 after htlc removed on chan2
@@ -200,7 +200,7 @@ fn expect_channel_shutdown_state_with_htlc() {
assert!(updates_2.update_fail_malformed_htlcs.is_empty());
assert!(updates_2.update_fee.is_none());
assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates_2.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates_2.update_fulfill_htlcs.remove(0));
commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
expect_payment_sent!(nodes[0], payment_preimage_0);
@@ -455,16 +455,16 @@ fn updates_shutdown_wait() {
check_added_monitors!(nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash_0, 100_000);
- let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
+ let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id);
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
- nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]);
+ nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0));
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
check_added_monitors!(nodes[1], 1);
- let updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let mut updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id);
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
assert!(updates_2.update_add_htlcs.is_empty());
@@ -472,7 +472,7 @@ fn updates_shutdown_wait() {
assert!(updates_2.update_fail_malformed_htlcs.is_empty());
assert!(updates_2.update_fee.is_none());
assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates_2.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates_2.update_fulfill_htlcs.remove(0));
commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
expect_payment_sent!(nodes[0], payment_preimage_0);
@@ -719,16 +719,16 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
check_added_monitors!(nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash, 100_000);
- let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
+ let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id);
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
- nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]);
+ nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0));
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
check_added_monitors!(nodes[1], 1);
- let updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id);
+ let mut updates_2 = get_htlc_update_msgs!(nodes[1], node_a_id);
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
assert!(updates_2.update_add_htlcs.is_empty());
@@ -736,7 +736,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
assert!(updates_2.update_fail_malformed_htlcs.is_empty());
assert!(updates_2.update_fee.is_none());
assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates_2.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates_2.update_fulfill_htlcs.remove(0));
commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
expect_payment_sent!(nodes[0], payment_preimage);
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 4165afea767..4db680a20dd 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -1085,8 +1085,8 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
fn handle_update_add_htlc(&self, _their_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
self.received_msg(wire::Message::UpdateAddHTLC(msg.clone()));
}
- fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
- self.received_msg(wire::Message::UpdateFulfillHTLC(msg.clone()));
+ fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, msg: msgs::UpdateFulfillHTLC) {
+ self.received_msg(wire::Message::UpdateFulfillHTLC(msg));
}
fn handle_update_fail_htlc(&self, _their_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
self.received_msg(wire::Message::UpdateFailHTLC(msg.clone()));
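
For downstream implementors of `ChannelMessageHandler`, the net effect of the trait change above is that `update_fulfill_htlc` messages arrive by value rather than by reference, so the preimage and any attribution data can be moved out without cloning. A minimal sketch of the adjustment, not part of this patch (the `MyHandler` type and its inherent method are illustrative only; a real handler must implement the full trait):

    // Assumes the lightning and bitcoin crates at the revision of this patch.
    use bitcoin::secp256k1::PublicKey;
    use lightning::ln::msgs::UpdateFulfillHTLC;

    struct MyHandler;

    impl MyHandler {
        // Mirrors the new trait signature: `msg` is taken by value, so its fields can be
        // moved into downstream processing without a clone.
        fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, msg: UpdateFulfillHTLC) {
            let _preimage = msg.payment_preimage;
            let _attribution_data = msg.attribution_data;
        }
    }

The test-side counterpart of the same change is the recurring pattern above of binding the update struct mutably and moving the message out, e.g. `let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id);` followed by `nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0));`.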