diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 2b0d8c4c72b..1bc9900718a 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -115,7 +115,7 @@ fn do_test(data: &[u8], out: Out) { let path = Path { hops, blinded_tail }; let htlc_source = HTLCSource::OutboundRoute { - path, + path: path.clone(), session_priv, first_hop_htlc_msat: 0, payment_id, @@ -133,8 +133,19 @@ fn do_test(data: &[u8], out: Out) { } else { None }; - let encrypted_packet = OnionErrorPacket { data: failure_data.into(), attribution_data }; + let encrypted_packet = + OnionErrorPacket { data: failure_data.into(), attribution_data: attribution_data.clone() }; lightning::ln::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); + + if let Some(attribution_data) = attribution_data { + lightning::ln::decode_fulfill_attribution_data( + &secp_ctx, + &logger, + &path, + &session_priv, + attribution_data, + ); + } } /// Method that needs to be added manually, {name}_test diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 235bb39c7d4..53f74332a51 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -2732,6 +2732,7 @@ mod tests { payment_id: PaymentId([42; 32]), payment_hash: None, path: path.clone(), + hold_times: Vec::new(), }); let event = $receive.expect("PaymentPathSuccessful not handled within deadline"); match event { diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b830d2e90ba..39b6d16d318 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1102,6 +1102,12 @@ pub enum Event { /// /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain. path: Path, + /// The hold times as reported by each hop. The unit in which the hold times are expressed are 100's of + /// milliseconds. 
So a hop reporting 2 is a hold time that corresponds to roughly 200 milliseconds. As earlier + /// hops hold on to an HTLC for longer, the hold times in the list are expected to decrease. When our peer + /// didn't provide attribution data, the list is empty. The same applies to HTLCs that were resolved onchain. + /// Because of unavailability of hold times, the list may be shorter than the number of hops in the path. + hold_times: Vec, }, /// Indicates an outbound HTLC we sent failed, likely due to an intermediary node being unable to /// handle the HTLC. @@ -1153,7 +1159,11 @@ pub enum Event { error_code: Option, #[cfg(any(test, feature = "_test_utils"))] error_data: Option>, - #[cfg(any(test, feature = "_test_utils"))] + /// The hold times as reported by each hop. The unit in which the hold times are expressed are 100's of + /// milliseconds. So a hop reporting 2 is a hold time that corresponds to roughly 200 milliseconds. As earlier + /// hops hold on to an HTLC for longer, the hold times in the list are expected to decrease. When our peer + /// didn't provide attribution data, the list is empty. The same applies to HTLCs that were resolved onchain. + /// Because of unavailability of hold times, the list may be shorter than the number of hops in the path. hold_times: Vec, }, /// Indicates that a probe payment we sent returned successful, i.e., only failed at the destination. 
@@ -1792,7 +1802,6 @@ impl Writeable for Event { ref error_code, #[cfg(any(test, feature = "_test_utils"))] ref error_data, - #[cfg(any(test, feature = "_test_utils"))] ref hold_times, } => { 3u8.write(writer)?; @@ -1800,8 +1809,6 @@ impl Writeable for Event { error_code.write(writer)?; #[cfg(any(test, feature = "_test_utils"))] error_data.write(writer)?; - #[cfg(any(test, feature = "_test_utils"))] - hold_times.write(writer)?; write_tlv_fields!(writer, { (0, payment_hash, required), (1, None::, option), // network_update in LDK versions prior to 0.0.114 @@ -1813,6 +1820,7 @@ impl Writeable for Event { (9, None::, option), // retry in LDK versions prior to 0.0.115 (11, payment_id, option), (13, failure, required), + (15, *hold_times, optional_vec), }); }, &Event::PendingHTLCsForwardable { time_forwardable: _ } => { @@ -1910,10 +1918,16 @@ impl Writeable for Event { (4, funding_info, required), }) }, - &Event::PaymentPathSuccessful { ref payment_id, ref payment_hash, ref path } => { + &Event::PaymentPathSuccessful { + ref payment_id, + ref payment_hash, + ref path, + ref hold_times, + } => { 13u8.write(writer)?; write_tlv_fields!(writer, { (0, payment_id, required), + (1, *hold_times, optional_vec), (2, payment_hash, option), (4, path.hops, required_vec), (6, path.blinded_tail, option), @@ -2232,8 +2246,6 @@ impl MaybeReadable for Event { let error_code = Readable::read(reader)?; #[cfg(any(test, feature = "_test_utils"))] let error_data = Readable::read(reader)?; - #[cfg(any(test, feature = "_test_utils"))] - let hold_times = Readable::read(reader)?; let mut payment_hash = PaymentHash([0; 32]); let mut payment_failed_permanently = false; let mut network_update = None; @@ -2242,6 +2254,7 @@ impl MaybeReadable for Event { let mut short_channel_id = None; let mut payment_id = None; let mut failure_opt = None; + let mut hold_times = None; read_tlv_fields!(reader, { (0, payment_hash, required), (1, network_update, upgradable_option), @@ -2253,7 +2266,9 @@ impl 
MaybeReadable for Event { (7, short_channel_id, option), (11, payment_id, option), (13, failure_opt, upgradable_option), + (15, hold_times, optional_vec), }); + let hold_times = hold_times.unwrap_or(Vec::new()); let failure = failure_opt.unwrap_or_else(|| PathFailure::OnPath { network_update }); Ok(Some(Event::PaymentPathFailed { @@ -2267,7 +2282,6 @@ impl MaybeReadable for Event { error_code, #[cfg(any(test, feature = "_test_utils"))] error_data, - #[cfg(any(test, feature = "_test_utils"))] hold_times, })) }; @@ -2413,14 +2427,19 @@ impl MaybeReadable for Event { let mut f = || { _init_and_read_len_prefixed_tlv_fields!(reader, { (0, payment_id, required), + (1, hold_times, optional_vec), (2, payment_hash, option), (4, path, required_vec), (6, blinded_tail, option), }); + + let hold_times = hold_times.unwrap_or(Vec::new()); + Ok(Some(Event::PaymentPathSuccessful { payment_id: payment_id.0.unwrap(), payment_hash, path: Path { hops: path, blinded_tail }, + hold_times, })) }; f() diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index ab9f172ce1b..7d6ccfd440b 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -605,7 +605,7 @@ fn async_receive_flow_success() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); - let res = + let (res, _) = claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(static_invoice))); } @@ -1723,7 +1723,7 @@ fn refresh_static_invoices() { let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); - assert_eq!(res, 
Some(PaidBolt12Invoice::StaticInvoice(updated_invoice))); + assert_eq!(res.0, Some(PaidBolt12Invoice::StaticInvoice(updated_invoice))); } #[cfg_attr(feature = "std", ignore)] @@ -2053,5 +2053,5 @@ fn invoice_server_is_not_channel_peer() { let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); - assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(invoice))); + assert_eq!(res.0, Some(PaidBolt12Invoice::StaticInvoice(invoice))); } diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 4a868e0eb76..71c7687047d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -2333,6 +2333,8 @@ fn test_trampoline_unblinded_receive() { None, ).unwrap(); + // Use a different session key to construct the replacement onion packet. Note that the sender isn't aware of + // this and won't be able to decode the fulfill hold times. 
let outer_session_priv = secret_from_hex("e52c20461ed7acd46c4e7b591a37610519179482887bd73bf3b94617f8f03677"); let (outer_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], outer_total_msat, &recipient_onion_fields, outer_starting_htlc_offset, &None, None, Some(trampoline_packet)).unwrap(); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index deace0e0d1b..843b6331ece 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2890,8 +2890,12 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } - let fulfill_msg = - msgs::UpdateFulfillHTLC { channel_id: chan_id_2, htlc_id: 0, payment_preimage }; + let mut fulfill_msg = msgs::UpdateFulfillHTLC { + channel_id: chan_id_2, + htlc_id: 0, + payment_preimage, + attribution_data: None, + }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( @@ -2900,6 +2904,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f ); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], node_b_id); + // Note that we don't populate fulfill_msg.attribution_data here, which will lead to hold times being + // unavailable. } else { nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); @@ -2907,8 +2913,15 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1); - // Check that the message we're about to deliver matches the one generated: - assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]); + + // Check that the message we're about to deliver matches the one generated. Ignore attribution data. 
+ assert_eq!(fulfill_msg.channel_id, cs_updates.update_fulfill_htlcs[0].channel_id); + assert_eq!(fulfill_msg.htlc_id, cs_updates.update_fulfill_htlcs[0].htlc_id); + assert_eq!( + fulfill_msg.payment_preimage, + cs_updates.update_fulfill_htlcs[0].payment_preimage + ); + fulfill_msg.attribution_data = cs_updates.update_fulfill_htlcs[0].attribution_data.clone(); } nodes[1].node.handle_update_fulfill_htlc(node_c_id, &fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 10f2b9d76e4..fe7829e01c2 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -140,7 +140,7 @@ enum FeeUpdateState { enum InboundHTLCRemovalReason { FailRelay(msgs::OnionErrorPacket), FailMalformed(([u8; 32], u16)), - Fulfill(PaymentPreimage), + Fulfill(PaymentPreimage, Option), } /// Represents the resolution status of an inbound HTLC. @@ -236,7 +236,7 @@ impl From<&InboundHTLCState> for Option { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) => Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), - InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) => + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_, _)) => Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill), } } @@ -268,7 +268,7 @@ impl InboundHTLCState { fn preimage(&self) -> Option { match self { - InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(preimage)) => { + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(preimage, _)) => { Some(*preimage) }, _ => None, @@ -350,11 +350,11 @@ impl From<&OutboundHTLCState> for OutboundHTLCStateDetails { // the state yet. 
OutboundHTLCState::RemoteRemoved(_) => OutboundHTLCStateDetails::Committed, - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) => + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_, _)) => OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess, OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) => OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure, - OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_, _)) => OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess, OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) => OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure, @@ -389,9 +389,9 @@ impl OutboundHTLCState { #[rustfmt::skip] fn preimage(&self) -> Option { match self { - OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(preimage)) - | OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(preimage)) - | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(preimage)) => { + OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(preimage, _)) + | OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(preimage, _)) + | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(preimage, _)) => { Some(*preimage) }, _ => None, @@ -404,14 +404,14 @@ impl OutboundHTLCState { enum OutboundHTLCOutcome { /// We started always filling in the preimages here in 0.0.105, and the requirement /// that the preimages always be filled in was added in 0.2. 
- Success(PaymentPreimage), + Success(PaymentPreimage, Option), Failure(HTLCFailReason), } impl<'a> Into> for &'a OutboundHTLCOutcome { fn into(self) -> Option<&'a HTLCFailReason> { match self { - OutboundHTLCOutcome::Success(_) => None, + OutboundHTLCOutcome::Success(_, _) => None, OutboundHTLCOutcome::Failure(ref r) => Some(r), } } @@ -468,6 +468,7 @@ enum HTLCUpdateAwaitingACK { }, ClaimHTLC { payment_preimage: PaymentPreimage, + attribution_data: Option, htlc_id: u64, }, FailHTLC { @@ -1183,7 +1184,7 @@ pub(super) struct MonitorRestoreUpdates { pub order: RAACommitmentOrder, pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - pub finalized_claimed_htlcs: Vec, + pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, @@ -2313,7 +2314,7 @@ where // but need to handle this somehow or we run the risk of losing HTLCs! monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - monitor_pending_finalized_fulfills: Vec, + monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, monitor_pending_update_adds: Vec, monitor_pending_tx_signatures: Option, @@ -4141,7 +4142,7 @@ where let (local_balance_before_fee_msat, remote_balance_before_fee_msat) = { let mut removed_outbound_total_msat = 0; for htlc in self.pending_outbound_htlcs.iter() { - if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_, _)) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_, _)) = htlc.state { removed_outbound_total_msat += htlc.amount_msat; } } @@ -4371,7 +4372,7 @@ where if !funding.is_outbound() { let mut 
removed_outbound_total_msat = 0; for htlc in self.pending_outbound_htlcs.iter() { - if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state { + if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_, _)) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_, _)) = htlc.state { removed_outbound_total_msat += htlc.amount_msat; } } @@ -6147,7 +6148,7 @@ where assert!(!self.context.channel_state.can_generate_new_commitment()); let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update let fulfill_resp = - self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger); + self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, None, logger); self.context.latest_monitor_update_id = mon_update_id; if let UpdateFulfillFetch::NewClaim { update_blocked, .. } = fulfill_resp { assert!(update_blocked); // The HTLC must have ended up in the holding cell. 
@@ -6156,7 +6157,8 @@ where fn get_update_fulfill_htlc( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, - payment_info: Option, logger: &L, + payment_info: Option, attribution_data: Option, + logger: &L, ) -> UpdateFulfillFetch where L::Target: Logger, @@ -6190,7 +6192,7 @@ where match htlc.state { InboundHTLCState::Committed => {}, InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason { } else { log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id()); debug_assert!( @@ -6271,6 +6273,7 @@ where self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, + attribution_data, }); return UpdateFulfillFetch::NewClaim { monitor_update, @@ -6301,6 +6304,7 @@ where ); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill( payment_preimage_arg.clone(), + attribution_data, )); } @@ -6309,13 +6313,20 @@ where pub fn get_update_fulfill_htlc_and_commit( &mut self, htlc_id: u64, payment_preimage: PaymentPreimage, - payment_info: Option, logger: &L, + payment_info: Option, attribution_data: Option, + logger: &L, ) -> UpdateFulfillCommitFetch where L::Target: Logger, { let release_cs_monitor = self.context.blocked_monitor_updates.is_empty(); - match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) { + match self.get_update_fulfill_htlc( + htlc_id, + payment_preimage, + payment_info, + attribution_data, + logger, + ) { UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, @@ -6624,7 +6635,7 @@ where fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, outcome: OutboundHTLCOutcome) -> Result<&OutboundHTLCOutput, ChannelError> { for htlc in self.context.pending_outbound_htlcs.iter_mut() { if 
htlc.htlc_id == htlc_id { - if let OutboundHTLCOutcome::Success(ref payment_preimage) = outcome { + if let OutboundHTLCOutcome::Success(ref payment_preimage, ..) = outcome { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()); if payment_hash != htlc.payment_hash { return Err(ChannelError::close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id))); @@ -6647,7 +6658,7 @@ where pub fn update_fulfill_htlc( &mut self, msg: &msgs::UpdateFulfillHTLC, - ) -> Result<(HTLCSource, u64, Option), ChannelError> { + ) -> Result<(HTLCSource, u64, Option, Option), ChannelError> { if self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() { @@ -6666,9 +6677,11 @@ where )); } - let outcome = OutboundHTLCOutcome::Success(msg.payment_preimage); - self.mark_outbound_htlc_removed(msg.htlc_id, outcome) - .map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat)) + let outcome = + OutboundHTLCOutcome::Success(msg.payment_preimage, msg.attribution_data.clone()); + self.mark_outbound_htlc_removed(msg.htlc_id, outcome).map(|htlc| { + (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat, htlc.send_timestamp) + }) } #[rustfmt::skip] @@ -7025,9 +7038,9 @@ where log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); // Swap against a dummy variant to avoid a potentially expensive clone of `OutboundHTLCOutcome::Failure(HTLCFailReason)` - let mut reason = OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32])); + let mut reason = OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32]), None); mem::swap(outcome, &mut reason); - if let OutboundHTLCOutcome::Success(preimage) = reason { + if let OutboundHTLCOutcome::Success(preimage, _) = reason { // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b) // upgrades to LDK 0.0.114 or later before the 
HTLC is fully resolved, we could // have a `Success(None)` reason. In this case we could forget some HTLC @@ -7228,7 +7241,11 @@ where } None }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => { + &HTLCUpdateAwaitingACK::ClaimHTLC { + ref payment_preimage, + htlc_id, + ref attribution_data, + } => { // If an HTLC claim was previously added to the holding cell (via // `get_update_fulfill_htlc`, then generating the claim message itself must // not fail - any in between attempts to claim the HTLC will have resulted @@ -7239,8 +7256,13 @@ where // `ChannelMonitorUpdate` to the user, making this one redundant, however // there's no harm in including the extra `ChannelMonitorUpdateStep` here. // We do not bother to track and include `payment_info` here, however. - let fulfill = - self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger); + let fulfill = self.get_update_fulfill_htlc( + htlc_id, + *payment_preimage, + None, + attribution_data.clone(), + logger, + ); let mut additional_monitor_update = if let UpdateFulfillFetch::NewClaim { monitor_update, .. 
} = fulfill { monitor_update @@ -7459,7 +7481,7 @@ where pending_inbound_htlcs.retain(|htlc| { if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash); - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason { value_to_self_msat_diff += htlc.amount_msat as i64; } *expecting_peer_commitment_signed = true; @@ -7477,19 +7499,21 @@ where &htlc.payment_hash ); // We really want take() here, but, again, non-mut ref :( - if let OutboundHTLCOutcome::Failure(mut reason) = outcome.clone() { - if let (Some(timestamp), Some(now)) = (htlc.send_timestamp, now) { - let elapsed_millis = now.saturating_sub(timestamp).as_millis(); - let elapsed_units = elapsed_millis / HOLD_TIME_UNIT_MILLIS; - let hold_time = u32::try_from(elapsed_units).unwrap_or(u32::MAX); - reason.set_hold_time(hold_time); - } - - revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); - } else { - finalized_claimed_htlcs.push(htlc.source.clone()); - // They fulfilled, so we sent them money - value_to_self_msat_diff -= htlc.amount_msat as i64; + match outcome.clone() { + OutboundHTLCOutcome::Failure(mut reason) => { + hold_time(htlc.send_timestamp, now).map(|hold_time| { + reason.set_hold_time(hold_time); + }); + revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); + }, + OutboundHTLCOutcome::Success(_, attribution_data) => { + // Even though a fast track was taken for fulfilled HTLCs to the incoming side, we still + // pass along attribution data here so that we can include hold time information in the + // final PaymentPathSuccessful events. 
+ finalized_claimed_htlcs.push((htlc.source.clone(), attribution_data)); + // They fulfilled, so we sent them money + value_to_self_msat_diff -= htlc.amount_msat as i64; + }, } false } else { @@ -7573,7 +7597,7 @@ where { log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash); // Swap against a dummy variant to avoid a potentially expensive clone of `OutboundHTLCOutcome::Failure(HTLCFailReason)` - let mut reason = OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32])); + let mut reason = OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32]), None); mem::swap(outcome, &mut reason); htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); require_commitment = true; @@ -7972,7 +7996,7 @@ where &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - mut pending_finalized_claimed_htlcs: Vec, + mut pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, ) { self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; @@ -8326,11 +8350,15 @@ where failure_code: failure_code.clone(), }); }, - &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { + &InboundHTLCRemovalReason::Fulfill( + ref payment_preimage, + ref attribution_data, + ) => { update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, payment_preimage: payment_preimage.clone(), + attribution_data: attribution_data.clone(), }); }, } @@ -10696,7 +10724,7 @@ where if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash); // Swap against a dummy variant to avoid a potentially expensive 
clone of `OutboundHTLCOutcome::Failure(HTLCFailReason)` - let mut reason = OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32])); + let mut reason = OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32]), None); mem::swap(outcome, &mut reason); htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason); } @@ -12416,7 +12444,7 @@ where dropped_inbound_htlcs += 1; } } - let mut removed_htlc_failure_attribution_data: Vec<&Option> = Vec::new(); + let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -12448,15 +12476,16 @@ where }) => { 0u8.write(writer)?; data.write(writer)?; - removed_htlc_failure_attribution_data.push(&attribution_data); + removed_htlc_attribution_data.push(&attribution_data); }, InboundHTLCRemovalReason::FailMalformed((hash, code)) => { 1u8.write(writer)?; (hash, code).write(writer)?; }, - InboundHTLCRemovalReason::Fulfill(preimage) => { + InboundHTLCRemovalReason::Fulfill(preimage, attribution_data) => { 2u8.write(writer)?; preimage.write(writer)?; + removed_htlc_attribution_data.push(&attribution_data); }, } }, @@ -12466,6 +12495,7 @@ where // The elements of this vector will always be `Some` starting in 0.2, // but we still serialize the option to maintain backwards compatibility let mut preimages: Vec> = vec![]; + let mut fulfill_attribution_data = vec![]; let mut pending_outbound_skimmed_fees: Vec> = Vec::new(); let mut pending_outbound_blinding_points: Vec> = Vec::new(); @@ -12491,16 +12521,18 @@ where }, &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => { 3u8.write(writer)?; - if let OutboundHTLCOutcome::Success(preimage) = outcome { + if let OutboundHTLCOutcome::Success(preimage, attribution_data) = outcome { preimages.push(Some(preimage)); + fulfill_attribution_data.push(attribution_data); } let 
reason: Option<&HTLCFailReason> = outcome.into(); reason.write(writer)?; }, &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => { 4u8.write(writer)?; - if let OutboundHTLCOutcome::Success(preimage) = outcome { + if let OutboundHTLCOutcome::Success(preimage, attribution_data) = outcome { preimages.push(Some(preimage)); + fulfill_attribution_data.push(attribution_data); } let reason: Option<&HTLCFailReason> = outcome.into(); reason.write(writer)?; @@ -12515,7 +12547,7 @@ where Vec::with_capacity(holding_cell_htlc_update_count); let mut holding_cell_blinding_points: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); - let mut holding_cell_failure_attribution_data: Vec> = + let mut holding_cell_attribution_data: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); // Vec of (htlc_id, failure_code, sha256_of_onion) let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new(); @@ -12541,10 +12573,17 @@ where holding_cell_skimmed_fees.push(skimmed_fee_msat); holding_cell_blinding_points.push(blinding_point); }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => { + &HTLCUpdateAwaitingACK::ClaimHTLC { + ref payment_preimage, + ref htlc_id, + ref attribution_data, + } => { 1u8.write(writer)?; payment_preimage.write(writer)?; htlc_id.write(writer)?; + + // Store the attribution data for later writing. + holding_cell_attribution_data.push(attribution_data.as_ref()); }, &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => { 2u8.write(writer)?; @@ -12552,8 +12591,7 @@ where err_packet.data.write(writer)?; // Store the attribution data for later writing. 
- holding_cell_failure_attribution_data - .push(err_packet.attribution_data.as_ref()); + holding_cell_attribution_data.push(err_packet.attribution_data.as_ref()); }, &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, @@ -12570,7 +12608,7 @@ where // Push 'None' attribution data for FailMalformedHTLC, because FailMalformedHTLC uses the same // type 2 and is deserialized as a FailHTLC. - holding_cell_failure_attribution_data.push(None); + holding_cell_attribution_data.push(None); }, } } @@ -12773,11 +12811,12 @@ where (51, is_manual_broadcast, option), // Added in 0.0.124 (53, funding_tx_broadcast_safe_event_emitted, option), // Added in 0.0.124 (54, self.pending_funding, optional_vec), // Added in 0.2 - (55, removed_htlc_failure_attribution_data, optional_vec), // Added in 0.2 - (57, holding_cell_failure_attribution_data, optional_vec), // Added in 0.2 + (55, removed_htlc_attribution_data, optional_vec), // Added in 0.2 + (57, holding_cell_attribution_data, optional_vec), // Added in 0.2 (58, self.interactive_tx_signing_session, option), // Added in 0.2 (59, self.funding.minimum_depth_override, option), // Added in 0.2 (60, self.context.historical_scids, optional_vec), // Added in 0.2 + (61, fulfill_attribution_data, optional_vec), }); Ok(()) @@ -12869,7 +12908,7 @@ where attribution_data: None, }), 1 => InboundHTLCRemovalReason::FailMalformed(Readable::read(reader)?), - 2 => InboundHTLCRemovalReason::Fulfill(Readable::read(reader)?), + 2 => InboundHTLCRemovalReason::Fulfill(Readable::read(reader)?, None), _ => return Err(DecodeError::InvalidValue), }; InboundHTLCState::LocalRemoved(reason) @@ -12899,7 +12938,7 @@ where let outcome = match option { Some(r) => OutboundHTLCOutcome::Failure(r), // Initialize this variant with a dummy preimage, the actual preimage will be filled in further down - None => OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32])), + None => OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32]), None), }; 
OutboundHTLCState::RemoteRemoved(outcome) }, @@ -12908,7 +12947,7 @@ where let outcome = match option { Some(r) => OutboundHTLCOutcome::Failure(r), // Initialize this variant with a dummy preimage, the actual preimage will be filled in further down - None => OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32])), + None => OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32]), None), }; OutboundHTLCState::AwaitingRemoteRevokeToRemove(outcome) }, @@ -12917,7 +12956,7 @@ where let outcome = match option { Some(r) => OutboundHTLCOutcome::Failure(r), // Initialize this variant with a dummy preimage, the actual preimage will be filled in further down - None => OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32])), + None => OutboundHTLCOutcome::Success(PaymentPreimage([0u8; 32]), None), }; OutboundHTLCState::AwaitingRemovedRemoteRevoke(outcome) }, @@ -12948,6 +12987,7 @@ where 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, htlc_id: Readable::read(reader)?, + attribution_data: None, }, 2 => HTLCUpdateAwaitingACK::FailHTLC { htlc_id: Readable::read(reader)?, @@ -13092,6 +13132,7 @@ where // Starting in 0.2, all the elements in this vector will be `Some`, but they are still // serialized as options to maintain backwards compatibility let mut preimages: Vec> = Vec::new(); + let mut fulfill_attribution_data: Option>> = None; // If we read an old Channel, for simplicity we just treat it as "we never sent an // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine. 
@@ -13119,8 +13160,8 @@ where let mut pending_outbound_blinding_points_opt: Option>> = None; let mut holding_cell_blinding_points_opt: Option>> = None; - let mut removed_htlc_failure_attribution_data: Option>> = None; - let mut holding_cell_failure_attribution_data: Option>> = None; + let mut removed_htlc_attribution_data: Option>> = None; + let mut holding_cell_attribution_data: Option>> = None; let mut malformed_htlcs: Option> = None; let mut monitor_pending_update_adds: Option> = None; @@ -13172,33 +13213,37 @@ where (51, is_manual_broadcast, option), (53, funding_tx_broadcast_safe_event_emitted, option), (54, pending_funding, optional_vec), // Added in 0.2 - (55, removed_htlc_failure_attribution_data, optional_vec), - (57, holding_cell_failure_attribution_data, optional_vec), + (55, removed_htlc_attribution_data, optional_vec), + (57, holding_cell_attribution_data, optional_vec), (58, interactive_tx_signing_session, option), // Added in 0.2 (59, minimum_depth_override, option), // Added in 0.2 (60, historical_scids, optional_vec), // Added in 0.2 + (61, fulfill_attribution_data, optional_vec), }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); let mut iter = preimages.into_iter(); + let mut fulfill_attribution_data_iter = fulfill_attribution_data.map(Vec::into_iter); for htlc in pending_outbound_htlcs.iter_mut() { match &mut htlc.state { OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success( ref mut preimage, - )) => { - // This variant was initialized like this further above - debug_assert_eq!(preimage, &PaymentPreimage([0u8; 32])); - // Flatten and unwrap the preimage; they are always set starting in 0.2. 
- *preimage = iter.next().flatten().ok_or(DecodeError::InvalidValue)?; - }, - OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success( + ref mut attribution_data, + )) + | OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success( ref mut preimage, + ref mut attribution_data, )) => { // This variant was initialized like this further above debug_assert_eq!(preimage, &PaymentPreimage([0u8; 32])); // Flatten and unwrap the preimage; they are always set starting in 0.2. *preimage = iter.next().flatten().ok_or(DecodeError::InvalidValue)?; + + *attribution_data = fulfill_attribution_data_iter + .as_mut() + .and_then(Iterator::next) + .ok_or(DecodeError::InvalidValue)?; }, _ => {}, } @@ -13276,46 +13321,48 @@ where } } - if let Some(attribution_data_list) = removed_htlc_failure_attribution_data { - let mut removed_htlc_relay_failures = - pending_inbound_htlcs.iter_mut().filter_map(|status| { - if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay( - ref mut packet, - )) = &mut status.state - { - Some(&mut packet.attribution_data) - } else { - None + if let Some(attribution_data_list) = removed_htlc_attribution_data { + let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { + if let InboundHTLCState::LocalRemoved(reason) = &mut status.state { + match reason { + InboundHTLCRemovalReason::FailRelay(ref mut packet) => { + Some(&mut packet.attribution_data) + }, + InboundHTLCRemovalReason::Fulfill(_, ref mut attribution_data) => { + Some(attribution_data) + }, + _ => None, } - }); + } else { + None + } + }); for attribution_data in attribution_data_list { - *removed_htlc_relay_failures.next().ok_or(DecodeError::InvalidValue)? = - attribution_data; + *removed_htlcs.next().ok_or(DecodeError::InvalidValue)? 
= attribution_data; } - if removed_htlc_relay_failures.next().is_some() { + if removed_htlcs.next().is_some() { return Err(DecodeError::InvalidValue); } } - if let Some(attribution_data_list) = holding_cell_failure_attribution_data { - let mut holding_cell_failures = - holding_cell_htlc_updates.iter_mut().filter_map(|upd| { - if let HTLCUpdateAwaitingACK::FailHTLC { + if let Some(attribution_data_list) = holding_cell_attribution_data { + let mut holding_cell_htlcs = + holding_cell_htlc_updates.iter_mut().filter_map(|upd| match upd { + HTLCUpdateAwaitingACK::FailHTLC { err_packet: OnionErrorPacket { ref mut attribution_data, .. }, .. - } = upd - { + } => Some(attribution_data), + HTLCUpdateAwaitingACK::ClaimHTLC { attribution_data, .. } => { Some(attribution_data) - } else { - None - } + }, + _ => None, }); for attribution_data in attribution_data_list { - *holding_cell_failures.next().ok_or(DecodeError::InvalidValue)? = attribution_data; + *holding_cell_htlcs.next().ok_or(DecodeError::InvalidValue)? = attribution_data; } - if holding_cell_failures.next().is_some() { + if holding_cell_htlcs.next().is_some() { return Err(DecodeError::InvalidValue); } } @@ -13526,7 +13573,7 @@ where } } -fn duration_since_epoch() -> Option { +pub(crate) fn duration_since_epoch() -> Option { #[cfg(not(feature = "std"))] let now = None; @@ -13540,6 +13587,17 @@ fn duration_since_epoch() -> Option { now } +/// Returns the time expressed in hold time units (1 unit = 100 ms) that has elapsed between send_timestamp and now. If +/// any of the arguments are `None`, returns `None`. 
+pub(crate) fn hold_time(send_timestamp: Option, now: Option) -> Option { + send_timestamp.and_then(|t| { + now.map(|now| { + let elapsed = now.saturating_sub(t).as_millis() / HOLD_TIME_UNIT_MILLIS; + u32::try_from(elapsed).unwrap_or(u32::MAX) + }) + }) +} + #[cfg(test)] mod tests { use crate::chain::chaininterface::LowerBoundedFeeEstimator; @@ -14298,17 +14356,16 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, }; - let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC { + let dummy_holding_cell_claim_htlc = |attribution_data| HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: PaymentPreimage([42; 32]), htlc_id: 0, + attribution_data, }; - let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC { - htlc_id, - err_packet: msgs::OnionErrorPacket { - data: vec![42], - attribution_data: Some(AttributionData::new()), - }, - }; + let dummy_holding_cell_failed_htlc = + |htlc_id, attribution_data| HTLCUpdateAwaitingACK::FailHTLC { + htlc_id, + err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data }, + }; let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, @@ -14316,29 +14373,45 @@ mod tests { sha256_of_onion: [0; 32], }; let mut holding_cell_htlc_updates = Vec::with_capacity(12); - for i in 0..12 { - if i % 5 == 0 { - holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone()); - } else if i % 5 == 1 { - holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone()); - } else if i % 5 == 2 { - let mut dummy_add = dummy_holding_cell_add_htlc.clone(); - if let HTLCUpdateAwaitingACK::AddHTLC { - ref mut blinding_point, - ref mut skimmed_fee_msat, - .. 
- } = &mut dummy_add - { - *blinding_point = Some(test_utils::pubkey(42 + i)); - *skimmed_fee_msat = Some(42); - } else { - panic!() - } - holding_cell_htlc_updates.push(dummy_add); - } else if i % 5 == 3 { - holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64)); - } else { - holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64)); + for i in 0..16 { + match i % 7 { + 0 => { + holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone()); + }, + 1 => { + holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc(None)); + }, + 2 => { + holding_cell_htlc_updates + .push(dummy_holding_cell_claim_htlc(Some(AttributionData::new()))); + }, + 3 => { + let mut dummy_add = dummy_holding_cell_add_htlc.clone(); + if let HTLCUpdateAwaitingACK::AddHTLC { + ref mut blinding_point, + ref mut skimmed_fee_msat, + .. + } = &mut dummy_add + { + *blinding_point = Some(test_utils::pubkey(42 + i)); + *skimmed_fee_msat = Some(42); + } else { + panic!() + } + holding_cell_htlc_updates.push(dummy_add); + }, + 4 => { + holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64)); + }, + 5 => { + holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64, None)); + }, + _ => { + holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc( + i as u64, + Some(AttributionData::new()), + )); + }, } } chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a5127c46826..905787da6b3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -58,12 +58,12 @@ use crate::events::{ use crate::events::{FundingInfo, PaidBolt12Invoice}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. 
-use crate::ln::channel::PendingV2Channel; use crate::ln::channel::{ - self, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel, + self, hold_time, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel, OutboundV1Channel, ReconnectionMsg, ShutdownResult, UpdateFulfillCommitFetch, WithChannelContext, }; +use crate::ln::channel::{duration_since_epoch, PendingV2Channel}; use crate::ln::channel_state::ChannelDetails; use crate::ln::inbound_payment; use crate::ln::msgs; @@ -77,7 +77,10 @@ use crate::ln::onion_payment::{ NextPacketDetails, }; use crate::ln::onion_utils::{self}; -use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; +use crate::ln::onion_utils::{ + decode_fulfill_attribution_data, HTLCFailReason, LocalHTLCFailureReason, +}; +use crate::ln::onion_utils::{process_fulfill_attribution_data, AttributionData}; use crate::ln::our_peer_storage::EncryptedOurPeerStorage; #[cfg(test)] use crate::ln::outbound_payment; @@ -7938,10 +7941,30 @@ where pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)), } }); + + // Create new attribution data as the final hop. Always report a zero hold time, because reporting a + // non-zero value will not make a difference in the penalty that may be applied by the sender. If there + // is a phantom hop, we need to double-process. 
+ let attribution_data = + if let Some(phantom_secret) = htlc.prev_hop.phantom_shared_secret { + let attribution_data = + process_fulfill_attribution_data(None, &phantom_secret, 0); + Some(attribution_data) + } else { + None + }; + + let attribution_data = process_fulfill_attribution_data( + attribution_data.as_ref(), + &htlc.prev_hop.incoming_packet_shared_secret, + 0, + ); + self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, payment_info.clone(), + Some(attribution_data), |_, definitely_duplicate| { debug_assert!( !definitely_duplicate, @@ -7986,7 +8009,8 @@ where ) -> (Option, Option), >( &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, - payment_info: Option, completion_action: ComplFunc, + payment_info: Option, attribution_data: Option, + completion_action: ComplFunc, ) { let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| { let short_to_chan_info = self.short_to_chan_info.read().unwrap(); @@ -7999,7 +8023,13 @@ where channel_id: prev_hop.channel_id, htlc_id: prev_hop.htlc_id, }; - self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action) + self.claim_mpp_part( + htlc_source, + payment_preimage, + payment_info, + attribution_data, + completion_action, + ) } fn claim_mpp_part< @@ -8009,7 +8039,8 @@ where ) -> (Option, Option), >( &self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage, - payment_info: Option, completion_action: ComplFunc, + payment_info: Option, attribution_data: Option, + completion_action: ComplFunc, ) { //TODO: Delay the claimed_funds relaying just like we do outbound relay! @@ -8050,6 +8081,7 @@ where prev_hop.htlc_id, payment_preimage, payment_info, + attribution_data, &&logger, ); @@ -8249,8 +8281,38 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ); } - fn finalize_claims(&self, sources: Vec) { - self.pending_outbound_payments.finalize_claims(sources, &self.pending_events); + fn finalize_claims(&self, sources: Vec<(HTLCSource, Option)>) { + // Decode attribution data to hold times. + let hold_times = sources.into_iter().filter_map(|(source, attribution_data)| { + if let HTLCSource::OutboundRoute { ref session_priv, ref path, .. } = source { + // If the path has trampoline hops, we need to hash the session private key to get the outer session key. + let derived_key; + let session_priv = if path.has_trampoline_hops() { + let session_priv_hash = + Sha256::hash(&session_priv.secret_bytes()).to_byte_array(); + derived_key = SecretKey::from_slice(&session_priv_hash[..]).unwrap(); + &derived_key + } else { + session_priv + }; + + let hold_times = attribution_data.map_or(Vec::new(), |attribution_data| { + decode_fulfill_attribution_data( + &self.secp_ctx, + &self.logger, + path, + session_priv, + attribution_data, + ) + }); + + Some((source, hold_times)) + } else { + None + } + }); + + self.pending_outbound_payments.finalize_claims(hold_times, &self.pending_events); } fn claim_funds_internal( @@ -8258,7 +8320,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ forwarded_htlc_value_msat: Option, skimmed_fee_msat: Option, from_onchain: bool, startup_replay: bool, next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint, next_channel_id: ChannelId, - next_user_channel_id: Option, + next_user_channel_id: Option, attribution_data: Option<&AttributionData>, + send_timestamp: Option, ) { match source { HTLCSource::OutboundRoute { @@ -8290,10 +8353,25 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let prev_node_id = hop_data.counterparty_node_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); + + // Obtain hold time, if available. 
+ let now = duration_since_epoch(); + let hold_time = hold_time(send_timestamp, now).unwrap_or(0); + + // If attribution data was received from downstream, we shift it and get it ready for adding our hold + // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA + // to record the hold time like we do for failed HTLCs. + let attribution_data = process_fulfill_attribution_data( + attribution_data, + &hop_data.incoming_packet_shared_secret, + hold_time, + ); + self.claim_funds_from_hop( hop_data, payment_preimage, None, + Some(attribution_data), |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = Some(EventUnblockedChannel { counterparty_node_id: next_channel_counterparty_node_id, @@ -9852,7 +9930,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) -> Result<(), MsgHandleErrInternal> { let funding_txo; let next_user_channel_id; - let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = { + let (htlc_source, forwarded_htlc_value, skimmed_fee_msat, send_timestamp) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); @@ -9907,6 +9985,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funding_txo, msg.channel_id, Some(next_user_channel_id), + msg.attribution_data.as_ref(), + send_timestamp, ); Ok(()) @@ -10800,6 +10880,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Claiming HTLC with preimage {} from our monitor", preimage ); + // Claim the funds from the previous hop, if there is one. Because this is in response to a + // chain event, no attribution data is available. self.claim_funds_internal( htlc_update.source, preimage, @@ -10811,6 +10893,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ funding_outpoint, channel_id, None, + None, + None, ); } else { log_trace!( @@ -16677,10 +16761,14 @@ where // Note that we don't need to pass the `payment_info` here - its // already (clearly) durably on disk in the `ChannelMonitor` so there's // no need to worry about getting it into others. + // + // We don't encode any attribution data, because the required onion shared secret isn't + // available here. channel_manager.claim_mpp_part( part.into(), payment_preimage, None, + None, |_, _| { ( Some(MonitorUpdateCompletionAction::PaymentClaimed { @@ -16825,6 +16913,7 @@ where // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. + // The same holds for attribution data. We don't have any, so we pass an empty one. channel_manager.claim_funds_internal( source, preimage, @@ -16836,6 +16925,8 @@ where downstream_funding, downstream_channel_id, None, + None, + None, ); } @@ -17055,7 +17146,7 @@ mod tests { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { + Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path, .. 
} => { assert_eq!(payment_id, *actual_payment_id); assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); assert_eq!(route.paths[0], *path); @@ -17063,7 +17154,7 @@ mod tests { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { + Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path, ..} => { assert_eq!(payment_id, *actual_payment_id); assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); assert_eq!(route.paths[0], *path); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index dac63c8b33f..61505a222c7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2862,11 +2862,13 @@ macro_rules! expect_payment_claimed { }; } +/// Inspect events to assert that a payment was sent. If this was a BOLT 12 payment, the BOLT 12 invoice is returned. If +/// per-path claims are expected, the events for each path are returned as well. pub fn expect_payment_sent>( node: &H, expected_payment_preimage: PaymentPreimage, expected_fee_msat_opt: Option>, expect_per_path_claims: bool, expect_post_ev_mon_update: bool, -) -> Option { +) -> (Option, Vec) { let events = node.node().get_and_clear_pending_events(); let expected_payment_hash = PaymentHash( bitcoin::hashes::sha256::Hash::hash(&expected_payment_preimage.0).to_byte_array(), @@ -2881,6 +2883,7 @@ pub fn expect_payment_sent>( } // We return the invoice because some test may want to check the invoice details. let invoice; + let mut path_events = Vec::new(); let expected_payment_id = match events[0] { Event::PaymentSent { ref payment_id, @@ -2909,12 +2912,14 @@ pub fn expect_payment_sent>( Event::PaymentPathSuccessful { payment_id, payment_hash, .. 
} => { assert_eq!(payment_id, expected_payment_id); assert_eq!(payment_hash, Some(expected_payment_hash)); + + path_events.push(events[i].clone()); }, _ => panic!("Unexpected event"), } } } - invoice + (invoice, path_events) } #[macro_export] @@ -3926,7 +3931,9 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { expected_total_fee_msat } -pub fn claim_payment_along_route(args: ClaimAlongRouteArgs) -> Option { +pub fn claim_payment_along_route( + args: ClaimAlongRouteArgs, +) -> (Option, Vec) { let origin_node = args.origin_node; let payment_preimage = args.payment_preimage; let skip_last = args.skip_last; @@ -3934,7 +3941,7 @@ pub fn claim_payment_along_route(args: ClaimAlongRouteArgs) -> Option( &[expected_route], our_payment_preimage, )) + .0 } pub const TEST_FINAL_CLTV: u32 = 70; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index a91ae04329a..a75e5a6f92a 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -7748,7 +7748,8 @@ pub fn test_onion_value_mpp_set_calculation() { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); if idx == 0 { - // routing node + // Manipulate the onion packet for the routing node. Note that we pick a dummy session_priv here. The sender + // won't be able to decode fulfill attribution data. 
let session_priv = [3; 32]; let height = nodes[0].best_block_info().1; let session_priv = SecretKey::from_slice(&session_priv).unwrap(); diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 22026414e51..4da79f97b2f 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -1785,6 +1785,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { channel_id: chan.2, htlc_id: 0, payment_preimage: our_payment_preimage, + attribution_data: None, }; nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg); diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index c9f1741cdbb..a513582cb64 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -52,6 +52,8 @@ pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. +#[cfg(fuzzing)] +pub use onion_utils::decode_fulfill_attribution_data; #[cfg(fuzzing)] pub use onion_utils::process_onion_failure; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index d4b9fa23f1a..03389c09609 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -790,6 +790,8 @@ pub struct UpdateFulfillHTLC { pub htlc_id: u64, /// The pre-image of the payment hash, allowing HTLC redemption pub payment_preimage: PaymentPreimage, + /// Optional field for attribution data that allows the sender to receive per hop HTLC hold times. + pub attribution_data: Option, } /// A [`peer_storage`] message that can be sent to or received from a peer. 
@@ -3170,7 +3172,9 @@ impl_writeable_msg!(UpdateFulfillHTLC, { channel_id, htlc_id, payment_preimage -}, {}); +}, { + (1, attribution_data, option) +}); impl_writeable_msg!(PeerStorage, { data }, {}); @@ -5671,6 +5675,7 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, payment_preimage: PaymentPreimage([1; 32]), + attribution_data: None, }; let encoded_value = update_fulfill_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0101010101010101010101010101010101010101010101010101010101010101").unwrap(); diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index c2187ecf7b6..47f8c388340 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -880,10 +880,7 @@ fn crypt_failure_packet(shared_secret: &[u8], packet: &mut OnionErrorPacket) { chacha.process_in_place(&mut packet.data); if let Some(ref mut attribution_data) = packet.attribution_data { - let ammagext = gen_ammagext_from_shared_secret(&shared_secret); - let mut chacha = ChaCha20::new(&ammagext, &[0u8; 8]); - chacha.process_in_place(&mut attribution_data.hold_times); - chacha.process_in_place(&mut attribution_data.hmacs); + attribution_data.crypt(shared_secret); } } @@ -945,10 +942,7 @@ fn update_attribution_data( let attribution_data = onion_error_packet.attribution_data.get_or_insert(AttributionData::new()); - let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes(); - attribution_data.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes); - - attribution_data.add_hmacs(shared_secret, &onion_error_packet.data); + attribution_data.update(&onion_error_packet.data, shared_secret, hold_time); } pub(super) fn build_failure_packet( @@ -1214,34 +1208,37 @@ where // nearby failure, the verified HMACs will include some zero padding data. Failures beyond the last // attributable hop will not be attributable. 
let position = attributable_hop_count - route_hop_idx - 1; - let hold_time = attribution_data.verify( + let res = attribution_data.verify( &encrypted_packet.data, shared_secret.as_ref(), position, ); - if let Some(hold_time) = hold_time { - hop_hold_times.push(hold_time); - - log_debug!( - logger, - "Htlc hold time at pos {}: {} ms", - route_hop_idx, - (hold_time as u128) * HOLD_TIME_UNIT_MILLIS - ); + match res { + Ok(hold_time) => { + hop_hold_times.push(hold_time); + + log_debug!( + logger, + "Htlc hold time at pos {}: {} ms", + route_hop_idx, + (hold_time as u128) * HOLD_TIME_UNIT_MILLIS + ); - // Shift attribution data to prepare for processing the next hop. - attribution_data.shift_left(); - } else { - // Store the failing hop, but continue processing the failure for the remaining hops. During the - // upgrade period, it may happen that nodes along the way drop attribution data. If the legacy - // failure is still valid, it should be processed normally. - attribution_failed_channel = route_hop.short_channel_id(); - - log_debug!( - logger, - "Invalid HMAC in attribution data for node at pos {}", - route_hop_idx - ); + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + }, + Err(()) => { + // Store the failing hop, but continue processing the failure for the remaining hops. During the + // upgrade period, it may happen that nodes along the way drop attribution data. If the legacy + // failure is still valid, it should be processed normally. + attribution_failed_channel = route_hop.short_channel_id(); + + log_debug!( + logger, + "Invalid failure HMAC in attribution data for node at pos {}", + route_hop_idx + ); + }, } } } else { @@ -1469,6 +1466,56 @@ where } } +/// Decodes the attribution data that we got back from upstream on a payment we sent. 
+pub fn decode_fulfill_attribution_data( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, +) -> Vec +where + L::Target: Logger, +{ + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. + let shared_secrets = + construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 + // hops. Determine the number of hops to be used for attribution data. + let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in shared_secrets.enumerate().take(attributable_hop_count) { + attribution_data.crypt(shared_secret.as_ref()); + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need + // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond + // the last attributable hop will not be available. + let position = attributable_hop_count - route_hop_idx - 1; + let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + match res { + Ok(hold_time) => { + hold_times.push(hold_time); + + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + }, + Err(()) => { + // We will hit this if there is a node on the path that does not support fulfill attribution data. 
+ log_debug!( + logger, + "Invalid fulfill HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + }, + } + } + + hold_times +} + const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -2657,6 +2704,14 @@ impl_writeable!(AttributionData, { }); impl AttributionData { + /// Encrypts or decrypts the attribution data using the provided shared secret. + pub(crate) fn crypt(&mut self, shared_secret: &[u8]) { + let ammagext = gen_ammagext_from_shared_secret(&shared_secret); + let mut chacha = ChaCha20::new(&ammagext, &[0u8; 8]); + chacha.process_in_place(&mut self.hold_times); + chacha.process_in_place(&mut self.hmacs); + } + /// Adds the current node's HMACs for all possible positions to this packet. pub(crate) fn add_hmacs(&mut self, shared_secret: &[u8], message: &[u8]) { let um: [u8; 32] = gen_um_from_shared_secret(&shared_secret); @@ -2705,8 +2760,8 @@ impl AttributionData { } /// Verifies the attribution data of a failure packet for the given position in the path. If the HMAC checks out, the - /// reported hold time is returned. If the HMAC does not match, None is returned. - fn verify(&self, message: &Vec, shared_secret: &[u8], position: usize) -> Option { + /// reported hold time is returned. If the HMAC does not match, an error is returned. + fn verify(&self, message: &[u8], shared_secret: &[u8], position: usize) -> Result { // Calculate the expected HMAC. 
 		let um = gen_um_from_shared_secret(shared_secret);
 		let mut hmac = HmacEngine::<Sha256>::new(&um);
@@ -2719,13 +2774,13 @@ impl AttributionData {
 		let hmac_idx = MAX_HOPS - position - 1;
 		let actual_hmac = self.get_hmac(hmac_idx);
 		if !fixed_time_eq(expected_hmac, actual_hmac) {
-			return None;
+			return Err(());
 		}
 
 		// The HMAC checks out and the hold time can be extracted and returned;
 		let hold_time: u32 = u32::from_be_bytes(self.get_hold_time_bytes(0).try_into().unwrap());
 
-		Some(hold_time)
+		Ok(hold_time)
 	}
 
 	/// Shifts hold times and HMACs to the left, taking into account HMAC pruning. This is the inverse operation of what
@@ -2791,6 +2846,12 @@ impl AttributionData {
 	fn get_hold_time_bytes(&self, idx: usize) -> &[u8] {
 		&self.hold_times[idx * HOLD_TIME_LEN..(idx + 1) * HOLD_TIME_LEN]
 	}
+
+	fn update(&mut self, message: &[u8], shared_secret: &[u8], hold_time: u32) {
+		let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes();
+		self.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes);
+		self.add_hmacs(shared_secret, message);
+	}
 }
 
 /// Updates the attribution data for an intermediate node.
@@ -2806,6 +2867,30 @@ fn process_failure_packet(
 		update_attribution_data(onion_error, shared_secret, hold_time);
 }
+
+/// Updates fulfill attribution data with the given hold time for an intermediate or final node. If no downstream
+/// attribution data is passed in, a new `AttributionData` field is instantiated. It is needless to say that in that
+/// case the sender won't receive any hold times from nodes downstream of the current node.
+pub(crate) fn process_fulfill_attribution_data(
+	attribution_data: Option<&AttributionData>, shared_secret: &[u8], hold_time: u32,
+) -> AttributionData {
+	let mut attribution_data =
+		attribution_data.map_or(AttributionData::new(), |attribution_data| {
+			let mut attribution_data = attribution_data.clone();
+
+			// Shift the existing attribution data to the right to make space for the new hold time and HMACs.
+ attribution_data.shift_right(); + + attribution_data + }); + + // Add this node's hold time and HMACs. We pass in an empty message because there is no (failure) message in the + // fulfill case. + attribution_data.update(&[], &shared_secret, hold_time); + attribution_data.crypt(&shared_secret); + + attribution_data +} + #[cfg(test)] mod tests { use core::iter; @@ -3335,6 +3420,68 @@ mod tests { process_onion_failure(&ctx_full, &logger, &htlc_source, onion_error) } + /// Tests that the hold times and HMACs in the attribution data are matching the specification test vector and that + /// decoding yields the expected values. + #[test] + fn test_success_hold_times() { + fn assert_data(actual: &AttributionData, expected: &str) { + let (expected_hold_times, expected_hmacs) = + expected.split_at(MAX_HOPS * HOLD_TIME_LEN * 2); + + println!( + "{}{}", + actual.hold_times.to_lower_hex_string(), + actual.hmacs.to_lower_hex_string() + ); + + assert_eq!(actual.hold_times.to_lower_hex_string(), expected_hold_times); + assert_eq!(actual.hmacs.to_lower_hex_string(), expected_hmacs); + } + + // The test vector from BOLT #4. 
+ const EXPECTED_MESSAGES: [&str; 5] = [ + "d77d0711b5f71d1d1be56bd88b3bb7ebc1792bb739ea7ebc1bc3b031b8bc2df3a50e25aeb99f47d7f7ab39e24187d3f4df9c4333463b053832ee9ac07274a5261b8b2a01fc09ce9ea7cd04d7b585dfb83299fb6570d71f793c1fcac0ef498766952c8c6840efa02a567d558a3cf6822b12476324b9b9efa03e5f8f26f81fa93daac46cbf00c98e69b6747cf69caaa2a71b025bd18830c4c54cd08f598cfde6197b3f2a951aba907c964c0f5d19a44e6d1d7279637321fa598adde927b3087d238f8b426ecde500d318617cdb7a56e6ce3520fc95be41a549973764e4dc483853ecc313947709f1b5199cb077d46e701fa633e11d3e13b03e9212c115ca6fa004b2f3dd912814693b705a561a06da54cdf603677a3abecdc22c7358c2de3cef771b366a568150aeecc86ad1990bb0f4e2865933b03ea0df87901bff467908273dc6cea31cbab0e2b8d398d10b001058c259ed221b7b55762f4c7e49c8c11a45a107b7a2c605c26dc5b0b10d719b1c844670102b2b6a36c43fe4753a78a483fc39166ae28420f112d50c10ee64ca69569a2f690712905236b7c2cb7ac8954f02922d2d918c56d42649261593c47b14b324a65038c3c5be8d3c403ce0c8f19299b1664bf077d7cf1636c4fb9685a8e58b7029fd0939fa07925a60bed339b23f973293598f595e75c8f9d455d7cebe4b5e23357c8bd47d66d6628b39427e37e0aecbabf46c11be6771f7136e108a143ae9bafba0fc47a51b6c7deef4cba54bae906398ee3162a41f2191ca386b628bde7e1dd63d1611aa01a95c456df337c763cb8c3a81a6013aa633739d8cd554c688102211725e6adad165adc1bcd429d020c51b4b25d2117e8bb27eb0cc7020f9070d4ad19ac31a76ebdf5f9246646aeadbfb9a3f1d75bd8237961e786302516a1a781780e8b73f58dc06f307e58bd0eb1d8f5c9111f01312974c1dc777a6a2d3834d8a2a40014e9818d0685cb3919f6b3b788ddc640b0ff9b1854d7098c7dd6f35196e902b26709640bc87935a3914869a807e8339281e9cedaaca99474c3e7bdd35050bb998ab4546f9900904e0e39135e861ff7862049269701081ebce32e4cca992c6967ff0fd239e38233eaf614af31e186635e9439ec5884d798f9174da6ff569d68ed5c092b78bd3f880f5e88a7a8ab36789e1b57b035fb6c32a6358f51f83e4e5f46220bcad072943df8bd9541a61b7dae8f30fa3dd5fb39b1fd9a0b8e802552b78d4ec306ecee15bfe6da14b29ba6d19ce5be4dd478bca74a52429cd5309d404655c3dec85c252", + 
"1571e10db7f8aa9f8e7e99caaf9c892e106c817df1d8e3b7b0e39d1c48f631e473e17e205489dd7b3c634cac3be0825cbf01418cd46e83c24b8d9c207742db9a0f0e5bcd888086498159f08080ba7bf3ea029c0b493227c4e75a90f70340d9e21f00979fc7e4fb2078477c1a457ba242ed54b313e590b13a2a13bfeed753dab133c78059f460075b2594b4c31c50f31076f8f1a0f7ad0530d0fadaf2d86e505ff9755940ec0665f9e5bc58cad6e523091f94d0bcd3c6c65ca1a5d401128dcc5e14f9108b32e660017c13de598bcf9d403710857cccb0fb9c2a81bfd66bc4552e1132afa3119203a4aaa1e8839c1dab8cbdcde7b527aca3f54bde651aa9f3f2178829cee3f1c0b9292758a40cc63bd998fcd0d3ed4bdcaf1023267b8f8e44130a63ad15f76145936552381eabb6d684c0a3af6ba8efcf207cebaea5b7acdbb63f8e7221102409d10c23f0514dc9f4d0efb2264161a193a999a23e992632710580a0d320f676d367b9190721194514457761af05207cdab2b6328b1b3767eacb36a7ef4f7bd2e16762d13df188e0898b7410f62459458712a44bf594ae662fd89eb300abb6952ff8ad40164f2bcd7f86db5c7650b654b79046de55d51aa8061ce35f867a3e8f5bf98ad920be827101c64fb871d86e53a4b3c0455bfac5784168218aa72cbee86d9c750a9fa63c363a8b43d7bf4b2762516706a306f0aa3be1ec788b5e13f8b24837e53ac414f211e11c7a093cd9653dfa5fba4e377c79adfa5e841e2ddb6afc054fc715c05ddc6c8fc3e1ee3406e1ffceb2df77dc2f02652614d1bfcfaddebaa53ba919c7051034e2c7b7cfaabdf89f26e7f8e3f956d205dfab747ad0cb505b85b54a68439621b25832cbc2898919d0cd7c0a64cfd235388982dd4dd68240cb668f57e1d2619a656ed326f8c92357ee0d9acead3c20008bc5f04ca8059b55d77861c6d04dfc57cfba57315075acbe1451c96cf28e1e328e142890248d18f53b5d3513ce574dea7156cf596fdb3d909095ec287651f9cf1bcdc791c5938a5dd9b47e84c004d24ab3ae74492c7e8dcc1da15f65324be2672947ec82074cac8ce2b925bc555facbbf1b55d63ea6fbea6a785c97d4caf2e1dad9551b7f66c31caae5ebc7c0047e892f201308fcf452c588be0e63d89152113d87bf0dbd01603b4cdc7f0b724b0714a9851887a01f709408882e18230fe810b9fafa58a666654576d8eba3005f07221f55a6193815a672e5db56204053bc4286fa3db38250396309fd28011b5708a26a2d76c4a333b69b6bfd272fb", + 
"34e34397b8621ec2f2b54dbe6c14073e267324cd60b152bce76aec8729a6ddefb61bc263be4b57bd592aae604a32bea69afe6ef4a6b573c26b17d69381ec1fc9b5aa769d148f2f1f8b5377a73840bb6dffc324ded0d1c00dc0c99e3dbc13273b2f89510af6410b525dd8836208abbbaae12753ae2276fa0ca49950374f94e187bf65cefcdd9dd9142074edc4bd0052d0eb027cb1ab6182497f9a10f9fe800b3228e3c088dab60081c807b30a67313667ca8c9e77b38b161a037cae8e973038d0fc4a97ea215914c6c4e23baf6ac4f0fb1e7fcc8aac3f6303658dae1f91588b535eb678e2200f45383c2590a55dc181a09f2209da72f79ae6745992c803310d39f960e8ecf327aed706e4b3e2704eeb9b304dc0e0685f5dcd0389ec377bdba37610ad556a0e957a413a56339dd3c40817214bced5802beee2ee545bdd713208751added5fc0eb2bc89a5aa2decb18ee37dac39f22a33b60cc1a369d24de9f3d2d8b63c039e248806de4e36a47c7a0aed30edd30c3d62debdf1ad82bf7aedd7edec413850d91c261e12beec7ad1586a9ad25b2db62c58ca17119d61dcc4f3e5c4520c42a8e384a45d8659b338b3a08f9e123a1d3781f5fc97564ccff2c1d97f06fa0150cfa1e20eacabefb0c339ec109336d207cc63d9170752fc58314c43e6d4a528fd0975afa85f3aa186ff1b6b8cb12c97ed4ace295b0ef5f075f0217665b8bb180246b87982d10f43c9866b22878106f5214e99188781180478b07764a5e12876ddcb709e0a0a8dd42cf004c695c6fc1669a6fd0e4a1ca54b024d0d80eac492a9e5036501f36fb25b72a054189294955830e43c18e55668337c8c6733abb09fc2d4ade18d5a853a2b82f7b4d77151a64985004f1d9218f2945b63c56fdebd1e96a2a7e49fa70acb4c39873947b83c191c10e9a8f40f60f3ad5a2be47145c22ea59ed3f5f4e61cb069e875fb67142d281d784bf925cc286eacc2c43e94d08da4924b83e58dbf2e43fa625bdd620eba6d9ce960ff17d14ed1f2dbee7d08eceb540fdc75ff06dabc767267658fad8ce99e2a3236e46d2deedcb51c3c6f81589357edebac9772a70b3d910d83cd1b9ce6534a011e9fa557b891a23b5d88afcc0d9856c6dabeab25eea55e9a248182229e4927f268fe5431672fcce52f434ca3d27d1a2136bae5770bb36920df12fbc01d0e8165610efa04794f414c1417f1d4059435c5385bfe2de83ce0e238d6fd2dbd3c0487c69843298577bfa480fe2a16ab2a0e4bc712cd8b5a14871cda61c993b6835303d9043d7689a", + 
"74a4ea61339463642a2182758871b2ea724f31f531aa98d80f1c3043febca41d5ee52e8b1e127e61719a0d078db8909748d57839e58424b91f063c4fbc8a221bef261140e66a9b596ca6d420a973ad5431adfa8280a7355462fe50d4cac15cdfbd7a535c4b72a0b6d7d8a64cff3f719ff9b8be28036826342dc3bf3781efc70063d1e6fc79dff86334ae0564a5ab87bd61f8446465ef6713f8c4ef9d0200ebb375f90ee115216b469af42de554622df222858d30d733af1c9223e327ae09d9126be8baee6dd59a112d83a57cc6e0252104c11bc11705d384220eedd72f1a29a0597d97967e28b2ad13ba28b3d8a53c3613c1bb49fe9700739969ef1f795034ef9e2e983af2d3bbd6c637fb12f2f7dfc3aee85e08711e9b604106e95d7a4974e5b047674a6015792dae5d913681d84f71edd415910582e5d86590df2ecfd561dc6e1cdb08d3e10901312326a45fb0498a177319389809c6ba07a76cfad621e07b9af097730e94df92fbd311b2cb5da32c80ab5f14971b6d40f8e2ab202ac98bd8439790764a40bf309ea2205c1632610956495720030a25dc7118e0c868fdfa78c3e9ecce58215579a0581b3bafdb7dbbe53be9e904567fdc0ce1236aab5d22f1ebc18997e3ea83d362d891e04c5785fd5238326f767bce499209f8db211a50e1402160486e98e7235cf397dbb9ae19fd9b79ef589c821c6f99f28be33452405a003b33f4540fe0a41dfcc286f4d7cc10b70552ba7850869abadcd4bb7f256823face853633d6e2a999ac9fcd259c71d08e266db5d744e1909a62c0db673745ad9585949d108ab96640d2bc27fb4acac7fa8b170a30055a5ede90e004df9a44bdc29aeb4a6bec1e85dde1de6aaf01c6a5d12405d0bec22f49026cb23264f8c04b8401d3c2ab6f2e109948b6193b3bec27adfe19fb8afb8a92364d6fc5b219e8737d583e7ff3a4bcb75d53edda3bf3f52896ac36d8a877ad9f296ea6c045603fc62ac4ae41272bde85ef7c3b3fd3538aacfd5b025fefbe277c2906821ecb20e6f75ea479fa3280f9100fb0089203455c56b6bc775e5c2f0f58c63edd63fa3eec0b40da4b276d0d41da2ec0ead865a98d12bc694e23d8eaadd2b4d0ee88e9570c88fb878930f492e036d27998d593e47763927ff7eb80b188864a3846dd2238f7f95f4090ed399ae95deaeb37abca1cf37c397cc12189affb42dca46b4ff6988eb8c060691d155302d448f50ff70a794d97c0408f8cee9385d6a71fa412e36edcb22dbf433db9db4779f27b682ee17fc05e70c8e794b9f7f6d1", + 
"84986c936d26bfd3bb2d34d3ec62cfdb63e0032fdb3d9d75f3e5d456f73dffa7e35aab1db4f1bd3b98ff585caf004f656c51037a3f4e810d275f3f6aea0c8e3a125ebee5f374b6440bcb9bb2955ebf70c06d64090f9f6cf098200305f7f4305ba9e1350a0c3f7dab4ccf35b8399b9650d8e363bf83d3a0a09706433f0adae6562eb338b21ea6f21329b3775905e59187c325c9cbf589f5da5e915d9e5ad1d21aa1431f9bdc587185ed8b5d4928e697e67cc96bee6d5354e3764cede3f385588fa665310356b2b1e68f8bd30c75d395405614a40a587031ebd6ace60dfb7c6dd188b572bd8e3e9a47b06c2187b528c5ed35c32da5130a21cd881138a5fcac806858ce6c596d810a7492eb261bcc91cead1dae75075b950c2e81cecf7e5fdb2b51df005d285803201ce914dfbf3218383829a0caa8f15486dd801133f1ed7edec436730b0ec98f48732547927229ac80269fcdc5e4f4db264274e940178732b429f9f0e582c559f994a7cdfb76c93ffc39de91ff936316726cc561a6520d47b2cd487299a96322dadc463ef06127fc63902ff9cc4f265e2fbd9de3fa5e48b7b51aa0850580ef9f3b5ebb60c6c3216c5a75a93e82936113d9cad57ae4a94dd6481954a9bd1b5cff4ab29ca221fa2bf9b28a362c9661206f896fc7cec563fb80aa5eaccb26c09fa4ef7a981e63028a9c4dac12f82ccb5bea090d56bbb1a4c431e315d9a169299224a8dbd099fb67ea61dfc604edf8a18ee742550b636836bb552dabb28820221bf8546331f32b0c143c1c89310c4fa2e1e0e895ce1a1eb0f43278fdb528131a3e32bfffe0c6de9006418f5309cba773ca38b6ad8507cc59445ccc0257506ebc16a4c01d4cd97e03fcf7a2049fea0db28447858f73b8e9fe98b391b136c9dc510288630a1f0af93b26a8891b857bfe4b818af99a1e011e6dbaa53982d29cf74ae7dffef45545279f19931708ed3eede5e82280eab908e8eb80abff3f1f023ab66869297b40da8496861dc455ac3abe1efa8a6f9e2c4eda48025d43a486a3f26f269743eaa30d6f0e1f48db6287751358a41f5b07aee0f098862e3493731fe2697acce734f004907c6f11eef189424fee52cd30ad708707eaf2e441f52bcf3d0c5440c1742458653c0c8a27b5ade784d9e09c8b47f1671901a29360e7e5e94946b9c75752a1a8d599d2a3e14ac81b84d42115cd688c8383a64fc6e7e1dc5568bb4837358ebe63207a4067af66b2027ad2ce8fb7ae3a452d40723a51fdf9f9c9913e8029a222cf81d12ad41e58860d75deb6de30ad", + ]; + + let onion_keys = build_test_onion_keys(); + + let mut attribution_data = AttributionData::new(); + attribution_data.update(&[], 
onion_keys[4].shared_secret.as_ref(), 1);
+
+		let logger: Arc<TestLogger> = Arc::new(TestLogger::new());
+
+		attribution_data.crypt(onion_keys[4].shared_secret.as_ref());
+
+		assert_data(&attribution_data, EXPECTED_MESSAGES[0]);
+
+		for idx in (0..4).rev() {
+			let shared_secret = onion_keys[idx].shared_secret.as_ref();
+			let hold_time = (5 - idx) as u32;
+
+			attribution_data.shift_right();
+			attribution_data.update(&[], shared_secret, hold_time);
+			attribution_data.crypt(shared_secret);
+
+			assert_data(&attribution_data, EXPECTED_MESSAGES[4 - idx]);
+		}
+
+		let ctx_full = Secp256k1::new();
+		let path = build_test_path();
+		let hold_times = decode_fulfill_attribution_data(
+			&ctx_full,
+			&logger,
+			&path,
+			&get_test_session_key(),
+			attribution_data.clone(),
+		);
+
+		assert_eq!(hold_times, [5, 4, 3, 2, 1])
+	}
+
 	fn build_trampoline_test_path() -> Path {
 		Path {
 			hops: vec![
diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs
index 27a25f27fa0..7c8287428de 100644
--- a/lightning/src/ln/outbound_payment.rs
+++ b/lightning/src/ln/outbound_payment.rs
@@ -1687,7 +1687,6 @@ impl OutboundPayments {
 					error_code: None,
 					#[cfg(any(test, feature = "_test_utils"))]
 					error_data: None,
-					#[cfg(any(test, feature = "_test_utils"))]
 					hold_times: Vec::new(),
 				};
 				events.push_back((event, None));
@@ -2176,6 +2175,7 @@ impl OutboundPayments {
 						payment_id,
 						payment_hash,
 						path,
+						hold_times: Vec::new(),
 					}, Some(ev_completion_action)));
 				}
 			}
@@ -2185,12 +2185,12 @@ impl OutboundPayments {
 	}
 
 	#[rustfmt::skip]
-	pub(super) fn finalize_claims(&self, sources: Vec<HTLCSource>,
+	pub(super) fn finalize_claims<I: IntoIterator<Item = (HTLCSource, Vec<u32>)>>(&self, sources: I,
 		pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
 	{
 		let mut outbounds = self.pending_outbound_payments.lock().unwrap();
 		let mut pending_events = pending_events.lock().unwrap();
-		for source in sources {
+		for (source, hold_times) in sources {
 			if let HTLCSource::OutboundRoute {
 				session_priv, payment_id, path, ..
} = source { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); @@ -2203,6 +2203,7 @@ impl OutboundPayments { payment_id, payment_hash, path, + hold_times }, None)); } } @@ -2326,6 +2327,7 @@ impl OutboundPayments { short_channel_id, payment_failed_permanently, failed_within_blinded_path, + hold_times, .. } = onion_error.decode_onion_failure(secp_ctx, logger, &source); @@ -2454,7 +2456,6 @@ impl OutboundPayments { error_code: onion_error_code.map(|f| f.failure_code()), #[cfg(any(test, feature = "_test_utils"))] error_data: onion_error_data, - #[cfg(any(test, feature = "_test_utils"))] hold_times, } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index ac74ae2d833..f500c889dc3 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -61,6 +61,8 @@ use crate::ln::functional_test_utils::*; use crate::routing::gossip::NodeId; use core::cmp::Ordering; +#[cfg(feature = "std")] +use std::thread; #[cfg(feature = "std")] use { @@ -529,6 +531,65 @@ fn test_mpp_keysend() { claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap())); } +#[test] +#[cfg(feature = "std")] +fn test_fulfill_hold_times() { + // Tests that as a sender we correctly receive non-zero hold times for a keysend payment. 
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	let recv_value = 5_000_000;
+	let route_params = RouteParameters::from_payment_params_and_value(
+		PaymentParameters::for_keysend(node_c_id, 40, true),
+		recv_value,
+	);
+
+	let preimage = Some(PaymentPreimage([42; 32]));
+	let payment_secret = PaymentSecret([42; 32]);
+	let onion = RecipientOnionFields::secret_only(payment_secret);
+	let retry = Retry::Attempts(0);
+	let id = PaymentId([42; 32]);
+	let hash =
+		nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]];
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+
+	let ev = remove_first_msg_event_to_node(&node_b_id, &mut events);
+	let payment_secret = Some(payment_secret);
+	pass_along_path(&nodes[0], route[0], recv_value, hash, payment_secret, ev, true, preimage);
+
+	// Delay claiming so that we get a non-zero hold time.
+	thread::sleep(Duration::from_millis(200));
+
+	let (_, path_events) =
+		claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap()));
+
+	assert_eq!(path_events.len(), 1);
+	match &path_events[0] {
+		Event::PaymentPathSuccessful { hold_times, .. } => {
+			assert_eq!(hold_times.len(), 2);
+
+			// The final node always reports a zero hold time.
+			assert!(hold_times[1] == 0);
+
+			// Its predecessor reports a non-zero hold time because we delayed claiming.
+ assert!(hold_times[0] > 0); + }, + _ => panic!("Unexpected event"), + } +} + #[test] fn test_reject_mpp_keysend_htlc_mismatching_secret() { // This test enforces that we reject MPP keysend HTLCs if the payment_secrets between MPP parts