diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..79f6fa0ce92 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `Splice` - Allows replacing the funding transaction with a new one +//! (see [BOLT PR #1160](https://github.com/lightning/bolts/pull/1160) for more information). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -163,7 +165,7 @@ mod sealed { // Byte 6 ZeroConf, // Byte 7 - Trampoline | SimpleClose, + Trampoline | SimpleClose | Splice, ] ); define_context!( @@ -184,7 +186,7 @@ mod sealed { // Byte 6 ZeroConf | Keysend, // Byte 7 - Trampoline | SimpleClose, + Trampoline | SimpleClose | Splice, // Byte 8 - 31 ,,,,,,,,,,,,,,,,,,,,,,,, // Byte 32 @@ -673,9 +675,20 @@ mod sealed { supports_simple_close, requires_simple_close ); - // By default, allocate enough bytes to cover up to SimpleClose. Update this as new features are + define_feature!( + 63, + Splice, + [InitContext, NodeContext], + "Feature flags for channel splicing.", + set_splicing_optional, + set_splicing_required, + clear_splicing, + supports_splicing, + requires_splicing + ); + // By default, allocate enough bytes to cover up to Splice. Update this as new features are // added which we expect to appear commonly across contexts. - pub(super) const MIN_FEATURES_ALLOCATION_BYTES: usize = (61 + 7) / 8; + pub(super) const MIN_FEATURES_ALLOCATION_BYTES: usize = (63 + 7) / 8; define_feature!( 259, DnsResolver, @@ -1369,6 +1382,7 @@ mod tests { init_features.set_zero_conf_optional(); init_features.set_quiescence_optional(); init_features.set_simple_close_optional(); + init_features.set_splicing_optional(); assert!(init_features.initial_routing_sync()); assert!(!init_features.supports_upfront_shutdown_script()); @@ -1384,7 +1398,7 @@ mod tests { // - onion_messages // - option_channel_type | option_scid_alias // - option_zeroconf - // - option_simple_close + // - option_simple_close | option_splice assert_eq!(node_features.flags.len(), 8); assert_eq!(node_features.flags[0], 0b00000001); assert_eq!(node_features.flags[1], 0b01010001); @@ -1393,7 +1407,7 @@ mod tests { assert_eq!(node_features.flags[4], 0b10001000); assert_eq!(node_features.flags[5], 0b10100000); assert_eq!(node_features.flags[6], 0b00001000); - assert_eq!(node_features.flags[7], 0b00100000); + assert_eq!(node_features.flags[7], 0b10100000); } // Check that cleared flags are kept blank when converting back: diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b516961d100..f8ffedfc8ad 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1213,6 +1213,8 @@ pub(super) struct ReestablishResponses { pub shutdown_msg: Option, pub tx_signatures: Option, pub tx_abort: Option, + pub splice_locked: Option, + pub implicit_splice_locked: Option, } /// The first message we send to our peer after connection @@ -1662,12 +1664,12 @@ where /// send our peer to begin the channel reconnection process. 
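Relating to the `features.rs` hunk above, a quick standalone sanity check (illustrative only, not part of the patch): the optional `Splice` bit is 63, so it lands in flags byte 63 / 8 == 7 with mask 1 << (63 % 8), which together with optional `SimpleClose` at bit 61 reproduces the updated `flags[7]` test vector, and (63 + 7) / 8 == 8 bytes is exactly enough to hold it.

fn main() {
    let splice_optional_bit = 63u32;
    let simple_close_optional_bit = 61u32;

    let byte_idx = |bit: u32| (bit / 8) as usize;
    let mask = |bit: u32| 1u8 << (bit % 8);

    // Both optional bits live in the eighth flags byte (index 7)...
    assert_eq!(byte_idx(splice_optional_bit), 7);
    assert_eq!(byte_idx(simple_close_optional_bit), 7);
    // ...and together they form the updated test vector for flags[7].
    assert_eq!(mask(splice_optional_bit) | mask(simple_close_optional_bit), 0b1010_0000);
    // The minimum allocation of (63 + 7) / 8 bytes covers bit 63.
    assert_eq!((63 + 7) / 8, 8);
}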
#[rustfmt::skip] pub fn peer_connected_get_handshake( - &mut self, chain_hash: ChainHash, logger: &L, + &mut self, chain_hash: ChainHash, features: &InitFeatures, logger: &L, ) -> ReconnectionMsg where L::Target: Logger { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => - ReconnectionMsg::Reestablish(chan.get_channel_reestablish(logger)), + ReconnectionMsg::Reestablish(chan.get_channel_reestablish(features, logger)), ChannelPhase::UnfundedOutboundV1(chan) => { chan.get_open_channel(chain_hash, logger) .map(|msg| ReconnectionMsg::Open(OpenChannelMessage::V1(msg))) @@ -2169,6 +2171,10 @@ impl FundingScope { pub fn get_short_channel_id(&self) -> Option { self.short_channel_id } + + fn is_splice(&self) -> bool { + self.channel_transaction_parameters.splice_parent_funding_txid.is_some() + } } /// Info about a pending splice, used in the pre-splice channel @@ -5956,6 +5962,7 @@ macro_rules! promote_splice_funding { core::mem::swap(&mut $self.funding, $funding); $self.pending_splice = None; $self.pending_funding.clear(); + $self.context.announcement_sigs = None; $self.context.announcement_sigs_state = AnnouncementSigsState::NotSent; }; } @@ -8305,7 +8312,8 @@ where #[rustfmt::skip] pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, features: &InitFeatures, user_config: &UserConfig, + best_block: &BestBlock, ) -> Result where L::Target: Logger, @@ -8388,6 +8396,8 @@ where shutdown_msg, announcement_sigs, tx_signatures: None, tx_abort: None, + splice_locked: None, + implicit_splice_locked: None, }); } @@ -8399,6 +8409,8 @@ where shutdown_msg, announcement_sigs, tx_signatures: None, tx_abort: None, + splice_locked: None, + implicit_splice_locked: None, }); } @@ -8429,94 +8441,209 @@ where let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke(); let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() == 1 { + let splicing_negotiated = features.supports_splicing(); + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() == 1 && !splicing_negotiated { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady self.get_channel_ready(logger) + } else if splicing_negotiated { + let funding_txid = self + .maybe_get_my_current_funding_locked(features) + .filter(|funding| !funding.is_splice()) + .map(|funding| { + funding.get_funding_txid().expect("funding_txid should always be set") + }); + + // A node: + // - if `option_splice` was negotiated and `your_last_funding_locked` is not + // set in the `channel_reestablish` it received: + // - MUST retransmit `channel_ready`. + msg.your_last_funding_locked_txid + .is_none() + .then(|| funding_txid) + .flatten() + // The sending node: + // - if `my_current_funding_locked` is included: + // - if `announce_channel` is set for this channel: + // - if it has not received `announcement_signatures` for that transaction: + // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. 
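The chained combinators above implement three retransmission rules for `channel_ready`. As a distilled illustration only (hypothetical types and names, not LDK's actual API), the decision when `option_splice` is negotiated can be read as a single boolean:

type Txid = [u8; 32];

struct ReestablishView {
    // `your_last_funding_locked` received in the peer's `channel_reestablish`, if any.
    your_last_funding_locked_txid: Option<Txid>,
    // Our current funding txid, only when it is not itself a splice transaction.
    current_funding_txid: Option<Txid>,
    // The channel is announced and we are still waiting on `announcement_signatures`.
    awaiting_announcement_sigs: bool,
}

fn should_resend_channel_ready(v: &ReestablishView) -> bool {
    let funding_txid = match v.current_funding_txid {
        Some(txid) => txid,
        None => return false,
    };
    // MUST retransmit if the peer never set `your_last_funding_locked`...
    v.your_last_funding_locked_txid.is_none()
        // ...or if we still need `announcement_signatures` for an announced channel...
        || v.awaiting_announcement_sigs
        // ...or if the last funding txid the peer saw locked is stale.
        || v.your_last_funding_locked_txid != Some(funding_txid)
}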
+ .or_else(|| { + funding_txid + .filter(|_| self.context.config.announce_for_forwarding) + .filter(|_| self.context.announcement_sigs.is_none()) + }) + // TODO: The language from the spec below should be updated to be in terms of + // `your_last_funding_locked` received and `my_current_funding_locked` sent rather + // than other messages received. + // + // - if it receives `channel_ready` for that transaction after exchanging `channel_reestablish`: + // - MUST retransmit `channel_ready` in response, if not already sent since reconnecting. + .or_else(|| { + msg.your_last_funding_locked_txid + .and_then(|last_funding_txid| { + funding_txid.filter(|funding_txid| last_funding_txid != *funding_txid) + }) + }) + .and_then(|_| self.get_channel_ready(logger)) } else { None }; - if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); - } else { - log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); - } + let sent_splice_txid = self + .maybe_get_my_current_funding_locked(features) + .filter(|funding| funding.is_splice()) + .map(|funding| { + funding.get_funding_txid().expect("Splice funding_txid should always be set") + }); + let splice_locked = msg + // A receiving node: + // - if `your_last_funding_locked` is set and it does not match the most recent + // `splice_locked` it has sent: + // - MUST retransmit `splice_locked`. + .your_last_funding_locked_txid + .and_then(|last_funding_txid| { + sent_splice_txid.filter(|sent_splice_txid| last_funding_txid != *sent_splice_txid) + }) + // The sending node: + // - if `my_current_funding_locked` is included: + // - if `announce_channel` is set for this channel: + // - if it has not received `announcement_signatures` for that transaction: + // - MUST retransmit `channel_ready` or `splice_locked` after exchanging `channel_reestablish`. + .or_else(|| { + sent_splice_txid + .filter(|_| self.context.config.announce_for_forwarding) + .filter(|sent_splice_txid| { + if self.funding.get_funding_txid() == Some(*sent_splice_txid) { + self.context.announcement_sigs.is_none() + } else { + true + } + }) + }) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }); - // if next_funding_txid is set: - let (commitment_update, tx_signatures, tx_abort) = if let Some(next_funding_txid) = msg.next_funding_txid { - if let Some(session) = &self.interactive_tx_signing_session { - // if next_funding_txid matches the latest interactive funding transaction: - let our_next_funding_txid = session.unsigned_tx().compute_txid(); - if our_next_funding_txid == next_funding_txid { - debug_assert_eq!(session.unsigned_tx().compute_txid(), self.maybe_get_next_funding_txid().unwrap()); - - let commitment_update = if !self.context.channel_state.is_their_tx_signatures_sent() && msg.next_local_commitment_number == 0 { - // if it has not received tx_signatures for that funding transaction AND - // if next_commitment_number is zero: - // MUST retransmit its commitment_signed for that funding transaction. 
- let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger)?; - Some(msgs::CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }) - } else { None }; + // A receiving node: + // - if splice transactions are pending and `my_current_funding_locked` matches one of + // those splice transactions, for which it hasn't received `splice_locked` yet: + // - MUST process `my_current_funding_locked` as if it was receiving `splice_locked` + // for this `txid`. + #[cfg(splicing)] + let implicit_splice_locked = msg.my_current_funding_locked_txid.and_then(|funding_txid| { + self.pending_funding + .iter() + .find(|funding| funding.get_funding_txid() == Some(funding_txid)) + .and_then(|_| { + self.pending_splice.as_ref().and_then(|pending_splice| { + (Some(funding_txid) != pending_splice.received_funding_txid) + .then(|| funding_txid) + }) + }) + .map(|splice_txid| msgs::SpliceLocked { + channel_id: self.context.channel_id, + splice_txid, + }) + }); + #[cfg(not(splicing))] + let implicit_splice_locked = None; + + let mut commitment_update = None; + let mut tx_signatures = None; + let mut tx_abort = None; + + // if next_funding_txid is set: + if let Some(next_funding_txid) = msg.next_funding_txid { + // - if `next_funding_txid` matches the latest interactive funding transaction + // or the current channel funding transaction: + if let Some(session) = &self.interactive_tx_signing_session { + let our_next_funding_txid = self.maybe_get_next_funding_txid(); + if let Some(our_next_funding_txid) = our_next_funding_txid { + if our_next_funding_txid != next_funding_txid { + return Err(ChannelError::close(format!( + "Unexpected next_funding_txid: {}; expected: {}", + next_funding_txid, our_next_funding_txid, + ))); + } + + if !session.has_received_commitment_signed() { + self.context.expecting_peer_commitment_signed = true; + } + + // - if `next_commitment_number` is equal to the commitment number of the + // `commitment_signed` message it sent for this funding transaction: + // - MUST retransmit its `commitment_signed` for that funding transaction. + if msg.next_local_commitment_number == next_counterparty_commitment_number { + // `next_counterparty_commitment_number` is guaranteed to always be the + // commitment number of the `commitment_signed` message we sent for this + // funding transaction. If they set `next_funding_txid`, then they should + // not have processed our `tx_signatures` yet, which implies that our state + // machine is still paused and no updates can happen that would increment + // our `next_counterparty_commitment_number`. + // + // If they did set `next_funding_txid` even after processing our + // `tx_signatures` erroneously, this may end up resulting in a force close. + // // TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready. - let tx_signatures = if ( - // if it has not received tx_signatures for that funding transaction AND - // if it has already received commitment_signed AND it should sign first, as specified in the tx_signatures requirements: - // MUST send its tx_signatures for that funding transaction. 
- !self.context.channel_state.is_their_tx_signatures_sent() && session.has_received_commitment_signed() && session.holder_sends_tx_signatures_first() - // else if it has already received tx_signatures for that funding transaction: - // MUST send its tx_signatures for that funding transaction. - ) || self.context.channel_state.is_their_tx_signatures_sent() { - if self.context.channel_state.is_monitor_update_in_progress() { - // The `monitor_pending_tx_signatures` field should have already been set in `commitment_signed_initial_v2` - // if we were up first for signing and had a monitor update in progress, but check again just in case. - debug_assert!(self.context.monitor_pending_tx_signatures.is_some(), "monitor_pending_tx_signatures should already be set"); - log_debug!(logger, "Not sending tx_signatures: a monitor update is in progress. Setting monitor_pending_tx_signatures."); - if self.context.monitor_pending_tx_signatures.is_none() { - self.context.monitor_pending_tx_signatures = session.holder_tx_signatures().clone(); - } - None - } else { - // If `holder_tx_signatures` is `None` here, the `tx_signatures` message will be sent - // when the holder provides their witnesses as this will queue a `tx_signatures` if the - // holder must send one. - session.holder_tx_signatures().clone() + let commitment_signed = self.context.get_initial_commitment_signed(&self.funding, logger)?; + commitment_update = Some(msgs::CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }); + } + + // - if it has already received `commitment_signed` and it should sign first, + // as specified in the [`tx_signatures` requirements](#the-tx_signatures-message): + // - MUST send its `tx_signatures` for that funding transaction. + // + // - if it has already received `tx_signatures` for that funding transaction: + // - MUST send its `tx_signatures` for that funding transaction. + if (session.has_received_commitment_signed() && session.holder_sends_tx_signatures_first()) + || self.context.channel_state.is_their_tx_signatures_sent() + { + if self.context.channel_state.is_monitor_update_in_progress() { + // The `monitor_pending_tx_signatures` field should have already been + // set in `commitment_signed_initial_v2` if we were up first for signing + // and had a monitor update in progress. + if session.holder_sends_tx_signatures_first() { + debug_assert!(self.context.monitor_pending_tx_signatures.is_some()); } } else { - None - }; - if !session.has_received_commitment_signed() { - self.context.expecting_peer_commitment_signed = true; + // If `holder_tx_signatures` is `None` here, the `tx_signatures` message + // will be sent when the user provides their witnesses. + tx_signatures = session.holder_tx_signatures().clone() } - (commitment_update, tx_signatures, None) - } else { - // The `next_funding_txid` does not match the latest interactive funding transaction so we - // MUST send tx_abort to let the remote know that they can forget this funding transaction. - (None, None, Some(msgs::TxAbort { - channel_id: self.context.channel_id(), - data: format!( - "next_funding_txid {} does match our latest interactive funding txid {}", - next_funding_txid, our_next_funding_txid, - ).into_bytes() })) } } else { - // We'll just send a `tx_abort` here if we don't have a signing session for this channel - // on reestablish and tell our peer to just forget about it. 
- // Our peer is doing something strange, but it doesn't warrant closing the channel. - (None, None, Some(msgs::TxAbort { + // The `next_funding_txid` does not match the latest interactive funding + // transaction so we MUST send tx_abort to let the remote know that they can + // forget this funding transaction. + tx_abort = Some(msgs::TxAbort { channel_id: self.context.channel_id(), - data: - "No active signing session. The associated funding transaction may have already been broadcast.".as_bytes().to_vec() })) + data: format!( + "Unexpected next_funding_txid {}", + next_funding_txid, + ).into_bytes() }); } } else { - // Don't send anything related to interactive signing if `next_funding_txid` is not set. - (None, None, None) - }; + // We'll just send a `tx_abort` here if we don't have a signing session for this channel + // on reestablish and tell our peer to just forget about it. + // Our peer is doing something strange, but it doesn't warrant closing the channel. + tx_abort = Some(msgs::TxAbort { + channel_id: self.context.channel_id(), + data: + "No active signing session. The associated funding transaction may have already been broadcast.".as_bytes().to_vec() }); + } + } + + if msg.next_local_commitment_number == next_counterparty_commitment_number { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); + } else { + log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); + } Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, @@ -8525,8 +8652,15 @@ where order: self.context.resend_order.clone(), tx_signatures, tx_abort, + splice_locked, + implicit_splice_locked, }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { + // We've made an update so we must have exchanged `tx_signatures`, implying that + // `commitment_signed` was also exchanged. However, we may still need to retransmit our + // `tx_signatures` if the counterparty sent theirs first but didn't get to process ours. 
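As a simplified illustration of the `next_funding_txid` handling in this block (hypothetical names; it collapses the signing-session state into a few booleans, and is not the patch's actual control flow), the three possible outcomes are:

type Txid = [u8; 32];

struct SigningSessionView {
    // Whether an interactive signing session exists for this channel at all.
    has_signing_session: bool,
    // The txid we would advertise as `next_funding_txid`, if any.
    our_next_funding_txid: Option<Txid>,
    received_commitment_signed: bool,
    we_send_tx_signatures_first: bool,
    their_tx_signatures_received: bool,
}

enum NextFundingResponse {
    // Resume the paused signing flow for the matching funding transaction.
    Resume { retransmit_commitment_signed: bool, send_tx_signatures: bool },
    // Tell the peer to forget the funding transaction.
    TxAbort(&'static str),
    // Irreconcilable state: close the channel.
    Close(&'static str),
}

fn on_next_funding_txid(
    view: &SigningSessionView, next_funding_txid: Txid, next_commitment_number_matches: bool,
) -> NextFundingResponse {
    if !view.has_signing_session {
        // The associated funding transaction may have already been broadcast.
        return NextFundingResponse::TxAbort("No active signing session");
    }
    match view.our_next_funding_txid {
        // We cannot name a pending funding txid ourselves: ask the peer to forget theirs.
        None => NextFundingResponse::TxAbort("Unexpected next_funding_txid"),
        // An active session exists but for a different transaction.
        Some(ours) if ours != next_funding_txid => {
            NextFundingResponse::Close("Unexpected next_funding_txid")
        },
        Some(_) => NextFundingResponse::Resume {
            // The peer has not yet processed our `commitment_signed` for this funding tx.
            retransmit_commitment_signed: next_commitment_number_matches,
            // Either we sign first and already have their `commitment_signed`, or they
            // already sent their `tx_signatures`.
            send_tx_signatures: (view.received_commitment_signed
                && view.we_send_tx_signatures_first)
                || view.their_tx_signatures_received,
        },
    }
}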
+ debug_assert!(commitment_update.is_none()); + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { @@ -8539,8 +8673,10 @@ where channel_ready, shutdown_msg, announcement_sigs, commitment_update: None, raa: None, order: self.context.resend_order.clone(), - tx_signatures: None, - tx_abort: None, + tx_signatures, + tx_abort, + splice_locked, + implicit_splice_locked, }) } else { let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst @@ -8563,8 +8699,10 @@ where channel_ready, shutdown_msg, announcement_sigs, raa, commitment_update, order: self.context.resend_order.clone(), - tx_signatures: None, - tx_abort: None, + tx_signatures, + tx_abort, + splice_locked, + implicit_splice_locked, }) } } else if msg.next_local_commitment_number < next_counterparty_commitment_number { @@ -9403,6 +9541,13 @@ where false } + /// Returns true if thier channel_ready has been received + #[cfg(splicing)] + pub fn is_their_channel_ready(&self) -> bool { + matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)) + || matches!(self.context.channel_state, ChannelState::ChannelReady(_)) + } + /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) @@ -10130,6 +10275,19 @@ where self.sign_channel_announcement(node_signer, announcement).ok() } + fn get_next_local_commitment_number(&self) -> u64 { + if let Some(session) = &self.interactive_tx_signing_session { + if !self.context.channel_state.is_their_tx_signatures_sent() + && !session.has_received_commitment_signed() + { + // FIXME + return unimplemented!(); + } + } + + INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number() + } + #[rustfmt::skip] fn maybe_get_next_funding_txid(&self) -> Option { // If we've sent `commtiment_signed` for an interactively constructed transaction @@ -10150,10 +10308,78 @@ where } } + #[cfg(splicing)] + fn maybe_get_your_last_funding_locked_txid(&self, features: &InitFeatures) -> Option { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.received_funding_txid) + .or_else(|| { + self.is_their_channel_ready().then(|| self.funding.get_funding_txid()).flatten() + }) + } + #[cfg(not(splicing))] + fn maybe_get_your_last_funding_locked_txid(&self, _features: &InitFeatures) -> Option { + None + } + + #[cfg(splicing)] + fn maybe_get_my_current_funding_locked( + &self, features: &InitFeatures, + ) -> Option<&FundingScope> { + if !features.supports_splicing() { + return None; + } + + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.sent_funding_txid) + .and_then(|funding_txid| { + self.pending_funding + .iter() + .find(|funding| funding.get_funding_txid() == Some(funding_txid)) + }) + .or_else(|| self.is_our_channel_ready().then(|| &self.funding)) + } + + #[cfg(not(splicing))] + fn maybe_get_my_current_funding_locked( + &self, _features: &InitFeatures, + ) -> Option<&FundingScope> { + None + } + + #[cfg(splicing)] + fn maybe_get_my_current_funding_locked_txid(&self, features: &InitFeatures) -> Option { + if !features.supports_splicing() { + return None; + } 
+ + self.pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.sent_funding_txid) + .or_else(|| { + self.is_our_channel_ready().then(|| self.funding.get_funding_txid()).flatten() + }) + } + + #[cfg(not(splicing))] + fn maybe_get_my_current_funding_locked_txid(&self, _features: &InitFeatures) -> Option { + None + } + /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d #[rustfmt::skip] - fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + fn get_channel_reestablish( + &mut self, features: &InitFeatures, logger: &L, + ) -> msgs::ChannelReestablish + where + L::Target: Logger, + { assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // This is generally the first function which gets called on any given channel once we're @@ -10189,7 +10415,7 @@ where // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). - next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.holder_commitment_point.transaction_number(), + next_local_commitment_number: self.get_next_local_commitment_number(), // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not @@ -10201,6 +10427,8 @@ where your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey, next_funding_txid: self.maybe_get_next_funding_txid(), + your_last_funding_locked_txid: self.maybe_get_your_last_funding_locked_txid(features), + my_current_funding_locked_txid: self.maybe_get_my_current_funding_locked_txid(features), } } @@ -13688,7 +13916,7 @@ mod tests { // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_b_chan.get_channel_reestablish(&&logger); + let msg = node_b_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); @@ -13696,7 +13924,7 @@ mod tests { // Check that the commitment point in Node A's channel_reestablish message // is sane. assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); - let msg = node_a_chan.get_channel_reestablish(&&logger); + let msg = node_a_chan.get_channel_reestablish(&channelmanager::provided_init_features(&config), &&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9e7031bf8cb..74ec0b029b9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3406,7 +3406,8 @@ macro_rules! 
handle_monitor_update_completion { &mut $peer_state.pending_msg_events, $chan, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, - updates.announcement_sigs, updates.tx_signatures, None); + updates.announcement_sigs, updates.tx_signatures, None, None, + ); if let Some(upd) = channel_update { $peer_state.pending_msg_events.push(upd); } @@ -8059,9 +8060,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, + splice_locked: Option, ) -> (Option<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort, {} splice_locked", &channel.context.channel_id(), if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, @@ -8071,6 +8073,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if announcement_sigs.is_some() { "sending" } else { "without" }, if tx_signatures.is_some() { "sending" } else { "without" }, if tx_abort.is_some() { "sending" } else { "without" }, + if splice_locked.is_some() { "sending" } else { "without" }, ); let counterparty_node_id = channel.context.get_counterparty_node_id(); @@ -8110,6 +8113,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg, }); } + if let Some(msg) = splice_locked { + pending_msg_events.push(MessageSendEvent::SendSpliceLocked { + node_id: counterparty_node_id, + msg, + }); + } macro_rules! handle_cs { () => { if let Some(update) = commitment_update { @@ -10008,7 +10017,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result { - let need_lnd_workaround = { + let (implicit_splice_locked, need_lnd_workaround) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -10025,12 +10034,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { + let features = &peer_state.latest_features; // Currently, we expect all holding cell update_adds to be dropped on peer // disconnect, so Channel's reestablish will never hand us any holding cell // freed HTLCs to fail backwards. If in the future we no longer drop pending // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. 
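For the new `splice_locked` wiring above, a simplified illustration (hypothetical names, and it omits the `announcement_signatures` re-send rule) of the two paths fed into channel resumption: a `splice_locked` we retransmit to the peer, and an implicit `splice_locked` we process locally from the peer's `my_current_funding_locked`:

type Txid = [u8; 32];

struct SpliceReestablishView {
    // Most recent splice txid for which we sent `splice_locked`, if any.
    sent_splice_txid: Option<Txid>,
    // Splice txid for which we last received `splice_locked`, if any.
    received_splice_txid: Option<Txid>,
    // Unpromoted splice funding txids we are still tracking.
    pending_splice_txids: Vec<Txid>,
}

// If the last locked txid the peer saw from us lags behind the `splice_locked` we
// sent, retransmit it.
fn splice_locked_to_retransmit(
    view: &SpliceReestablishView, your_last_funding_locked: Option<Txid>,
) -> Option<Txid> {
    view.sent_splice_txid.filter(|sent| your_last_funding_locked != Some(*sent))
}

// If the peer's `my_current_funding_locked` names one of our pending splices that we
// have not yet seen `splice_locked` for, treat it as an implicit `splice_locked`.
fn process_implicit_splice_locked(
    view: &SpliceReestablishView, my_current_funding_locked: Option<Txid>,
) -> Option<Txid> {
    my_current_funding_locked
        .filter(|txid| view.pending_splice_txids.contains(txid))
        .filter(|txid| view.received_splice_txid != Some(*txid))
}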
let responses = try_channel_entry!(self, peer_state, chan.channel_reestablish( - msg, &&logger, &self.node_signer, self.chain_hash, + msg, &&logger, &self.node_signer, self.chain_hash, features, &self.default_configuration, &*self.best_block.read().unwrap()), chan_entry); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { @@ -10053,13 +10063,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order, Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, - responses.tx_signatures, responses.tx_abort); + responses.tx_signatures, responses.tx_abort, responses.splice_locked, + ); debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); } - need_lnd_workaround + + (responses.implicit_splice_locked, need_lnd_workaround) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_entry); @@ -10091,6 +10103,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ your_last_per_commitment_secret: [1u8; 32], my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(), next_funding_txid: None, + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }, }); return Err(MsgHandleErrInternal::send_err_msg_no_close( @@ -10104,6 +10118,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } + + #[cfg(not(splicing))] + let _ = implicit_splice_locked; + #[cfg(splicing)] + if let Some(splice_locked) = implicit_splice_locked { + self.internal_splice_locked(counterparty_node_id, &splice_locked)?; + } + Ok(NotifyOption::SkipPersistHandleEvents) } @@ -11935,8 +11957,9 @@ where } for (_, chan) in peer_state.channel_by_id.iter_mut() { + let features = &peer_state.latest_features; let logger = WithChannelContext::from(&self.logger, &chan.context(), None); - match chan.peer_connected_get_handshake(self.chain_hash, &&logger) { + match chan.peer_connected_get_handshake(self.chain_hash, features, &&logger) { ReconnectionMsg::Reestablish(msg) => pending_msg_events.push(MessageSendEvent::SendChannelReestablish { node_id: chan.context().get_counterparty_node_id(), diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index c3913d31e9c..6129b72bfd5 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -928,6 +928,16 @@ pub struct ChannelReestablish { /// * `channel_reestablish`-sending node: https:///github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L2466-L2470 /// * `channel_reestablish`-receiving node: https:///github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L2520-L2531 pub next_funding_txid: Option, + /// The last funding txid received by the sending node, which may be: + /// - the txid of the last `splice_locked` it received, otherwise + /// - the txid of the funding transaction if it received `channel_ready`, or else + /// - `None` if it has never received `channel_ready` or `splice_locked` + pub your_last_funding_locked_txid: Option, + /// The last funding txid sent by 
the sending node, which may be: + /// - the txid of the last `splice_locked` it sent, otherwise + /// - the txid of the funding transaction if it sent `channel_ready`, or else + /// - `None` if it has never sent `channel_ready` or `splice_locked` + pub my_current_funding_locked_txid: Option, } /// An [`announcement_signatures`] message to be sent to or received from a peer. @@ -2805,6 +2815,8 @@ impl_writeable_msg!(ChannelReestablish, { my_current_per_commitment_point, }, { (0, next_funding_txid, option), + (1, your_last_funding_locked_txid, option), + (3, my_current_funding_locked_txid, option), }); impl_writeable_msg!(ClosingSigned, @@ -4275,6 +4287,8 @@ mod tests { your_last_per_commitment_secret: [9; 32], my_current_per_commitment_point: public_key, next_funding_txid: None, + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }; let encoded_value = cr.encode(); @@ -4326,6 +4340,8 @@ mod tests { ]) .unwrap(), )), + your_last_funding_locked_txid: None, + my_current_funding_locked_txid: None, }; let encoded_value = cr.encode(); @@ -4349,6 +4365,73 @@ mod tests { ); } + #[test] + fn encoding_channel_reestablish_with_funding_locked_txid() { + let public_key = { + let secp_ctx = Secp256k1::new(); + PublicKey::from_secret_key( + &secp_ctx, + &SecretKey::from_slice( + &>::from_hex( + "0101010101010101010101010101010101010101010101010101010101010101", + ) + .unwrap()[..], + ) + .unwrap(), + ) + }; + + let cr = msgs::ChannelReestablish { + channel_id: ChannelId::from_bytes([ + 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, + ]), + next_local_commitment_number: 3, + next_remote_commitment_number: 4, + your_last_per_commitment_secret: [9; 32], + my_current_per_commitment_point: public_key, + next_funding_txid: None, + your_last_funding_locked_txid: Some(Txid::from_raw_hash( + bitcoin::hashes::Hash::from_slice(&[ + 48, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, + 4, 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, + ]) + .unwrap(), + )), + my_current_funding_locked_txid: Some(Txid::from_raw_hash( + bitcoin::hashes::Hash::from_slice(&[ + 21, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, + 4, 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, + ]) + .unwrap(), + )), + }; + + let encoded_value = cr.encode(); + assert_eq!( + encoded_value, + vec![ + 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, + 0, 0, 0, 0, // channel_id + 0, 0, 0, 0, 0, 0, 0, 3, // next_local_commitment_number + 0, 0, 0, 0, 0, 0, 0, 4, // next_remote_commitment_number + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, // your_last_per_commitment_secret + 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, + 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, + 143, // my_current_per_commitment_point + 1, // Type (your_last_funding_locked_txid) + 32, // Length + 48, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, 4, + 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, // Value + 3, // Type (my_current_funding_locked_txid) + 32, // Length + 21, 167, 250, 69, 152, 48, 103, 172, 164, 99, 59, 19, 23, 11, 92, 84, 15, 80, 4, + 12, 98, 82, 75, 31, 201, 11, 91, 23, 98, 23, 53, 124, // Value + ] + ); + } + macro_rules! get_keys_from { ($slice: expr, $secp_ctx: expr) => {{ let privkey = SecretKey::from_slice(&>::from_hex($slice).unwrap()[..]).unwrap();
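For reference, a tiny illustrative sketch (not LDK's serialization code) of how the two new odd TLV records in the test vector above are laid out: type 1 (`your_last_funding_locked_txid`) and type 3 (`my_current_funding_locked_txid`), each a single-byte BigSize type, a single-byte BigSize length of 32, and the raw txid bytes. Odd types keep the extension ignorable by peers that do not understand it.

fn push_tlv_txid(out: &mut Vec<u8>, tlv_type: u8, txid: Option<[u8; 32]>) {
    if let Some(txid) = txid {
        out.push(tlv_type); // BigSize type (single byte for values below 0xfd)
        out.push(32); // BigSize length
        out.extend_from_slice(&txid);
    }
}

fn main() {
    let mut tlvs = Vec::new();
    push_tlv_txid(&mut tlvs, 1, Some([0x30; 32])); // your_last_funding_locked_txid
    push_tlv_txid(&mut tlvs, 3, Some([0x15; 32])); // my_current_funding_locked_txid
    assert_eq!(tlvs.len(), 2 * (1 + 1 + 32));
    assert_eq!(&tlvs[0..2], &[1u8, 32]);
    assert_eq!(&tlvs[34..36], &[3u8, 32]);
}

Since both records are optional, a `channel_reestablish` without them (as in the pre-existing test vectors) still decodes unchanged.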