@@ -40,7 +40,7 @@ use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentPath,
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
+use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, MAX_BLOCKS_FOR_CONF, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
@@ -2824,7 +2824,7 @@ pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
 pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
+/// HTLC's CLTV. The current default represents roughly eight hours of blocks at six blocks/hour.
 ///
 /// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
 ///
@@ -2833,7 +2833,7 @@ pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 // i.e. the node we forwarded the payment on to should always have enough room to reliably time out
 // the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 // CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*8;
 // This should be long enough to allow a payment path drawn across multiple routing hops with substantial
 // `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
 // in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
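// Editor's aside (illustration, not part of the diff): the bump from 6*7 to 6*8
// raises the enforced floor to 48 blocks, i.e. roughly eight hours at six blocks
// per hour. As the doc comment above notes, a node can only raise this further
// through the config knob; a minimal sketch, assuming the `lightning` crate's
// `UserConfig`/`ChannelConfig` layout:
use lightning::util::config::UserConfig;

fn stricter_forwarding_config() -> UserConfig {
	let mut config = UserConfig::default();
	// Hypothetical tightening: 72 blocks (~12 hours), above the 48-block floor.
	config.channel_config.cltv_expiry_delta = 72;
	config
}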
@@ -2850,19 +2850,34 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
 // a payment was being routed, so we add an extra block to be safe.
 pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
-// ie that if the next-hop peer fails the HTLC within
-// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
-// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
-// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
-// LATENCY_GRACE_PERIOD_BLOCKS.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get everything on chain and locked
+// in with enough time left to fail the corresponding HTLC back to our inbound edge before they
+// force-close on us.
+// In other words, if the next-hop peer fails the HTLC LATENCY_GRACE_PERIOD_BLOCKS after our
+// CLTV_CLAIM_BUFFER (because that's how many blocks we allow them after expiry), we'll still have
+// 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY left to get two transactions on chain and the second
+// fully locked in before the peer force-closes on us (LATENCY_GRACE_PERIOD_BLOCKS before the
+// expiry, i.e. assuming the peer force-closes right at the expiry and we're behind by
+// LATENCY_GRACE_PERIOD_BLOCKS).
+const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS + 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
+);
+
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get the HTLC preimage back to our
+// counterparty if the outbound edge gives us the preimage only one block before we'd force-close
+// the channel.
+// i.e. they provide the preimage LATENCY_GRACE_PERIOD_BLOCKS - 1 after the HTLC expires, then we
+// pass the preimage back, which takes LATENCY_GRACE_PERIOD_BLOCKS to complete, and we want to make
+// sure this all happens at least N blocks before the inbound HTLC expires (where N is the
+// counterparty's CLTV_CLAIM_BUFFER or equivalent).
+const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;
 
-// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const _CHECK_COUNTERPARTY_REALISTIC: () =
+	assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
+
+const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
+);
 
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
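// Editor's aside (illustration, not part of the diff): the old `#[allow(dead_code)]`
// subtraction constants could only fail via an obscure const-eval underflow error,
// whereas `assert!` in a `const` item is evaluated at compile time with an explicit
// message, so a violated bound now breaks the build directly. A self-contained
// sketch of the pattern, using stand-in values rather than the real LDK constants:
const GRACE_BLOCKS: u32 = 3; // reaction latency we forgive the peer
const CONF_BLOCKS: u32 = 18; // worst-case blocks to confirm one transaction
const REORG_DELAY: u32 = 6; // blocks before a confirmation is reorg-safe
const EXPIRY_DELTA: u32 = 48; // budget between inbound and outbound expiry
// Compilation fails here if EXPIRY_DELTA is ever lowered below the required budget.
const _DELTA_COVERS_BUDGET: () = assert!(
	EXPIRY_DELTA >= 2 * GRACE_BLOCKS + 2 * CONF_BLOCKS + REORG_DELAY
);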
@@ -15979,15 +15994,15 @@ mod tests {
 		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive(msgs::InboundOnionReceivePayload {
 			sender_intended_htlc_amt_msat: 100,
-			cltv_expiry_height: 22,
+			cltv_expiry_height: TEST_FINAL_CLTV,
 			payment_metadata: None,
 			keysend_preimage: None,
 			payment_data: Some(msgs::FinalOnionHopData {
 				payment_secret: PaymentSecret([0; 32]),
 				total_msat: 100,
 			}),
 			custom_tlvs: Vec::new(),
-		}), [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);
+		}), [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);
 
 		// Should not return an error as this condition:
 		// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
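// Editor's aside (illustration, not part of the diff): deriving both heights from
// TEST_FINAL_CLTV instead of the bare literals 22 and 23 keeps the invariant under
// test explicit: the onion's stated final expiry (TEST_FINAL_CLTV) must not exceed
// the expiry of the HTLC that delivered it (TEST_FINAL_CLTV + 1). Roughly, with a
// hypothetical helper:
fn onion_expiry_acceptable(onion_cltv_expiry: u32, htlc_cltv_expiry: u32) -> bool {
	// Per the BOLT 4 condition linked above, the HTLC must be failed when it
	// expires before the expiry the onion claims; otherwise it is acceptable.
	onion_cltv_expiry <= htlc_cltv_expiry
}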