diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 59612636a9e..519c9eb46e3 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -85,7 +85,7 @@ use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey} use lightning::util::dyn_signer::DynSigner; use std::cell::RefCell; -use std::cmp::{self, Ordering}; +use std::cmp; use std::mem; use std::sync::atomic; use std::sync::{Arc, Mutex}; @@ -1304,28 +1304,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // deduplicate the calls here. let mut claim_set = new_hash_map(); let mut events = nodes[$node].get_and_clear_pending_events(); - // Sort events so that PendingHTLCsForwardable get processed last. This avoids a - // case where we first process a PendingHTLCsForwardable, then claim/fail on a - // PaymentClaimable, claiming/failing two HTLCs, but leaving a just-generated - // PaymentClaimable event for the second HTLC in our pending_events (and breaking - // our claim_set deduplication). - events.sort_by(|a, b| { - if let events::Event::PaymentClaimable { .. } = a { - if let events::Event::PendingHTLCsForwardable { .. } = b { - Ordering::Less - } else { - Ordering::Equal - } - } else if let events::Event::PendingHTLCsForwardable { .. } = a { - if let events::Event::PaymentClaimable { .. } = b { - Ordering::Greater - } else { - Ordering::Equal - } - } else { - Ordering::Equal - } - }); let had_events = !events.is_empty(); for event in events.drain(..) { match event { @@ -1352,9 +1330,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, events::Event::PaymentForwarded { .. } if $node == 1 => {}, events::Event::ChannelReady { .. } => {}, - events::Event::PendingHTLCsForwardable { .. } => { - nodes[$node].process_pending_htlc_forwards(); - }, events::Event::HTLCHandlingFailed { .. } => {}, _ => { if out.may_fail.load(atomic::Ordering::Acquire) { @@ -1365,6 +1340,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, } } + nodes[$node].process_pending_htlc_forwards(); had_events }}; } @@ -1806,8 +1782,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { last_pass_no_updates = false; continue; } - // ...making sure any pending PendingHTLCsForwardable events are handled and - // payments claimed. + // ...making sure any payments are claimed. if process_events!(0, false) { last_pass_no_updates = false; continue; diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index f6fa07199fa..f4c3289d567 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -635,7 +635,6 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), peer_manager); - let mut should_forward = false; let mut payments_received: Vec = Vec::new(); let mut intercepted_htlcs: Vec = Vec::new(); let mut payments_sent: u16 = 0; @@ -785,10 +784,7 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { } }, 7 => { - if should_forward { - channelmanager.process_pending_htlc_forwards(); - should_forward = false; - } + channelmanager.process_pending_htlc_forwards(); }, 8 => { for payment in payments_received.drain(..) { @@ -1004,9 +1000,6 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { //TODO: enhance by fetching random amounts from fuzz input? payments_received.push(payment_hash); }, - Event::PendingHTLCsForwardable { .. } => { - should_forward = true; - }, Event::HTLCIntercepted { intercept_id, .. 
} => { if !intercepted_htlcs.contains(&intercept_id) { intercepted_htlcs.push(intercept_id); diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index fa89b078de5..17f00600171 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -25,6 +25,7 @@ bitcoin-io = { version = "0.1.2", default-features = false } lightning = { version = "0.2.0", path = "../lightning", default-features = false } lightning-rapid-gossip-sync = { version = "0.2.0", path = "../lightning-rapid-gossip-sync", default-features = false } lightning-liquidity = { version = "0.2.0", path = "../lightning-liquidity", default-features = false } +possiblyrandom = { version = "0.2", path = "../possiblyrandom", default-features = false } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] } diff --git a/lightning-background-processor/src/fwd_batch.rs b/lightning-background-processor/src/fwd_batch.rs new file mode 100644 index 00000000000..8e6761b6e20 --- /dev/null +++ b/lightning-background-processor/src/fwd_batch.rs @@ -0,0 +1,485 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +use core::time::Duration; + +pub(crate) struct BatchDelay { + next_batch_delay_millis: u16, +} + +impl BatchDelay { + pub(crate) fn new() -> Self { + let next_batch_delay_millis = rand_batch_delay_millis(); + Self { next_batch_delay_millis } + } + + pub(crate) fn get(&self) -> Duration { + Duration::from_millis(self.next_batch_delay_millis as u64) + } + + pub(crate) fn next(&mut self) -> Duration { + let next = rand_batch_delay_millis(); + self.next_batch_delay_millis = next; + Duration::from_millis(next as u64) + } +} + +fn rand_batch_delay_millis() -> u16 { + const USIZE_LEN: usize = core::mem::size_of::(); + let mut random_bytes = [0u8; USIZE_LEN]; + possiblyrandom::getpossiblyrandom(&mut random_bytes); + + let index = usize::from_be_bytes(random_bytes) % FWD_DELAYS_MILLIS.len(); + *FWD_DELAYS_MILLIS.get(index).unwrap_or(&50) +} + +// An array of potential forwarding delays (log-normal distribution, 10000 samples, mean = 50, sd = 0.5), generated via the following R-script: +// ``` +// set.seed(123) +// n <- 10000 +// meanlog <- log(50) +// sdlog <- 0.5 +// log_normal_data <- round(rlnorm(n, meanlog = meanlog, sdlog = sdlog)) +// cat(log_normal_data, file = "log_normal_data.txt", sep = ", ") +// ``` +static FWD_DELAYS_MILLIS: [u16; 10000] = [ + 38, 45, 109, 52, 53, 118, 63, 27, 35, 40, 92, 60, 61, 53, 38, 122, 64, 19, 71, 39, 29, 45, 30, + 35, 37, 22, 76, 54, 28, 94, 62, 43, 78, 78, 75, 71, 66, 48, 43, 41, 35, 45, 27, 148, 91, 29, + 41, 40, 74, 48, 57, 49, 49, 99, 45, 107, 23, 67, 53, 56, 60, 39, 42, 30, 29, 58, 63, 51, 79, + 139, 39, 16, 83, 35, 35, 83, 43, 27, 55, 47, 50, 61, 42, 69, 45, 59, 87, 62, 42, 89, 82, 66, + 56, 37, 99, 37, 149, 108, 44, 30, 35, 57, 44, 42, 31, 49, 34, 22, 41, 79, 38, 68, 22, 49, 65, + 58, 53, 36, 33, 30, 53, 31, 39, 44, 126, 36, 56, 52, 31, 48, 103, 63, 51, 40, 18, 88, 24, 72, + 130, 24, 71, 44, 23, 23, 22, 38, 24, 71, 143, 26, 74, 73, 59, 30, 47, 43, 66, 42, 81, 41, 85, + 30, 27, 253, 41, 58, 69, 39, 65, 60, 45, 52, 49, 145, 35, 29, 51, 58, 62, 40, 29, 94, 42, 32, + 44, 45, 87, 52, 73, 39, 56, 43, 52, 32, 26, 136, 68, 27, 37, 28, 
150, 96, 44, 66, 41, 39, 34, + 37, 114, 49, 53, 56, 93, 39, 30, 116, 40, 35, 27, 26, 38, 68, 87, 71, 42, 52, 35, 35, 78, 30, + 133, 48, 56, 35, 38, 26, 46, 62, 59, 34, 34, 39, 106, 28, 46, 129, 48, 25, 36, 64, 41, 38, 42, + 52, 111, 48, 86, 69, 47, 23, 39, 39, 51, 96, 157, 108, 47, 21, 41, 52, 76, 81, 70, 25, 76, 40, + 55, 52, 62, 51, 22, 72, 61, 44, 53, 53, 56, 114, 45, 54, 90, 85, 89, 37, 136, 52, 127, 25, 51, + 93, 35, 34, 31, 30, 40, 59, 18, 56, 93, 138, 96, 73, 21, 37, 42, 71, 47, 27, 116, 79, 56, 92, + 26, 70, 38, 70, 49, 69, 97, 50, 83, 28, 35, 107, 60, 18, 25, 45, 77, 48, 68, 81, 115, 51, 49, + 21, 53, 38, 31, 46, 83, 18, 40, 53, 32, 59, 61, 49, 15, 181, 45, 69, 57, 83, 75, 45, 60, 31, + 77, 40, 167, 22, 40, 76, 65, 37, 30, 54, 50, 20, 51, 55, 55, 30, 63, 100, 63, 28, 40, 59, 36, + 17, 78, 33, 38, 106, 34, 76, 27, 42, 48, 28, 36, 49, 70, 22, 42, 73, 38, 56, 64, 57, 69, 47, + 41, 13, 48, 62, 65, 38, 122, 58, 53, 94, 35, 40, 166, 50, 113, 24, 45, 60, 58, 30, 50, 29, 71, + 86, 16, 93, 27, 63, 70, 45, 36, 54, 62, 78, 18, 22, 102, 84, 62, 71, 79, 13, 87, 39, 56, 43, + 77, 42, 65, 41, 29, 92, 72, 118, 52, 88, 134, 43, 26, 44, 45, 54, 118, 42, 60, 45, 51, 59, 97, + 53, 71, 74, 79, 38, 113, 41, 47, 101, 95, 29, 32, 25, 55, 54, 60, 66, 37, 30, 84, 73, 24, 48, + 32, 18, 54, 48, 48, 56, 78, 55, 37, 35, 47, 58, 30, 46, 81, 47, 35, 44, 87, 66, 93, 54, 61, 38, + 68, 39, 25, 53, 132, 75, 90, 60, 37, 45, 44, 40, 71, 27, 77, 77, 27, 69, 169, 38, 76, 34, 87, + 57, 114, 24, 49, 38, 45, 36, 33, 67, 29, 105, 28, 53, 65, 67, 43, 52, 81, 24, 34, 59, 40, 99, + 70, 52, 24, 51, 43, 48, 28, 64, 30, 45, 61, 34, 67, 26, 12, 63, 76, 43, 64, 28, 47, 19, 90, + 127, 86, 49, 49, 23, 74, 45, 36, 25, 43, 33, 41, 27, 116, 50, 86, 14, 40, 36, 27, 108, 25, 59, + 76, 55, 32, 80, 54, 29, 25, 142, 36, 20, 65, 58, 25, 19, 47, 88, 69, 39, 33, 57, 54, 69, 41, + 78, 33, 42, 72, 82, 19, 53, 68, 24, 64, 33, 83, 65, 73, 53, 77, 100, 134, 49, 16, 51, 55, 46, + 66, 83, 39, 43, 61, 38, 52, 19, 29, 26, 33, 35, 61, 82, 35, 30, 30, 41, 44, 64, 43, 18, 48, 91, + 91, 34, 23, 171, 46, 48, 62, 22, 35, 23, 35, 53, 25, 67, 58, 32, 56, 73, 85, 45, 48, 48, 103, + 88, 76, 43, 60, 61, 30, 21, 69, 23, 50, 57, 66, 55, 35, 82, 119, 78, 19, 101, 49, 65, 68, 48, + 48, 83, 71, 82, 165, 70, 55, 17, 192, 39, 164, 60, 108, 47, 65, 56, 46, 47, 83, 45, 18, 45, 65, + 68, 68, 21, 60, 81, 95, 45, 43, 105, 22, 40, 63, 22, 58, 128, 50, 44, 63, 43, 75, 79, 50, 28, + 26, 37, 74, 19, 19, 36, 61, 32, 78, 59, 46, 75, 61, 40, 56, 69, 60, 36, 77, 89, 57, 54, 48, + 147, 57, 46, 14, 23, 48, 55, 57, 75, 45, 92, 32, 27, 27, 72, 48, 74, 44, 37, 42, 20, 28, 24, + 85, 37, 74, 107, 45, 58, 21, 33, 51, 58, 34, 191, 40, 52, 69, 49, 36, 84, 112, 49, 66, 48, 83, + 28, 160, 37, 24, 42, 54, 113, 79, 54, 25, 32, 46, 179, 20, 88, 38, 115, 28, 54, 29, 79, 105, + 133, 75, 126, 93, 47, 63, 31, 46, 92, 66, 63, 30, 37, 34, 61, 30, 66, 29, 125, 63, 35, 56, 42, + 60, 56, 30, 34, 58, 113, 86, 37, 76, 49, 58, 57, 181, 28, 53, 21, 67, 87, 103, 19, 61, 111, 41, + 45, 49, 60, 70, 97, 48, 55, 173, 62, 55, 26, 50, 45, 50, 37, 35, 35, 45, 99, 84, 42, 22, 33, + 40, 53, 36, 136, 44, 27, 47, 30, 54, 56, 60, 22, 56, 58, 25, 81, 74, 158, 54, 51, 52, 52, 20, + 22, 48, 37, 51, 17, 24, 29, 82, 29, 34, 52, 43, 54, 158, 28, 43, 39, 107, 34, 48, 74, 29, 114, + 70, 29, 63, 45, 58, 48, 85, 25, 39, 44, 30, 30, 50, 47, 14, 84, 57, 167, 70, 40, 202, 206, 27, + 63, 45, 55, 56, 27, 58, 120, 46, 46, 101, 78, 22, 56, 114, 101, 62, 72, 27, 58, 31, 40, 80, 28, + 57, 62, 51, 124, 30, 68, 48, 44, 63, 30, 26, 39, 120, 51, 
59, 45, 63, 31, 89, 67, 33, 51, 72, + 66, 105, 37, 87, 84, 46, 31, 29, 63, 48, 122, 65, 42, 30, 37, 59, 40, 65, 42, 47, 35, 130, 59, + 56, 21, 70, 30, 32, 79, 40, 21, 121, 15, 67, 83, 36, 62, 62, 84, 64, 44, 79, 75, 31, 25, 54, + 44, 31, 52, 26, 54, 37, 81, 168, 32, 85, 76, 48, 40, 110, 18, 36, 24, 100, 45, 38, 246, 49, 40, + 58, 23, 64, 48, 63, 31, 30, 35, 34, 96, 110, 46, 42, 42, 52, 52, 58, 34, 33, 91, 38, 58, 49, + 31, 67, 55, 101, 53, 42, 57, 28, 33, 65, 42, 48, 47, 36, 18, 60, 73, 26, 105, 61, 98, 31, 54, + 48, 73, 37, 50, 21, 34, 53, 35, 49, 40, 68, 90, 33, 43, 103, 17, 43, 140, 150, 54, 32, 54, 36, + 103, 33, 60, 68, 34, 77, 34, 69, 87, 31, 87, 39, 58, 55, 40, 57, 27, 47, 76, 17, 24, 28, 23, + 62, 30, 17, 36, 41, 77, 29, 89, 61, 60, 33, 133, 46, 20, 45, 19, 43, 40, 70, 83, 35, 75, 102, + 34, 36, 69, 55, 66, 101, 61, 85, 68, 62, 61, 95, 30, 29, 67, 47, 52, 72, 73, 44, 43, 52, 85, + 62, 102, 50, 88, 78, 68, 62, 43, 47, 56, 51, 80, 47, 48, 39, 47, 48, 16, 25, 48, 44, 27, 18, + 42, 69, 121, 49, 105, 52, 53, 59, 32, 49, 79, 34, 43, 28, 60, 62, 69, 27, 53, 31, 50, 98, 39, + 72, 34, 39, 62, 36, 21, 94, 60, 37, 18, 129, 272, 55, 76, 92, 35, 42, 21, 35, 68, 43, 45, 47, + 20, 76, 24, 45, 40, 45, 104, 37, 34, 23, 33, 28, 61, 32, 58, 46, 24, 34, 78, 79, 136, 50, 24, + 34, 61, 129, 53, 88, 73, 28, 46, 96, 77, 63, 63, 39, 26, 96, 25, 31, 68, 27, 38, 28, 68, 63, + 62, 62, 66, 69, 19, 46, 53, 72, 28, 39, 29, 85, 95, 74, 27, 63, 89, 54, 38, 29, 36, 71, 40, 56, + 96, 67, 99, 21, 43, 52, 67, 28, 75, 58, 57, 64, 47, 79, 69, 46, 84, 58, 70, 37, 110, 50, 76, + 48, 43, 74, 23, 64, 61, 13, 60, 17, 70, 40, 35, 50, 99, 36, 66, 59, 28, 21, 18, 66, 49, 126, + 67, 76, 97, 39, 65, 77, 99, 73, 62, 32, 72, 64, 69, 62, 45, 48, 71, 54, 33, 43, 26, 31, 47, 20, + 21, 87, 37, 20, 47, 97, 70, 62, 90, 34, 72, 37, 50, 151, 81, 73, 29, 34, 157, 29, 56, 78, 83, + 86, 46, 33, 43, 32, 68, 88, 145, 60, 32, 83, 79, 44, 23, 73, 88, 43, 65, 44, 70, 47, 42, 53, + 43, 37, 43, 50, 55, 108, 25, 45, 69, 121, 62, 50, 43, 62, 82, 46, 54, 56, 72, 87, 57, 48, 100, + 54, 110, 48, 68, 23, 47, 49, 34, 30, 80, 79, 26, 62, 29, 30, 65, 35, 82, 51, 65, 38, 28, 79, + 56, 44, 52, 97, 35, 34, 37, 23, 75, 55, 55, 49, 39, 33, 77, 25, 62, 34, 58, 35, 38, 121, 70, + 17, 53, 40, 28, 56, 53, 42, 134, 16, 76, 43, 69, 40, 37, 21, 45, 173, 82, 116, 79, 89, 79, 63, + 37, 38, 22, 50, 78, 84, 52, 38, 259, 72, 102, 42, 49, 63, 116, 43, 77, 18, 39, 26, 87, 46, 41, + 99, 87, 32, 62, 27, 39, 112, 103, 87, 38, 20, 28, 81, 43, 17, 46, 101, 69, 33, 23, 33, 51, 78, + 19, 83, 50, 66, 22, 36, 40, 84, 84, 62, 112, 39, 83, 49, 36, 79, 100, 78, 39, 40, 100, 60, 76, + 42, 94, 33, 68, 33, 15, 26, 54, 78, 21, 27, 121, 40, 64, 38, 43, 52, 50, 45, 40, 11, 127, 122, + 29, 46, 99, 82, 95, 29, 181, 28, 73, 54, 41, 41, 30, 113, 52, 57, 56, 41, 28, 57, 18, 25, 29, + 29, 53, 111, 23, 62, 78, 30, 49, 75, 44, 45, 58, 49, 38, 47, 128, 77, 32, 27, 42, 97, 58, 35, + 78, 47, 29, 63, 107, 62, 55, 36, 66, 29, 22, 49, 49, 163, 27, 54, 75, 85, 50, 34, 48, 132, 64, + 18, 102, 66, 49, 50, 46, 57, 39, 57, 31, 55, 87, 64, 84, 29, 51, 68, 29, 109, 30, 54, 54, 53, + 45, 63, 30, 26, 36, 49, 94, 111, 59, 61, 44, 51, 25, 58, 38, 59, 42, 36, 45, 42, 86, 56, 62, + 27, 106, 54, 33, 58, 77, 25, 116, 46, 88, 52, 42, 20, 29, 49, 76, 20, 99, 101, 28, 63, 30, 47, + 80, 77, 33, 41, 31, 59, 23, 13, 104, 156, 40, 73, 50, 104, 66, 55, 20, 86, 42, 25, 45, 47, 85, + 28, 38, 105, 99, 89, 115, 55, 37, 66, 85, 167, 19, 57, 69, 49, 23, 72, 34, 64, 79, 95, 23, 86, + 34, 26, 96, 89, 44, 73, 33, 46, 48, 43, 72, 108, 40, 
49, 113, 42, 14, 65, 32, 30, 102, 170, 30, + 84, 48, 54, 92, 56, 33, 40, 70, 16, 86, 23, 51, 45, 95, 55, 53, 71, 33, 45, 90, 70, 26, 69, 25, + 37, 92, 16, 21, 96, 33, 93, 46, 62, 75, 36, 43, 41, 204, 53, 63, 42, 44, 100, 42, 51, 60, 40, + 133, 24, 137, 42, 99, 18, 81, 77, 26, 34, 55, 89, 34, 45, 110, 28, 80, 63, 29, 36, 74, 57, 102, + 37, 21, 84, 37, 53, 118, 44, 109, 29, 79, 57, 35, 26, 52, 57, 57, 65, 40, 39, 56, 38, 92, 55, + 37, 20, 36, 139, 38, 33, 69, 62, 73, 44, 88, 28, 49, 35, 70, 53, 87, 140, 54, 38, 33, 44, 48, + 135, 28, 29, 41, 67, 75, 32, 53, 49, 91, 86, 43, 51, 69, 22, 43, 67, 38, 49, 49, 35, 30, 45, + 42, 76, 107, 50, 61, 48, 68, 56, 56, 87, 31, 103, 60, 55, 38, 46, 60, 18, 37, 67, 43, 34, 36, + 56, 22, 77, 17, 39, 34, 27, 29, 57, 47, 43, 16, 26, 47, 32, 34, 85, 55, 30, 40, 43, 36, 31, 49, + 72, 56, 45, 61, 74, 47, 63, 28, 44, 41, 55, 60, 47, 34, 31, 57, 99, 34, 41, 59, 38, 53, 49, 34, + 76, 76, 83, 94, 39, 40, 57, 69, 42, 40, 37, 105, 14, 46, 51, 29, 61, 19, 81, 22, 17, 34, 54, + 36, 38, 59, 29, 48, 64, 113, 64, 118, 63, 50, 16, 27, 43, 71, 43, 66, 29, 44, 56, 42, 53, 36, + 60, 48, 49, 19, 36, 24, 22, 39, 46, 58, 86, 62, 83, 38, 43, 86, 50, 22, 33, 83, 37, 59, 50, 39, + 42, 61, 56, 93, 45, 32, 54, 71, 156, 40, 42, 78, 95, 36, 70, 38, 31, 68, 53, 61, 37, 123, 70, + 41, 71, 27, 47, 101, 45, 79, 45, 16, 32, 56, 103, 37, 57, 31, 30, 34, 99, 80, 67, 87, 40, 73, + 56, 30, 33, 51, 55, 43, 30, 18, 62, 43, 66, 23, 27, 47, 25, 19, 74, 78, 30, 44, 28, 59, 49, 32, + 34, 29, 62, 67, 60, 33, 20, 65, 46, 49, 84, 32, 89, 41, 35, 45, 61, 45, 32, 43, 14, 39, 63, 46, + 71, 64, 69, 69, 21, 50, 113, 39, 63, 54, 66, 44, 36, 52, 55, 46, 51, 61, 34, 88, 150, 105, 40, + 20, 51, 78, 74, 27, 29, 53, 68, 77, 75, 31, 26, 18, 64, 76, 59, 29, 109, 42, 122, 66, 91, 59, + 48, 30, 38, 49, 58, 49, 46, 55, 38, 38, 24, 76, 51, 48, 35, 44, 29, 30, 19, 82, 53, 29, 89, 68, + 72, 45, 40, 48, 61, 75, 111, 39, 30, 53, 227, 40, 89, 44, 39, 35, 45, 114, 58, 64, 76, 141, 74, + 24, 30, 23, 12, 95, 39, 61, 90, 39, 47, 60, 56, 34, 72, 66, 43, 27, 69, 102, 100, 77, 39, 38, + 37, 41, 71, 57, 20, 20, 49, 54, 16, 33, 51, 104, 18, 27, 29, 55, 59, 43, 22, 36, 38, 45, 113, + 36, 52, 35, 27, 69, 30, 196, 52, 52, 48, 126, 70, 44, 28, 30, 34, 92, 45, 34, 74, 98, 35, 40, + 59, 50, 61, 118, 49, 43, 64, 49, 68, 45, 62, 23, 50, 39, 28, 41, 60, 52, 71, 80, 76, 43, 26, + 43, 90, 47, 22, 64, 58, 41, 55, 99, 96, 40, 50, 24, 35, 136, 106, 45, 35, 25, 145, 53, 37, 86, + 29, 72, 103, 45, 103, 69, 36, 67, 35, 38, 74, 39, 49, 83, 27, 111, 139, 68, 62, 39, 64, 39, 58, + 35, 74, 71, 93, 135, 36, 81, 24, 40, 80, 35, 59, 32, 42, 49, 37, 19, 56, 63, 54, 39, 78, 86, + 15, 88, 21, 42, 29, 36, 97, 65, 64, 18, 55, 68, 77, 31, 80, 25, 119, 34, 73, 59, 29, 89, 51, + 75, 56, 77, 51, 87, 42, 65, 60, 76, 37, 55, 37, 79, 36, 119, 74, 50, 91, 48, 66, 65, 62, 37, + 27, 22, 40, 70, 44, 27, 147, 71, 132, 92, 37, 41, 24, 72, 113, 32, 27, 78, 23, 131, 67, 21, 60, + 53, 15, 49, 37, 32, 56, 77, 119, 13, 73, 31, 89, 59, 103, 24, 64, 58, 84, 45, 89, 40, 40, 51, + 29, 40, 87, 114, 87, 40, 59, 53, 27, 57, 33, 29, 72, 35, 47, 31, 51, 64, 64, 29, 94, 105, 60, + 54, 52, 57, 60, 54, 69, 42, 88, 49, 21, 48, 63, 92, 52, 19, 50, 27, 31, 30, 34, 88, 33, 40, 55, + 78, 36, 33, 39, 28, 79, 17, 38, 24, 19, 51, 108, 113, 38, 35, 38, 71, 14, 57, 41, 34, 18, 45, + 22, 50, 32, 51, 63, 39, 30, 37, 36, 24, 34, 45, 45, 59, 95, 59, 105, 36, 98, 67, 28, 61, 27, + 80, 38, 44, 36, 33, 117, 82, 70, 52, 72, 70, 111, 76, 45, 45, 40, 42, 44, 92, 16, 35, 47, 26, + 112, 44, 29, 32, 77, 127, 47, 53, 
31, 97, 72, 16, 36, 44, 29, 44, 28, 60, 59, 75, 62, 111, 41, + 17, 51, 69, 41, 101, 86, 65, 52, 38, 46, 63, 81, 43, 35, 172, 65, 32, 80, 52, 76, 51, 49, 52, + 61, 37, 36, 46, 54, 43, 59, 29, 24, 129, 61, 41, 43, 74, 45, 55, 44, 39, 68, 54, 96, 28, 47, + 45, 29, 82, 47, 25, 37, 29, 84, 178, 36, 48, 58, 194, 16, 39, 27, 45, 44, 35, 125, 57, 165, 75, + 27, 34, 57, 37, 60, 72, 62, 47, 36, 26, 59, 79, 48, 73, 152, 131, 39, 66, 35, 71, 49, 57, 33, + 91, 48, 40, 48, 52, 113, 29, 171, 34, 55, 18, 36, 72, 64, 61, 40, 20, 29, 45, 68, 76, 43, 46, + 46, 51, 108, 35, 45, 52, 26, 32, 55, 62, 88, 40, 103, 32, 23, 19, 87, 58, 47, 43, 111, 91, 30, + 25, 86, 66, 39, 61, 18, 36, 62, 48, 26, 83, 89, 31, 46, 72, 77, 38, 32, 59, 19, 31, 74, 21, 27, + 44, 43, 21, 65, 30, 46, 45, 71, 48, 47, 114, 60, 68, 63, 67, 98, 49, 56, 134, 38, 95, 37, 118, + 36, 33, 84, 81, 66, 57, 56, 140, 24, 31, 26, 81, 44, 47, 122, 37, 137, 25, 97, 59, 41, 46, 98, + 117, 22, 277, 181, 38, 48, 36, 48, 72, 53, 55, 105, 17, 39, 54, 34, 85, 40, 40, 91, 76, 46, 42, + 24, 35, 183, 49, 79, 46, 68, 49, 99, 39, 21, 78, 44, 104, 53, 103, 60, 42, 72, 40, 116, 54, 18, + 39, 22, 17, 52, 69, 34, 26, 37, 23, 71, 67, 23, 37, 84, 46, 50, 62, 68, 91, 74, 25, 42, 58, 47, + 17, 34, 83, 54, 24, 56, 46, 70, 143, 23, 39, 47, 17, 49, 37, 153, 12, 105, 83, 72, 54, 35, 87, + 32, 71, 61, 86, 37, 37, 58, 58, 99, 64, 54, 76, 38, 86, 30, 59, 21, 57, 23, 45, 21, 36, 34, 66, + 37, 34, 92, 115, 86, 24, 95, 27, 48, 72, 45, 46, 53, 62, 61, 21, 46, 58, 67, 43, 35, 25, 35, + 47, 36, 67, 60, 22, 59, 66, 44, 49, 40, 43, 30, 62, 27, 37, 32, 32, 42, 39, 35, 44, 23, 53, 69, + 26, 25, 153, 56, 51, 45, 64, 51, 38, 40, 42, 43, 34, 86, 52, 34, 67, 60, 47, 61, 32, 45, 30, + 28, 42, 54, 32, 46, 20, 40, 90, 28, 28, 45, 65, 93, 188, 75, 70, 138, 43, 45, 57, 25, 20, 123, + 38, 40, 30, 54, 67, 45, 50, 80, 50, 38, 49, 33, 53, 32, 100, 14, 61, 39, 135, 44, 64, 61, 57, + 65, 136, 80, 55, 122, 67, 69, 93, 54, 72, 44, 53, 51, 36, 138, 38, 42, 97, 49, 90, 37, 50, 44, + 31, 43, 36, 77, 43, 48, 33, 56, 22, 11, 128, 73, 69, 16, 70, 65, 48, 76, 88, 47, 132, 82, 92, + 43, 47, 32, 61, 57, 52, 36, 45, 135, 31, 41, 25, 27, 43, 41, 37, 34, 45, 88, 42, 26, 29, 18, + 21, 34, 31, 28, 135, 120, 180, 74, 46, 30, 29, 39, 95, 70, 78, 46, 23, 59, 45, 43, 149, 77, 45, + 19, 43, 82, 60, 57, 84, 46, 42, 44, 72, 44, 73, 27, 26, 53, 73, 41, 37, 12, 60, 32, 61, 102, + 39, 71, 46, 60, 28, 62, 96, 123, 28, 65, 39, 65, 46, 44, 70, 73, 57, 63, 60, 57, 52, 79, 71, + 74, 87, 66, 46, 29, 39, 32, 43, 38, 45, 30, 37, 60, 56, 41, 83, 22, 64, 110, 41, 61, 38, 49, + 75, 32, 38, 65, 62, 75, 94, 73, 29, 54, 41, 50, 79, 57, 54, 47, 61, 28, 40, 112, 68, 31, 27, + 31, 67, 62, 38, 42, 82, 43, 49, 60, 46, 35, 41, 29, 63, 97, 56, 75, 36, 47, 47, 24, 52, 64, 70, + 97, 24, 68, 72, 45, 68, 25, 56, 10, 58, 111, 108, 13, 65, 42, 49, 17, 61, 38, 60, 45, 30, 59, + 67, 51, 66, 40, 95, 42, 37, 84, 87, 70, 56, 111, 30, 88, 118, 51, 67, 68, 91, 33, 78, 21, 141, + 38, 43, 57, 27, 44, 52, 42, 138, 20, 47, 90, 33, 47, 132, 20, 33, 27, 68, 32, 63, 36, 46, 77, + 54, 19, 38, 71, 127, 29, 45, 48, 35, 25, 125, 105, 25, 19, 44, 62, 47, 88, 44, 28, 107, 44, 43, + 44, 51, 16, 70, 72, 68, 41, 53, 24, 60, 33, 97, 31, 18, 55, 22, 31, 75, 128, 44, 81, 25, 119, + 50, 34, 61, 33, 51, 53, 90, 38, 41, 53, 78, 129, 110, 81, 19, 80, 73, 35, 38, 25, 50, 52, 21, + 23, 55, 30, 43, 83, 40, 53, 68, 28, 93, 79, 94, 40, 24, 65, 68, 178, 118, 40, 30, 68, 135, 28, + 34, 120, 125, 55, 22, 38, 90, 147, 50, 36, 58, 54, 32, 67, 100, 50, 68, 57, 63, 45, 91, 56, 39, + 69, 81, 48, 
13, 113, 34, 42, 112, 19, 29, 34, 26, 87, 94, 64, 40, 40, 38, 38, 64, 79, 40, 59, + 15, 52, 32, 87, 90, 17, 79, 38, 46, 78, 71, 33, 59, 40, 93, 23, 75, 100, 14, 69, 42, 53, 44, + 71, 80, 67, 47, 40, 47, 45, 55, 33, 67, 30, 24, 57, 62, 83, 72, 56, 71, 31, 28, 27, 51, 70, 79, + 105, 59, 47, 36, 57, 48, 42, 36, 30, 31, 104, 57, 40, 23, 55, 39, 48, 45, 74, 48, 46, 164, 86, + 53, 35, 90, 31, 49, 77, 41, 42, 62, 54, 44, 45, 57, 63, 96, 41, 97, 24, 36, 25, 58, 62, 25, 51, + 32, 64, 23, 77, 49, 30, 48, 44, 42, 24, 37, 46, 24, 58, 99, 74, 34, 36, 40, 68, 49, 31, 23, 66, + 30, 64, 52, 72, 86, 25, 83, 71, 43, 50, 64, 70, 21, 104, 25, 18, 81, 23, 45, 26, 20, 40, 69, + 59, 36, 50, 48, 49, 40, 60, 29, 60, 42, 42, 71, 40, 31, 27, 70, 16, 41, 55, 25, 50, 70, 20, 32, + 121, 36, 31, 33, 40, 55, 35, 55, 71, 59, 73, 34, 47, 70, 51, 56, 51, 55, 49, 29, 55, 40, 103, + 38, 54, 19, 36, 33, 93, 46, 54, 58, 38, 58, 106, 68, 71, 33, 55, 58, 36, 53, 134, 31, 66, 29, + 60, 30, 67, 27, 51, 98, 45, 47, 101, 138, 173, 41, 30, 69, 62, 95, 54, 54, 107, 54, 70, 64, 49, + 38, 42, 19, 95, 23, 37, 42, 91, 51, 80, 43, 115, 64, 66, 78, 24, 50, 56, 34, 50, 35, 56, 20, + 58, 36, 80, 19, 51, 45, 80, 23, 38, 71, 68, 60, 58, 22, 77, 56, 20, 46, 81, 71, 36, 79, 41, 61, + 80, 61, 36, 75, 105, 31, 46, 83, 33, 21, 39, 85, 25, 44, 81, 51, 66, 113, 78, 67, 64, 28, 76, + 66, 61, 51, 26, 54, 94, 37, 20, 125, 38, 88, 68, 115, 49, 43, 119, 147, 59, 54, 72, 36, 64, 30, + 43, 74, 34, 77, 64, 102, 148, 37, 61, 109, 55, 36, 30, 101, 183, 30, 53, 23, 52, 43, 50, 213, + 25, 62, 125, 37, 33, 51, 96, 42, 30, 52, 18, 24, 86, 63, 17, 68, 16, 40, 58, 24, 94, 92, 36, + 88, 55, 69, 70, 26, 18, 151, 56, 60, 28, 88, 125, 107, 18, 30, 53, 71, 38, 33, 56, 31, 139, 20, + 64, 18, 39, 105, 88, 25, 24, 51, 31, 61, 31, 22, 40, 27, 86, 42, 49, 100, 127, 49, 59, 96, 46, + 30, 82, 23, 44, 41, 38, 13, 37, 94, 40, 37, 36, 32, 54, 67, 103, 23, 61, 46, 58, 30, 51, 28, + 70, 31, 52, 17, 59, 87, 73, 85, 25, 46, 48, 72, 46, 45, 116, 86, 21, 41, 30, 221, 39, 89, 52, + 53, 64, 47, 76, 66, 22, 38, 38, 39, 85, 105, 32, 66, 28, 38, 58, 45, 36, 27, 43, 51, 66, 32, + 67, 28, 38, 63, 35, 34, 49, 51, 80, 44, 21, 36, 36, 31, 59, 175, 74, 46, 64, 103, 43, 70, 66, + 47, 51, 69, 49, 42, 37, 58, 49, 42, 55, 34, 33, 99, 32, 59, 31, 45, 61, 30, 67, 56, 43, 57, 52, + 33, 25, 53, 30, 49, 22, 31, 57, 84, 54, 30, 41, 86, 65, 74, 82, 60, 87, 31, 58, 42, 55, 97, 64, + 36, 42, 31, 48, 39, 74, 39, 71, 72, 40, 102, 51, 36, 52, 30, 69, 32, 95, 18, 29, 42, 35, 98, + 49, 60, 71, 54, 81, 62, 71, 24, 28, 16, 44, 102, 31, 59, 62, 56, 43, 37, 36, 50, 96, 34, 47, + 53, 74, 45, 69, 24, 72, 52, 29, 16, 42, 70, 59, 42, 51, 54, 34, 51, 53, 35, 55, 58, 65, 71, 61, + 58, 48, 63, 27, 69, 38, 161, 37, 12, 99, 37, 67, 56, 87, 32, 28, 85, 31, 23, 74, 48, 156, 59, + 42, 37, 116, 27, 68, 66, 38, 108, 34, 50, 61, 160, 68, 119, 35, 21, 78, 22, 26, 68, 49, 76, + 102, 79, 61, 66, 54, 117, 51, 75, 31, 51, 18, 54, 24, 47, 44, 81, 36, 36, 19, 68, 86, 45, 105, + 41, 30, 56, 70, 24, 22, 51, 64, 15, 21, 58, 78, 41, 91, 37, 46, 98, 48, 51, 32, 52, 36, 20, 31, + 44, 54, 85, 51, 47, 87, 167, 48, 90, 64, 48, 19, 31, 38, 29, 28, 41, 80, 32, 53, 45, 74, 51, + 46, 37, 78, 88, 50, 257, 50, 216, 47, 23, 25, 32, 105, 117, 61, 58, 38, 36, 35, 32, 38, 37, 65, + 74, 111, 15, 64, 43, 44, 22, 49, 34, 60, 29, 280, 34, 43, 27, 42, 145, 41, 68, 31, 108, 63, 35, + 56, 41, 64, 33, 63, 110, 94, 58, 59, 62, 78, 61, 68, 69, 56, 72, 51, 83, 40, 55, 51, 29, 91, + 28, 96, 29, 31, 77, 48, 20, 106, 14, 35, 23, 38, 52, 42, 37, 46, 57, 33, 18, 31, 19, 69, 
127, + 28, 32, 32, 74, 51, 64, 74, 57, 63, 28, 48, 37, 26, 28, 80, 26, 41, 82, 59, 69, 59, 46, 17, 37, + 25, 41, 140, 54, 35, 78, 64, 48, 45, 46, 48, 68, 53, 37, 76, 35, 96, 31, 73, 94, 92, 47, 71, + 17, 62, 47, 67, 53, 96, 46, 72, 47, 69, 27, 68, 41, 61, 32, 66, 223, 108, 82, 37, 59, 46, 19, + 44, 43, 48, 38, 69, 130, 89, 70, 51, 52, 89, 95, 37, 80, 28, 28, 111, 51, 51, 82, 45, 33, 54, + 21, 39, 46, 24, 55, 35, 87, 43, 29, 56, 49, 51, 56, 41, 42, 69, 113, 19, 47, 82, 28, 90, 41, + 71, 104, 78, 92, 17, 26, 67, 32, 38, 100, 53, 51, 44, 22, 20, 45, 60, 26, 20, 95, 64, 64, 79, + 66, 29, 93, 26, 39, 59, 58, 67, 78, 33, 26, 28, 67, 40, 61, 90, 36, 79, 36, 68, 51, 33, 80, 72, + 32, 51, 176, 34, 46, 101, 29, 164, 36, 39, 49, 86, 252, 64, 68, 25, 54, 15, 44, 104, 45, 49, + 51, 47, 53, 23, 39, 42, 66, 55, 34, 76, 92, 26, 55, 34, 37, 62, 76, 60, 38, 88, 84, 30, 58, 28, + 104, 57, 85, 61, 47, 73, 29, 24, 36, 57, 41, 40, 79, 39, 81, 34, 44, 33, 40, 28, 63, 90, 58, + 118, 22, 34, 65, 69, 59, 120, 116, 24, 41, 24, 30, 57, 60, 34, 58, 35, 57, 53, 43, 31, 40, 64, + 39, 42, 29, 40, 49, 35, 41, 29, 73, 151, 40, 63, 110, 41, 41, 69, 31, 46, 38, 31, 62, 47, 148, + 12, 69, 33, 43, 29, 67, 98, 64, 32, 112, 46, 55, 49, 45, 52, 30, 89, 69, 52, 80, 27, 86, 56, + 64, 40, 34, 38, 84, 86, 109, 48, 34, 104, 23, 46, 68, 28, 23, 64, 112, 35, 35, 31, 105, 35, 33, + 33, 37, 37, 68, 58, 63, 36, 72, 11, 88, 55, 41, 63, 27, 36, 53, 23, 49, 49, 43, 26, 90, 24, 24, + 37, 108, 53, 57, 45, 57, 69, 71, 99, 34, 46, 61, 76, 42, 69, 38, 42, 83, 20, 56, 79, 42, 68, + 35, 54, 50, 49, 26, 46, 33, 42, 83, 38, 49, 53, 45, 75, 49, 54, 32, 41, 36, 63, 89, 40, 17, 33, + 31, 90, 54, 109, 61, 23, 64, 34, 50, 48, 47, 42, 131, 106, 32, 55, 89, 87, 32, 101, 35, 53, 54, + 19, 47, 17, 19, 59, 59, 81, 60, 36, 34, 75, 103, 75, 56, 48, 113, 14, 82, 46, 28, 76, 31, 59, + 55, 54, 20, 54, 39, 35, 28, 28, 33, 93, 71, 57, 118, 112, 79, 28, 102, 45, 44, 56, 52, 39, 70, + 39, 38, 44, 34, 54, 48, 43, 63, 20, 57, 78, 78, 153, 36, 31, 59, 22, 92, 30, 62, 42, 15, 57, + 29, 79, 15, 47, 30, 88, 35, 80, 40, 53, 104, 36, 124, 82, 95, 16, 46, 24, 73, 92, 37, 39, 22, + 74, 104, 21, 20, 89, 98, 60, 56, 37, 59, 40, 139, 40, 10, 64, 27, 75, 23, 44, 61, 45, 19, 24, + 33, 113, 47, 95, 55, 39, 88, 28, 105, 79, 59, 67, 55, 40, 42, 37, 93, 67, 47, 28, 59, 30, 33, + 139, 25, 32, 37, 37, 28, 46, 30, 98, 53, 77, 25, 50, 66, 53, 50, 30, 37, 41, 33, 73, 121, 97, + 62, 47, 67, 34, 57, 54, 15, 129, 26, 13, 50, 53, 50, 54, 28, 67, 100, 33, 58, 44, 49, 81, 57, + 131, 42, 105, 40, 45, 42, 58, 63, 54, 67, 34, 44, 69, 84, 59, 71, 76, 68, 45, 46, 68, 40, 47, + 48, 23, 59, 66, 48, 123, 46, 67, 65, 74, 62, 45, 66, 57, 56, 72, 74, 50, 41, 97, 51, 41, 31, + 57, 61, 28, 25, 55, 33, 98, 29, 23, 48, 63, 54, 47, 60, 34, 56, 33, 33, 53, 61, 32, 23, 30, 18, + 53, 67, 123, 32, 39, 47, 56, 34, 80, 41, 82, 22, 46, 59, 39, 39, 36, 53, 94, 30, 48, 39, 101, + 131, 64, 52, 99, 40, 49, 85, 53, 39, 55, 40, 127, 41, 80, 50, 97, 80, 36, 58, 46, 64, 49, 63, + 47, 81, 58, 26, 49, 42, 87, 28, 18, 63, 80, 63, 98, 78, 31, 80, 37, 87, 82, 49, 85, 44, 83, 16, + 30, 54, 39, 13, 33, 112, 27, 54, 39, 50, 46, 47, 47, 17, 50, 53, 61, 99, 51, 57, 24, 53, 23, + 136, 42, 60, 62, 26, 63, 29, 63, 37, 36, 62, 60, 44, 35, 55, 49, 81, 36, 47, 156, 26, 59, 61, + 40, 35, 24, 25, 44, 94, 51, 108, 37, 67, 53, 60, 55, 74, 50, 81, 64, 50, 47, 26, 36, 44, 80, + 74, 33, 39, 119, 25, 55, 57, 39, 25, 41, 192, 47, 45, 55, 28, 73, 72, 42, 52, 39, 71, 22, 45, + 68, 116, 32, 29, 47, 49, 73, 47, 38, 129, 42, 78, 73, 40, 115, 28, 31, 90, 23, 
69, 55, 72, 74, + 36, 36, 75, 78, 29, 27, 29, 50, 49, 160, 65, 46, 38, 38, 37, 29, 55, 10, 28, 82, 54, 66, 37, + 101, 43, 67, 50, 64, 40, 93, 39, 209, 46, 53, 37, 72, 55, 69, 71, 34, 76, 28, 54, 79, 23, 40, + 41, 18, 45, 101, 25, 29, 41, 49, 53, 55, 44, 163, 40, 26, 23, 45, 38, 85, 46, 52, 22, 44, 44, + 25, 32, 32, 76, 66, 71, 101, 43, 90, 44, 89, 27, 37, 79, 115, 49, 42, 68, 47, 50, 57, 86, 29, + 40, 68, 17, 96, 36, 30, 54, 153, 36, 24, 47, 90, 44, 67, 89, 21, 67, 39, 49, 154, 57, 75, 30, + 62, 47, 45, 66, 24, 100, 157, 40, 65, 140, 32, 51, 159, 31, 81, 63, 42, 60, 42, 73, 39, 21, 75, + 88, 53, 33, 39, 26, 25, 88, 18, 66, 41, 26, 45, 45, 91, 166, 124, 113, 112, 80, 40, 54, 23, 34, + 32, 29, 101, 64, 57, 50, 82, 78, 32, 18, 28, 39, 50, 106, 69, 70, 75, 47, 29, 32, 25, 26, 23, + 77, 27, 68, 162, 45, 46, 76, 50, 34, 25, 52, 40, 61, 58, 96, 68, 88, 52, 75, 44, 46, 52, 29, + 27, 43, 25, 52, 75, 32, 51, 23, 80, 42, 107, 51, 127, 59, 45, 83, 65, 38, 71, 69, 60, 42, 49, + 78, 88, 58, 33, 73, 54, 88, 22, 34, 65, 25, 198, 33, 63, 21, 151, 72, 39, 57, 35, 21, 29, 32, + 88, 63, 42, 44, 85, 30, 67, 31, 72, 94, 40, 63, 17, 59, 59, 62, 92, 31, 101, 31, 22, 151, 75, + 73, 69, 45, 83, 103, 23, 28, 35, 65, 15, 46, 24, 42, 22, 90, 44, 45, 105, 32, 23, 37, 36, 60, + 43, 25, 46, 67, 26, 82, 71, 106, 42, 62, 32, 44, 30, 86, 24, 68, 36, 30, 104, 53, 59, 55, 69, + 105, 52, 92, 15, 69, 62, 55, 53, 40, 37, 86, 46, 90, 41, 30, 103, 48, 92, 27, 60, 43, 133, 96, + 66, 39, 82, 72, 24, 91, 77, 46, 159, 81, 39, 80, 61, 46, 10, 40, 51, 72, 60, 56, 107, 49, 29, + 39, 46, 38, 20, 67, 74, 35, 62, 43, 43, 68, 62, 37, 35, 66, 30, 35, 100, 54, 40, 28, 72, 38, + 52, 91, 45, 84, 28, 21, 59, 91, 38, 44, 79, 40, 29, 33, 46, 23, 75, 67, 76, 108, 82, 103, 42, + 89, 25, 68, 115, 140, 58, 73, 29, 23, 50, 58, 47, 33, 27, 80, 83, 46, 116, 50, 81, 36, 68, 70, + 58, 86, 47, 18, 42, 73, 20, 21, 51, 53, 92, 48, 64, 57, 73, 79, 26, 36, 79, 17, 14, 54, 51, 42, + 76, 91, 82, 46, 49, 58, 17, 30, 74, 57, 26, 95, 64, 37, 50, 52, 43, 46, 46, 70, 82, 66, 120, + 68, 59, 76, 47, 72, 136, 45, 84, 51, 34, 88, 63, 43, 121, 20, 48, 45, 47, 53, 127, 18, 84, 34, + 80, 40, 77, 45, 37, 27, 30, 42, 49, 65, 48, 93, 27, 66, 79, 65, 47, 39, 25, 23, 71, 43, 23, 65, + 74, 17, 125, 93, 27, 33, 55, 60, 28, 36, 234, 80, 100, 34, 43, 25, 60, 49, 159, 43, 50, 31, 56, + 34, 30, 47, 32, 32, 65, 86, 77, 50, 52, 34, 58, 82, 31, 82, 36, 62, 25, 54, 96, 48, 23, 113, + 33, 31, 37, 30, 55, 59, 42, 75, 62, 26, 70, 26, 48, 44, 34, 88, 51, 65, 49, 32, 67, 34, 76, + 167, 59, 70, 62, 93, 27, 31, 52, 49, 28, 21, 39, 114, 154, 76, 22, 73, 38, 53, 70, 115, 53, 35, + 73, 75, 79, 89, 58, 31, 53, 21, 89, 64, 71, 54, 15, 37, 80, 44, 76, 31, 57, 38, 36, 46, 118, + 54, 56, 111, 43, 65, 35, 76, 25, 49, 88, 98, 48, 15, 114, 37, 72, 21, 40, 113, 59, 59, 78, 25, + 51, 63, 88, 33, 56, 70, 321, 111, 15, 50, 55, 75, 35, 82, 35, 47, 68, 37, 52, 110, 40, 28, 72, + 34, 44, 64, 63, 90, 36, 43, 36, 46, 135, 48, 49, 65, 27, 81, 49, 35, 60, 46, 46, 47, 108, 38, + 81, 54, 44, 36, 42, 44, 86, 34, 114, 46, 36, 35, 55, 120, 45, 48, 55, 52, 35, 82, 27, 31, 41, + 44, 85, 37, 46, 42, 111, 103, 29, 62, 26, 33, 37, 33, 56, 43, 58, 92, 90, 18, 44, 63, 90, 38, + 53, 113, 55, 128, 17, 138, 15, 88, 85, 41, 42, 60, 54, 56, 96, 56, 18, 62, 43, 76, 32, 44, 23, + 24, 21, 51, 13, 94, 36, 50, 91, 58, 62, 36, 26, 69, 45, 50, 33, 38, 16, 22, 23, 46, 29, 61, 51, + 39, 22, 36, 78, 63, 43, 16, 39, 12, 121, 51, 94, 46, 141, 32, 76, 50, 72, 26, 153, 58, 39, 28, + 23, 20, 21, 51, 88, 60, 19, 52, 106, 83, 126, 62, 10, 69, 56, 21, 
47, 36, 40, 63, 37, 36, 24, + 54, 66, 67, 91, 15, 51, 26, 68, 52, 29, 41, 101, 38, 21, 86, 90, 37, 114, 46, 26, 43, 57, 66, + 61, 93, 56, 65, 83, 46, 34, 43, 26, 51, 64, 30, 32, 29, 49, 102, 36, 37, 49, 31, 83, 43, 88, + 69, 74, 24, 22, 51, 42, 42, 48, 111, 40, 37, 113, 56, 56, 26, 73, 90, 71, 28, 103, 43, 125, 63, + 62, 136, 91, 63, 34, 78, 73, 39, 45, 44, 22, 70, 24, 59, 23, 81, 75, 128, 28, 81, 45, 56, 41, + 62, 29, 32, 75, 12, 44, 40, 73, 62, 25, 105, 85, 103, 31, 130, 91, 89, 27, 48, 55, 25, 45, 74, + 59, 56, 103, 34, 74, 85, 26, 18, 81, 44, 166, 59, 49, 21, 33, 25, 208, 26, 68, 98, 43, 54, 29, + 50, 42, 34, 67, 33, 23, 75, 55, 22, 35, 77, 30, 74, 42, 25, 63, 14, 36, 48, 57, 57, 53, 100, + 118, 125, 26, 33, 27, 41, 58, 55, 45, 46, 41, 33, 35, 33, 39, 25, 93, 133, 26, 94, 53, 51, 51, + 63, 53, 33, 54, 77, 44, 27, 22, 68, 48, 37, 56, 35, 48, 58, 18, 31, 26, 27, 109, 90, 73, 42, + 90, 27, 18, 11, 56, 51, 34, 64, 51, 43, 95, 103, 84, 67, 30, 44, 49, 54, 87, 25, 25, 49, 24, + 87, 46, 13, 44, 56, 34, 27, 50, 92, 39, 87, 101, 27, 77, 47, 57, 120, 82, 39, 24, 60, 65, 92, + 102, 56, 54, 62, 86, 54, 41, 30, 50, 50, 50, 41, 37, 79, 47, 109, 34, 20, 46, 79, 46, 23, 37, + 49, 96, 88, 75, 63, 33, 75, 92, 72, 90, 58, 83, 101, 31, 107, 73, 71, 52, 31, 27, 58, 47, 85, + 50, 42, 106, 44, 36, 70, 106, 31, 44, 42, 51, 59, 72, 84, 35, 50, 74, 37, 63, 120, 26, 72, 48, + 30, 57, 49, 30, 50, 67, 45, 123, 109, 34, 100, 94, 36, 110, 115, 68, 78, 30, 127, 37, 42, 65, + 53, 116, 35, 38, 57, 66, 216, 26, 87, 78, 87, 27, 60, 39, 21, 32, 30, 113, 35, 78, 68, 34, 65, + 66, 28, 72, 50, 44, 32, 46, 29, 49, 47, 44, 87, 37, 47, 77, 97, 47, 52, 87, 59, 59, 44, 53, 50, + 67, 46, 43, 48, 40, 108, 29, 92, 50, 18, 38, 89, 26, 53, 27, 52, 83, 45, 81, 72, 92, 97, 82, + 61, 52, 54, 60, 32, 37, 39, 38, 43, 49, 34, 16, 137, 33, 83, 37, 64, 47, 68, 20, 56, 46, 100, + 42, 41, 20, 54, 68, 44, 37, 60, 27, 59, 83, 46, 41, 42, 64, 26, 33, 24, 44, 49, 52, 46, 20, 26, + 43, 57, 55, 30, 83, 51, 42, 57, 77, 68, 41, 28, 77, 30, 35, 89, 38, 44, 63, 97, 31, 73, 49, + 102, 81, 49, 34, 26, 30, 40, 27, 47, 38, 134, 87, 64, 38, 137, 43, 49, 19, 86, 49, 30, 50, 86, + 52, 58, 85, 32, 44, 71, 25, 91, 29, 57, 55, 37, 56, 45, 78, 26, 96, 44, 35, 29, 86, 23, 54, 37, + 34, 46, 51, 42, 34, 72, 70, 114, 30, 30, 33, 28, 135, 14, 25, 34, 44, 46, 83, 95, 30, 56, 21, + 78, 32, 62, 30, 50, 50, 29, 60, 257, 51, 41, 39, 42, 54, 32, 107, 68, 79, 100, 29, 31, 59, 39, + 65, 48, 71, 22, 71, 20, 50, 63, 34, 106, 56, 22, 109, 58, 44, 72, 27, 50, 58, 12, 41, 35, 60, + 87, 54, 33, 48, 30, 30, 14, 44, 67, 57, 30, 40, 88, 72, 65, 43, 49, 29, 51, 61, 51, 56, 15, 58, + 42, 27, 43, 55, 31, 51, 28, 29, 124, 74, 23, 44, 53, 43, 86, 155, 37, 76, 87, 61, 63, 92, 53, + 19, 63, 26, 37, 44, 17, 46, 78, 37, 24, 43, 38, 58, 37, 35, 60, 64, 89, 85, 36, 79, 26, 54, 19, + 61, 29, 48, 57, 24, 78, 87, 57, 27, 24, 24, 40, 47, 60, 74, 39, 87, 42, 24, 71, 66, 53, 64, + 102, 42, 83, 38, 100, 46, 45, 68, 68, 103, 63, 83, 70, 39, 94, 31, 83, 32, 82, 47, 51, 131, 34, + 84, 36, 51, 47, 132, 94, 52, 53, 52, 25, 22, 46, 45, 35, 32, 49, 122, 42, 88, 44, 58, 148, 58, + 36, 21, 39, 82, 40, 75, 44, 25, 19, 30, 50, 39, 34, 52, 29, 81, 37, 46, 32, 66, 60, 40, 19, 54, + 62, 64, 85, 33, 35, 26, 77, 40, 34, 49, 45, 36, 134, 35, 88, 67, 124, 53, 50, 37, 27, 43, 37, + 39, 60, 40, 9, 45, 55, 48, 55, 40, 80, 42, 40, 79, 91, 41, 47, 35, 53, 120, 77, 69, 63, 45, 22, + 73, 54, 39, 96, 76, 92, 73, 34, 44, 19, 45, 21, 53, 54, 37, 49, 22, 60, 129, 68, 119, 46, 44, + 89, 64, 75, 47, 49, 57, 51, 74, 42, 32, 105, 35, 
58, 20, 36, 168, 52, 75, 66, 25, 77, 30, 71, + 27, 49, 65, 42, 47, 67, 72, 107, 205, 47, 97, 71, 52, 49, 30, 82, 84, 39, 27, 51, 87, 119, 38, + 47, 118, 41, 30, 60, 164, 34, 39, 47, 51, 41, 64, 36, 47, 70, 53, 71, 25, 33, 46, 23, 55, 79, + 47, 22, 44, 26, 44, 25, 18, 77, 57, 28, 25, 27, 38, 91, 60, 31, 82, 25, 116, 44, 102, 30, 49, + 39, 59, 31, 27, 31, 121, 56, 84, 34, 47, 48, 44, 47, 58, 34, 73, 123, 49, 65, 56, 49, 63, 49, + 32, 48, 92, 66, 61, 47, 66, 18, 48, 24, 41, 25, 94, 35, 39, 93, 75, 42, 18, 76, 41, 35, 38, 29, + 25, 30, 86, 30, 43, 65, 62, 19, 104, 55, 82, 31, 40, 44, 69, 27, 39, 68, 86, 32, 101, 47, 45, + 70, 77, 34, 77, 31, 76, 38, 34, 85, 63, 27, 77, 44, 66, 63, 31, 47, 49, 40, 18, 39, 64, 81, 89, + 71, 45, 42, 37, 80, 37, 55, 80, 60, 55, 41, 28, 44, 33, 54, 51, 42, 67, 103, 51, 66, 30, 48, + 51, 63, 61, 55, 61, 32, 55, 22, 79, 70, 49, 116, 38, 94, 71, 149, 94, 73, 67, 65, 77, 73, 91, + 47, 121, 49, 26, 95, 25, 51, 33, 96, 70, 36, 85, 56, 71, 79, 39, 57, 14, 42, 76, 83, 67, 57, + 73, 31, 36, 32, 60, 52, 61, 61, 89, 119, 26, 62, 37, 101, 71, 91, 27, 52, 112, 32, 30, 47, 36, + 76, 40, 45, 25, 60, 70, 26, 103, 31, 63, 55, 32, 38, 80, 48, 46, 80, 60, 44, 55, 26, 57, 150, + 45, 38, 99, 76, 102, 67, 61, 71, 33, 57, 62, 127, 77, 113, 51, 33, 33, 26, 20, 41, 39, 102, 22, + 54, 40, 69, 74, 33, 46, 45, 30, 53, 42, 36, 30, 51, 52, 36, 111, 28, 45, 35, 77, 87, 15, 37, + 19, 125, 54, 62, 50, 23, 23, 78, 60, 35, 24, 71, 32, 43, 74, 32, 26, 131, 38, 123, 40, 62, 65, + 28, 35, 127, 49, 85, 22, 80, 43, 46, 38, 48, 55, 21, 33, 63, 47, 53, 43, 92, 50, 48, 71, 158, + 66, 89, 33, 52, 39, 78, 54, 69, 191, 36, 66, 30, 35, 87, 48, 60, 35, 47, 31, 67, 32, 43, 40, + 54, 41, 26, 58, 73, 115, 24, 102, 30, 46, 27, 107, 54, 49, 62, 209, 36, 63, 52, 54, 87, 55, 48, + 59, 35, 128, 127, 34, 19, 133, 64, 25, 33, 30, 53, 44, 99, 127, 79, 49, 87, 34, 36, 51, 46, 34, + 60, 37, 88, 42, 42, 48, 99, 68, 60, 83, 29, 89, 50, 24, 37, 42, 107, 18, 42, 31, 57, 50, 85, + 82, 71, 123, 29, 39, 81, 37, 27, 60, 75, 39, 13, 29, 27, 36, 41, 82, 25, 37, 35, 102, 24, 23, + 84, 51, 37, 35, 14, 57, 47, 87, 39, 83, 57, 72, 20, 82, 74, 96, 49, 29, 47, 73, 78, 97, 48, + 110, 34, 13, 56, 49, 26, 54, 27, 33, 101, 11, 59, 40, 37, 53, 39, 38, 25, 111, 79, 27, 43, 70, + 38, 51, 53, 113, 34, 73, 37, 44, 49, 58, 53, 39, 50, 57, 62, 26, 45, 123, 38, 51, 82, 27, 26, + 90, 68, 29, 105, 83, 120, 27, 84, 53, 19, 33, 72, 73, 24, 50, 20, 78, 64, 120, 44, 64, 35, 31, + 82, 79, 59, 63, 59, 61, 55, 26, 18, 26, 93, 29, 67, 75, 55, 59, 69, 110, 50, 43, 37, 80, 53, + 86, 25, 40, 44, 64, 69, 45, 56, 28, 138, 44, 48, 42, 30, 77, 20, 43, 53, 24, 85, 39, 70, 20, + 43, 52, 25, 79, 57, 48, 55, 49, 70, 55, 108, 41, 77, 54, 147, 51, 53, 42, 18, 19, 47, 52, 35, + 68, 99, 118, 17, 75, 80, 71, 88, 46, 43, 83, 36, 78, 33, 37, 35, 78, 49, 65, 164, 30, 97, 68, + 50, 28, 207, 22, 34, 88, 23, 62, 23, 63, 148, 55, 64, 72, 34, 31, 38, 60, 29, 48, 75, 38, 49, + 55, 22, 46, 27, 44, 64, 68, 32, 27, 122, 19, 29, 43, 48, 37, 25, 48, 26, 39, 62, 84, 87, 79, + 44, 21, 23, 70, 16, 72, 51, 130, 50, 17, 76, 86, 41, 43, 61, 57, 50, 51, 135, 87, 139, 41, 41, + 43, 72, 122, 62, 43, 58, 42, 45, 31, 54, 41, 30, 50, 64, 46, 44, 44, 82, 72, 49, 32, 51, 27, + 87, 60, 105, 86, 63, 40, 47, 28, 45, 89, 54, 54, 40, 97, 65, 33, 58, 87, 44, 51, 42, 95, 53, + 186, 19, 49, 78, 44, 141, 50, 30, 40, 45, 53, 54, 105, 35, 40, 50, 28, 55, 58, 27, 27, 48, 36, + 72, 38, 48, 62, 34, 79, 58, 154, 43, 19, 90, 50, 112, 31, 187, 102, 45, 87, 48, 135, 37, 54, + 30, 48, 32, 119, 43, 37, 88, 21, 85, 35, 55, 
26, 68, 81, 59, 36, 67, 45, 55, 40, 169, 21, 69, + 130, 33, 21, 27, 53, 102, 70, 54, 30, 34, 52, 52, 46, 20, 57, 44, 23, 21, 54, 32, 20, 48, 64, + 84, 30, 49, 88, 24, 89, 124, 35, 47, 7, 19, 96, 12, 53, 29, 44, 37, 45, 46, 62, 43, 87, 30, 49, + 17, 64, 54, 74, 25, 38, 27, 48, 33, 43, 87, 126, 19, 25, 90, 52, 30, 71, 28, 47, 60, 74, 70, + 66, 61, 81, 42, 29, 61, 60, 82, 30, 60, 40, 69, 88, 76, 30, 32, 37, 62, 35, 56, 87, 66, 93, 52, + 57, 31, 69, 21, 25, 60, 32, 60, 26, 51, 21, 63, 21, 57, 50, 80, 164, 20, 42, 36, 36, 26, 64, + 56, 65, 123, 21, 58, 44, 41, 35, 45, 80, 33, 43, 62, 63, 120, 43, 46, 21, 96, 72, 82, 30, 107, + 35, 106, 49, 67, 54, 33, 46, 38, 30, 47, 32, 50, 55, 72, 33, 70, 50, 42, 87, 57, 34, 27, 59, + 34, 123, 66, 13, 30, 27, 30, 22, 65, 60, 62, 88, 49, 103, 28, 35, 34, 41, 32, 69, 47, 87, 23, + 23, 36, 32, 23, 61, 70, 97, 65, 62, 96, 41, 53, 58, 164, 62, 34, 63, 28, 29, 92, 28, 23, 40, + 36, 58, 44, 188, 97, 18, 59, 41, 74, 30, 39, 52, 61, 22, 41, 18, 66, 36, 100, 90, 34, 60, 90, + 63, 29, 19, 57, 35, 99, 33, 69, 80, 74, 21, 24, 39, 84, 93, 127, 31, 36, 49, 93, 19, 27, 120, + 74, 17, 86, 61, 35, 50, 69, 60, 74, 108, 61, 24, 52, 15, 44, 34, 31, 37, 342, 30, 32, 41, 41, + 273, 49, 46, 40, 88, 45, 63, 53, 23, 33, 50, 91, 58, 53, 33, 79, 80, 43, 74, 44, 56, 76, 66, + 34, 60, 38, 45, 32, 49, 77, 43, 72, 47, 66, 23, 71, 36, 40, 57, 64, 39, 30, 35, 27, 88, 60, 50, + 30, 77, 55, 21, 41, 43, 43, 51, 61, 91, 47, 36, 28, 59, 40, 68, 32, 43, 27, 66, 45, 202, 40, + 33, 133, 131, 108, 59, 72, 79, 88, 36, 34, 86, 28, 61, 35, 139, 81, 65, 55, 33, 39, 53, 56, 59, + 14, 60, 67, 87, 109, 78, 83, 40, 38, 32, 113, 18, 57, 51, 62, 76, 25, 75, 61, 48, 40, 61, 34, + 61, 50, 22, 119, 142, 85, 41, 37, 86, 80, 87, 44, 89, 122, 54, 55, 51, 28, 28, 37, 63, 57, 92, + 32, 47, 39, 55, 68, 117, 115, 126, 68, 123, 29, 54, 103, 43, 63, 73, 45, 94, 66, 34, 90, 27, + 80, 33, 68, 62, 90, 62, 76, 35, 48, 85, 33, 30, 46, 47, 50, 159, 24, 32, 33, 103, 93, 31, 29, + 42, 18, 64, 48, 21, 44, 49, 76, 49, 35, 36, 98, 43, 58, 61, 62, 24, 71, 91, 140, 26, 28, 62, + 39, 93, 23, 60, 61, 78, 79, 53, 22, 59, 60, 20, 88, 53, 29, 57, 81, 67, 41, 163, 46, 59, 63, + 80, 76, 50, 47, 29, 62, 71, 41, 77, 52, 65, 35, 85, 26, 143, 46, 25, 30, 18, 45, 55, 36, 46, + 26, 83, 46, 58, 144, 81, 28, 88, 22, 210, 105, 23, 33, 109, 28, 166, 97, 76, 40, 115, 55, 86, + 38, 75, 31, 38, 46, 91, 55, 40, 30, 35, 59, 60, 43, 36, 18, 60, 42, 49, 47, 18, 48, 123, 126, + 24, 51, 18, 37, 119, 76, 42, 41, 48, 28, 53, 33, 25, 33, 84, 30, 31, 16, 66, 52, 41, 79, 51, + 67, 41, 36, 87, 76, 79, 62, 47, 32, 134, 54, 42, 121, 116, 36, 50, 43, 81, 43, 57, 51, 50, 31, + 54, 63, 51, 42, 66, 53, 76, 30, 132, 30, 39, 73, 61, 24, 36, 72, 146, 118, 133, 88, 64, 44, 70, + 37, 18, 92, 39, 72, 21, 9, 47, 24, 42, 59, 38, 51, 35, 29, 44, 83, 29, 77, 38, 26, 69, 72, 90, + 67, 49, 107, 40, 48, 42, 73, 41, 69, 24, 34, 58, 55, 35, 100, 75, 52, 58, 44, 92, 60, 18, 51, + 49, 30, 42, 118, 178, 86, 40, 82, 71, 120, 36, 78, 185, 17, 78, 97, 64, 67, 34, 35, 30, 21, 62, + 53, 49, 54, 41, 121, 60, 49, 29, 31, 34, 63, 79, 38, 33, 35, 29, 53, 92, 14, 48, 96, 34, 47, + 74, 60, 59, 16, 41, 46, 85, 59, 73, 77, 24, 66, 64, 26, 18, 56, 63, 35, 83, 25, 132, 68, 105, + 75, 59, 61, 95, 87, 57, 139, 30, 26, 171, 34, 111, 75, 40, 50, 41, 53, 39, 29, 71, 43, 46, 42, + 59, 31, 59, 39, 45, 39, 51, 62, 32, 38, 47, 22, 47, 114, 42, 49, 29, 42, 65, 74, 54, 44, 28, + 50, 58, 41, 80, 56, 31, 42, 36, 27, 28, 42, 34, 54, 19, 25, 99, 42, 57, 75, 48, 92, 75, 139, + 78, 131, 51, 28, 86, 43, 64, 120, 75, 24, 
106, 72, 47, 36, 49, 26, 16, 34, 49, 27, 16, 67, 34, + 51, 15, 74, 32, 91, 52, 56, 58, 47, 69, 130, 58, 47, 45, 28, 55, 25, 29, 101, 23, 145, 71, 121, + 63, 46, 116, 51, 41, 37, 38, 63, 61, 52, 20, 29, 16, 27, 126, 107, 35, 93, 47, 46, 85, 47, 92, + 58, 31, 61, 49, 39, 45, 50, 68, 52, 56, 26, 46, 47, 42, 41, 50, 50, 63, 103, 45, 44, 144, 41, + 38, 65, 47, 64, 74, 202, 92, 80, 58, 41, 51, 30, 75, 69, 95, 30, 44, 31, 45, 56, 76, 58, 37, + 48, 31, 61, 67, 27, 73, 80, 69, 78, 31, 32, 97, 72, 43, 67, 124, 55, 41, 37, 28, 55, 65, 54, + 60, 46, 63, 33, 45, 49, 97, 18, 156, 35, 86, 44, 43, 47, 15, 211, 57, 70, 37, 35, 98, 74, 87, + 43, 26, 37, 115, 23, 121, 39, 23, 28, 60, 43, 14, 42, 110, 42, 109, 36, 72, 42, 79, 20, 55, 99, + 33, 114, 41, 38, 29, 66, 29, 25, 44, 21, 32, 133, 51, 77, 22, 17, 81, 53, 112, 84, 16, 91, 30, + 48, 57, 36, 34, 50, 57, 22, 121, 98, 42, 21, 83, 137, 29, 54, 24, 39, 68, 24, 29, 42, 38, 50, + 47, 19, 26, 54, 63, 103, 67, 49, 68, 29, 16, 44, 43, 33, 16, 25, 105, 29, 49, 42, 116, 54, 138, + 84, 23, 53, 63, 36, 41, 51, 19, 17, 35, 62, 88, 15, 45, 32, 37, 82, 137, 42, 51, 21, 198, 49, + 71, 63, 27, 54, 26, 53, 58, 31, 25, 52, 53, 53, 131, 37, 38, 30, 196, 35, 97, 27, 101, 59, 33, + 58, 88, 80, 56, 41, 22, 31, 29, 68, 40, 64, 73, 78, 125, 41, 43, 35, 97, 40, 49, 37, 44, 58, + 31, 80, 28, 77, 36, 32, 89, 50, 56, 101, 74, 48, 31, 57, 41, 76, 29, 35, 208, 98, 29, 39, 47, + 34, 78, 57, 111, 47, 78, 59, 25, 25, 52, 21, 31, 70, 70, 40, 51, 28, 47, 78, 94, 27, 42, 46, + 54, 27, 54, 65, 111, 28, 66, 43, 21, 134, 30, 35, 43, 58, 53, 23, 35, 69, 32, 56, 53, 80, 68, + 60, 83, 59, 33, 42, 36, 34, 91, 83, 60, 50, 50, 39, 64, 25, 28, 70, 33, 73, 52, 56, 45, 39, + 117, 41, 171, 57, 51, 67, 72, 23, 47, 55, 26, 29, 38, 65, 39, 32, 24, 21, 71, 74, 26, 30, 52, + 138, 121, 42, 35, 126, 105, 22, 28, 72, 46, 14, 53, 111, 51, 78, 33, 48, 80, 46, 48, 55, 30, + 33, 40, 31, 89, 32, 47, 98, 40, 35, 56, 51, 55, 31, 36, 68, 13, 92, 57, 23, 19, 31, 86, 60, 44, + 56, 94, 47, 45, 42, 45, 19, 46, 158, 47, 69, 42, 106, 71, 48, 42, 40, 33, 66, 56, 86, 38, 23, + 19, 15, 73, 50, 71, 78, 37, 113, 140, 54, 44, 68, 78, 97, 72, 47, 60, 41, 32, 24, 25, 41, 65, + 82, 53, 44, 20, 44, 32, 26, 29, 41, 42, 89, 151, 64, 47, 121, 67, 56, 92, 23, 61, 50, 79, 33, + 38, 85, 60, 34, 33, 34, 59, 73, 26, 68, 61, 56, 55, 36, 18, 36, 24, 52, 21, 56, 161, 90, 82, + 45, 36, 25, 68, 74, 76, 36, 42, 24, 49, 31, 25, 28, 47, 56, 77, 19, 108, 32, 59, 82, 32, 31, + 56, 27, 41, 31, 57, 71, 85, 43, 48, 48, 49, 45, 34, 55, 37, 49, 48, 26, 89, 55, 42, 65, 29, + 128, 64, 43, 88, 54, 37, 25, 49, 68, 59, 25, 62, 38, 50, 93, 59, 65, 33, 56, 24, 77, 34, 27, + 33, 69, 55, 18, 17, 41, 32, 51, 38, 40, 58, 69, 41, 63, 20, 35, 58, 41, 69, 28, 74, 74, 55, 48, + 78, 45, 58, 28, 35, 61, 37, 43, 86, 54, 64, 83, 44, 43, 84, 21, 60, 53, 110, 46, 55, 26, 42, + 29, 67, 52, 42, 57, 99, 36, 65, 51, 29, 22, 94, 108, 92, 31, 42, 132, 73, 70, 125, 28, 39, 38, + 41, 29, 52, 45, 31, 126, 52, 15, 43, 14, 44, 60, 38, 49, 59, 41, 100, 128, 75, 81, 161, 47, 55, + 23, 48, 38, 47, 53, 47, 43, 14, 92, 69, 20, 51, 102, 122, 171, 38, 80, 101, 34, 67, 58, 95, 57, + 44, 64, 29, 70, 42, 63, 56, 58, 35, 47, 40, 67, 137, 66, 27, 28, 51, 83, 102, 29, 53, 53, 47, + 27, 68, 19, 71, 23, 42, 46, 36, 38, 63, 96, 51, 37, 36, 72, 40, 23, 51, 79, 63, 118, 172, 68, + 46, 94, 73, 144, 59, 46, 76, 56, 61, 98, 26, 37, 35, 32, 51, 39, 70, 95, 36, 61, 59, 31, 57, + 34, 29, 20, 118, 47, 42, 102, 145, 35, 27, 48, 24, 37, 48, 38, 116, 56, 53, 115, 106, 43, 50, + 31, 35, 24, 69, 45, 66, 45, 24, 29, 73, 
34, 25, 22, 37, 59, 42, 38, 48, 52, 80, 52, 47, 42, 30, + 47, 25, 104, 55, 19, 26, 71, 16, 70, 58, 35, 37, 26, 28, 41, 24, 43, 38, 54, 64, 57, 68, 54, + 36, 44, 99, 111, 63, 30, 59, 92, 29, 87, 23, 45, 98, 51, 70, 47, 34, 97, 60, 39, 27, 58, 41, + 72, 82, 126, 50, 54, 41, 35, 15, 27, 69, 114, 25, 73, 40, 74, 120, 31, 18, 43, 71, 80, 64, 47, + 232, 81, 41, 55, 75, 63, 49, 55, 64, 37, 44, 74, 40, 69, 29, 79, 94, 40, 23, 58, 57, 23, 67, + 18, 34, 114, 74, 53, 12, 52, 29, 51, 53, 63, 43, 46, 39, 53, 36, 68, 31, 26, 48, 40, 63, 23, + 57, 82, 32, 74, 76, 67, 29, 43, 59, 24, 33, 64, 86, 59, 39, 36, 131, 42, 71, 28, 30, 41, 60, + 77, 50, 63, 48, 59, 24, 51, 45, 46, 37, 119, 33, 43, 33, 44, 63, 16, 46, 43, 28, 46, 57, 57, + 39, 159, 42, 22, 38, 81, 34, 48, 41, 35, 97, 20, 49, 46, 102, 49, 84, 56, 33, 60, 29, 28, 92, + 56, 23, 37, 68, 57, 40, 31, 44, 79, 38, 81, 69, 44, 20, 33, 69, 124, 76, 31, 56, 41, 38, 29, + 34, 76, 128, 69, 28, 35, 144, 40, 34, 40, 165, 60, 43, 86, 25, 47, 71, 29, 39, 62, 25, 18, 43, + 138, 34, 25, 47, 93, 31, 68, 43, 19, 49, 59, 45, 53, 32, 35, 47, 97, 25, 34, 49, 27, 26, 43, + 39, 66, 41, 24, 171, 39, 165, 31, 65, 36, 85, 37, 50, 66, 42, 35, 45, 20, 45, 39, 51, 22, 68, + 48, 29, 95, 47, 21, 108, 37, 55, 112, 69, 52, 57, 39, 78, 56, 37, 38, 65, 31, 71, 76, 31, 91, + 42, 104, 62, 24, 72, 33, 40, 60, 84, 39, 53, 23, 17, 20, 33, 83, 31, 31, 43, 29, 67, 55, 71, + 31, 48, 50, 61, 53, 35, 72, 60, 47, 40, 74, 120, 104, 40, 47, 39, 55, 42, 115, 42, 40, 47, 46, + 70, 65, 23, 36, 40, 80, 49, 64, 48, 40, 70, 161, 50, 9, 31, 45, 23, 59, 37, 71, 80, 20, 66, 81, + 36, 31, 19, 81, 142, 47, 87, 92, 53, 51, 140, 40, 28, 93, 37, 24, 42, 33, 30, 156, 50, 66, 17, + 53, 20, 63, 72, 40, 38, 97, 29, 58, 64, 72, 47, 54, 31, 80, 37, 30, 42, 34, 86, 36, +]; diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 235bb39c7d4..4b4ff3c0ce4 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1,3 +1,12 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning //! running properly, and (2) either can or should be run in the background. #![cfg_attr(feature = "std", doc = "See docs for [`BackgroundProcessor`] for more details.")] @@ -17,6 +26,10 @@ extern crate alloc; extern crate lightning; extern crate lightning_rapid_gossip_sync; +mod fwd_batch; + +use fwd_batch::BatchDelay; + use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use lightning::chain::chainmonitor::{ChainMonitor, Persist}; @@ -101,59 +114,59 @@ pub struct BackgroundProcessor { } #[cfg(not(test))] -const FRESHNESS_TIMER: u64 = 60; +const FRESHNESS_TIMER: Duration = Duration::from_secs(60); #[cfg(test)] -const FRESHNESS_TIMER: u64 = 1; +const FRESHNESS_TIMER: Duration = Duration::from_secs(1); #[cfg(all(not(test), not(debug_assertions)))] -const PING_TIMER: u64 = 10; +const PING_TIMER: Duration = Duration::from_secs(10); /// Signature operations take a lot longer without compiler optimisations. /// Increasing the ping timer allows for this but slower devices will be disconnected if the /// timeout is reached. 
#[cfg(all(not(test), debug_assertions))] -const PING_TIMER: u64 = 30; +const PING_TIMER: Duration = Duration::from_secs(30); #[cfg(test)] -const PING_TIMER: u64 = 1; +const PING_TIMER: Duration = Duration::from_secs(1); #[cfg(not(test))] -const ONION_MESSAGE_HANDLER_TIMER: u64 = 10; +const ONION_MESSAGE_HANDLER_TIMER: Duration = Duration::from_secs(10); #[cfg(test)] -const ONION_MESSAGE_HANDLER_TIMER: u64 = 1; +const ONION_MESSAGE_HANDLER_TIMER: Duration = Duration::from_secs(1); /// Prune the network graph of stale entries hourly. -const NETWORK_PRUNE_TIMER: u64 = 60 * 60; +const NETWORK_PRUNE_TIMER: Duration = Duration::from_secs(60 * 60); #[cfg(not(test))] -const SCORER_PERSIST_TIMER: u64 = 60 * 5; +const SCORER_PERSIST_TIMER: Duration = Duration::from_secs(60 * 5); #[cfg(test)] -const SCORER_PERSIST_TIMER: u64 = 1; +const SCORER_PERSIST_TIMER: Duration = Duration::from_secs(1); #[cfg(not(test))] -const FIRST_NETWORK_PRUNE_TIMER: u64 = 60; +const FIRST_NETWORK_PRUNE_TIMER: Duration = Duration::from_secs(60); #[cfg(test)] -const FIRST_NETWORK_PRUNE_TIMER: u64 = 1; +const FIRST_NETWORK_PRUNE_TIMER: Duration = Duration::from_secs(1); #[cfg(not(test))] -const REBROADCAST_TIMER: u64 = 30; +const REBROADCAST_TIMER: Duration = Duration::from_secs(30); #[cfg(test)] -const REBROADCAST_TIMER: u64 = 1; +const REBROADCAST_TIMER: Duration = Duration::from_secs(1); #[cfg(not(test))] -const SWEEPER_TIMER: u64 = 30; +const SWEEPER_TIMER: Duration = Duration::from_secs(30); #[cfg(test)] -const SWEEPER_TIMER: u64 = 1; +const SWEEPER_TIMER: Duration = Duration::from_secs(1); /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement -const fn min_u64(a: u64, b: u64) -> u64 { - if a < b { +const fn min_duration(a: Duration, b: Duration) -> Duration { + if a.as_nanos() < b.as_nanos() { a } else { b } } -const FASTEST_TIMER: u64 = min_u64( - min_u64(FRESHNESS_TIMER, PING_TIMER), - min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)), +const FASTEST_TIMER: Duration = min_duration( + min_duration(FRESHNESS_TIMER, PING_TIMER), + min_duration(SCORER_PERSIST_TIMER, min_duration(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)), ); /// Either [`P2PGossipSync`] or [`RapidGossipSync`]. @@ -319,7 +332,7 @@ macro_rules! define_run_body { $peer_manager: ident, $gossip_sync: ident, $process_sweeper: expr, $logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr, - $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr, + $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr, $batch_delay: expr, ) => { { log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup"); $channel_manager.get_cm().timer_tick_occurred(); @@ -336,6 +349,9 @@ macro_rules! define_run_body { let mut have_pruned = false; let mut have_decayed_scorer = false; + let mut cur_batch_delay = $batch_delay.get(); + let mut last_forwards_processing_call = $get_timer(cur_batch_delay); + loop { $process_channel_manager_events; $process_chain_monitor_events; @@ -360,12 +376,25 @@ macro_rules! define_run_body { break; } + if $timer_elapsed(&mut last_forwards_processing_call, cur_batch_delay) { + $channel_manager.get_cm().process_pending_htlc_forwards(); + cur_batch_delay = $batch_delay.next(); + last_forwards_processing_call = $get_timer(cur_batch_delay); + } + + // Check whether to exit the loop again, as some time might have passed since we + // checked above.
+ if $loop_exit_check { + log_trace!($logger, "Terminating background processor."); + break; + } + // We wait up to 100ms, but track how long it takes to detect being put to sleep, // see `await_start`'s use below. let mut await_start = None; - if $check_slow_await { await_start = Some($get_timer(1)); } + if $check_slow_await { await_start = Some($get_timer(Duration::from_secs(1))); } $await; - let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false }; + let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), Duration::from_secs(1)) } else { false }; // Exit the loop if the background processor was requested to stop. if $loop_exit_check { @@ -514,12 +543,14 @@ pub(crate) mod futures_util { C: Future + Unpin, D: Future + Unpin, E: Future + Unpin, + F: Future + Unpin, > { pub a: A, pub b: B, pub c: C, pub d: D, pub e: E, + pub f: F, } pub(crate) enum SelectorOutput { @@ -528,6 +559,7 @@ pub(crate) mod futures_util { C, D, E(bool), + F(bool), } impl< @@ -536,7 +568,8 @@ pub(crate) mod futures_util { C: Future + Unpin, D: Future + Unpin, E: Future + Unpin, - > Future for Selector + F: Future + Unpin, + > Future for Selector { type Output = SelectorOutput; fn poll( @@ -572,6 +605,12 @@ pub(crate) mod futures_util { }, Poll::Pending => {}, } + match Pin::new(&mut self.f).poll(ctx) { + Poll::Ready(res) => { + return Poll::Ready(SelectorOutput::F(res)); + }, + Poll::Pending => {}, + } Poll::Pending } } @@ -854,6 +893,7 @@ where event_handler(event).await }) }; + let mut batch_delay = BatchDelay::new(); define_run_body!( persister, chain_monitor, @@ -892,10 +932,11 @@ where b: chain_monitor.get_update_future(), c: om_fut, d: lm_fut, - e: sleeper(if mobile_interruptable_platform { + e: sleeper(batch_delay.get()), + f: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { - Duration::from_secs(FASTEST_TIMER) + FASTEST_TIMER }), }; match fut.await { @@ -903,9 +944,12 @@ where SelectorOutput::E(exit) => { should_break = exit; }, + SelectorOutput::F(exit) => { + should_break = exit; + }, } }, - |t| sleeper(Duration::from_secs(t)), + |t| sleeper(t), |fut: &mut SleepFuture, _| { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -919,6 +963,7 @@ where }, mobile_interruptable_platform, fetch_time, + batch_delay, ) } @@ -1042,6 +1087,7 @@ impl BackgroundProcessor { } event_handler.handle_event(event) }; + let mut batch_delay = BatchDelay::new(); define_run_body!( persister, chain_monitor, @@ -1085,10 +1131,12 @@ impl BackgroundProcessor { &chain_monitor.get_update_future(), ), }; - sleeper.wait_timeout(Duration::from_millis(100)); + let batch_delay = batch_delay.get(); + let fastest_timeout = batch_delay.min(Duration::from_millis(100)); + sleeper.wait_timeout(fastest_timeout); }, |_| Instant::now(), - |time: &Instant, dur| time.elapsed().as_secs() > dur, + |time: &Instant, dur| time.elapsed() > dur, false, || { use std::time::SystemTime; @@ -1098,6 +1146,7 @@ impl BackgroundProcessor { .expect("Time should be sometime after 1970"), ) }, + batch_delay, ) }); Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) } @@ -1206,7 +1255,8 @@ mod tests { use std::time::Duration; use std::{env, fs}; - const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER; + const EVENT_DEADLINE: Duration = + Duration::from_millis(5 * (FRESHNESS_TIMER.as_millis() as u64)); #[derive(Clone, Hash, PartialEq, Eq)] struct TestDescriptor {} @@ -2244,7 +2294,7 @@ mod tests { // Open a channel and check 
that the FundingGenerationReady event was handled. begin_open_channel!(nodes[0], nodes[1], channel_value); let (temporary_channel_id, funding_tx) = funding_generation_recv - .recv_timeout(Duration::from_secs(EVENT_DEADLINE)) + .recv_timeout(EVENT_DEADLINE) .expect("FundingGenerationReady not handled within deadline"); nodes[0] .node @@ -2256,7 +2306,7 @@ mod tests { let msg_1 = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_0_id); nodes[0].node.handle_funding_signed(node_1_id, &msg_1); channel_pending_recv - .recv_timeout(Duration::from_secs(EVENT_DEADLINE)) + .recv_timeout(EVENT_DEADLINE) .expect("ChannelPending not handled within deadline"); // Confirm the funding transaction. @@ -2318,9 +2368,8 @@ mod tests { let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap(); confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32); - let event = receiver - .recv_timeout(Duration::from_secs(EVENT_DEADLINE)) - .expect("Events not handled within deadline"); + let event = + receiver.recv_timeout(EVENT_DEADLINE).expect("Events not handled within deadline"); match event { Event::SpendableOutputs { outputs, channel_id } => { nodes[0] @@ -2472,8 +2521,8 @@ mod tests { begin_open_channel!(nodes[0], nodes[1], channel_value); assert_eq!( - first_event_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE)).unwrap(), - second_event_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE)).unwrap() + first_event_recv.recv_timeout(EVENT_DEADLINE).unwrap(), + second_event_recv.recv_timeout(EVENT_DEADLINE).unwrap() ); if !std::thread::panicking() { @@ -2600,7 +2649,7 @@ mod tests { do_test_not_pruning_network_graph_until_graph_sync_completion!( nodes, - receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)), + receiver.recv_timeout(super::FIRST_NETWORK_PRUNE_TIMER * 5), std::thread::sleep(Duration::from_millis(1)) ); @@ -2649,8 +2698,7 @@ mod tests { { let mut i = 0; loop { - tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)) - .await; + tokio::time::sleep(super::FIRST_NETWORK_PRUNE_TIMER).await; if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); } @@ -2797,10 +2845,7 @@ mod tests { Some(Arc::clone(&nodes[0].scorer)), ); - do_test_payment_path_scoring!( - nodes, - receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)) - ); + do_test_payment_path_scoring!(nodes, receiver.recv_timeout(EVENT_DEADLINE)); if !std::thread::panicking() { bg_processor.stop().unwrap(); diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 73dccdadd23..6cb040fe0d5 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -179,10 +179,7 @@ mod test { use lightning::types::payment::PaymentHash; use lightning::util::logger::Logger; - use lightning::{ - commitment_signed_dance, expect_payment_claimed, expect_pending_htlcs_forwardable, - get_htlc_update_msgs, - }; + use lightning::{commitment_signed_dance, expect_payment_claimed, get_htlc_update_msgs}; use lightning_types::string::UntrustedString; use std::ops::Deref; @@ -413,7 +410,7 @@ mod test { let updates = get_htlc_update_msgs!(nodes[0], payee_id); nodes[1].node.handle_update_add_htlc(payer_id, &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let claimable_events = nodes[1].node.get_and_clear_pending_events(); 
 	assert_eq!(claimable_events.len(), 1);
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index d01af737c32..169cd6ed52c 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -50,7 +50,6 @@ use bitcoin::script::ScriptBuf;
 use bitcoin::secp256k1::PublicKey;
 use bitcoin::{OutPoint, Transaction};
 use core::ops::Deref;
-use core::time::Duration;
 
 #[allow(unused_imports)]
 use crate::prelude::*;
@@ -1174,21 +1173,6 @@ pub enum Event {
 		/// with channels in the public network graph.
 		short_channel_id: Option<u64>,
 	},
-	/// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at
-	/// a time in the future.
-	///
-	/// # Failure Behavior and Persistence
-	/// This event will eventually be replayed after failures-to-handle (i.e., the event handler
-	/// returning `Err(ReplayEvent ())`) and will be regenerated after restarts.
-	///
-	/// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
-	PendingHTLCsForwardable {
-		/// The minimum amount of time that should be waited prior to calling
-		/// process_pending_htlc_forwards. To increase the effort required to correlate payments,
-		/// you should wait a random amount of time in roughly the range (now + time_forwardable,
-		/// now + 5*time_forwardable).
-		time_forwardable: Duration,
-	},
 	/// Used to indicate that we've intercepted an HTLC forward. This event will only be generated if
 	/// you've encoded an intercept scid in the receiver's invoice route hints using
 	/// [`ChannelManager::get_intercept_scid`] and have set [`UserConfig::accept_intercept_htlcs`].
@@ -1721,11 +1705,7 @@ impl Writeable for Event {
 					(13, failure, required),
 				});
 			},
-			&Event::PendingHTLCsForwardable { time_forwardable: _ } => {
-				4u8.write(writer)?;
-				// Note that we now ignore these on the read end as we'll re-generate them in
-				// ChannelManager, we write them here only for backwards compatibility.
-			},
+			// 4u8 used to be `PendingHTLCsForwardable`
 			&Event::SpendableOutputs { ref outputs, channel_id } => {
 				5u8.write(writer)?;
 				write_tlv_fields!(writer, {
diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs
index a956f2ebae2..4f13cd86f9b 100644
--- a/lightning/src/ln/async_payments_tests.rs
+++ b/lightning/src/ln/async_payments_tests.rs
@@ -915,7 +915,7 @@ fn invalid_async_receive_with_retry(
 	// Fail the HTLC backwards to enable us to more easily modify the now-Retryable outbound to test
 	// failures on the recipient's end.
nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_conditions( + expect_htlc_failure_conditions( nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }], ); diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index b667007295a..00bbff30edd 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -956,7 +956,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events_5 = nodes[1].node.get_and_clear_pending_events(); check_payment_claimable( diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 4a868e0eb76..c722e239c6c 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -439,7 +439,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); if intro_fails { @@ -477,7 +477,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let failed_destination = match check { ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingFailureType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => @@ -534,7 +534,7 @@ fn failed_backwards_to_intro_node() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -548,7 +548,7 @@ fn failed_backwards_to_intro_node() { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); @@ -626,7 +626,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the // intro node will error backwards. 
$curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); - expect_pending_htlcs_forwardable!($curr_node); + $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), vec![HTLCHandlingFailureType::Forward { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); }, @@ -636,10 +636,6 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.force_close_broadcasting_latest_txn(&$failed_chan_id, &$next_node.node.get_our_node_id(), error_message.to_string()).unwrap(); let events = $curr_node.node.get_and_clear_pending_events(); match events[0] { - crate::events::Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event {:?}", events), - }; - match events[1] { crate::events::Event::ChannelClosed { .. } => {}, _ => panic!("Unexpected event {:?}", events), } @@ -662,7 +658,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, return } - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); @@ -729,7 +725,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -747,7 +743,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { if intercept_node_fails { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], [HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); @@ -755,7 +751,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { } nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap(); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let payment_event = { { @@ -769,7 +765,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { }; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); do_claim_payment_along_route( @@ -851,7 +847,7 @@ fn three_hop_blinded_path_fail() { pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], amt_msat, payment_hash, payment_secret); nodes[3].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_conditions( + expect_htlc_failure_conditions( 
nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); @@ -958,7 +954,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); let mut payment_event_1_2 = { @@ -973,13 +969,13 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); check_payment_claimable( &nodes[2].node.get_and_clear_pending_events()[0], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id() ); nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_conditions( + expect_htlc_failure_conditions( nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); @@ -1009,7 +1005,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); }, @@ -1019,7 +1015,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors(&nodes[2], 1); }, @@ -1035,7 +1031,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors(&nodes[2], 1); }, @@ -1044,16 +1040,16 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2], 
- [HTLCHandlingFailureType::Receive { payment_hash }]); + nodes[2].node.process_pending_htlc_forwards(); + nodes[2].node.process_pending_htlc_forwards(); + expect_htlc_failure_conditions(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors(&nodes[2], 1); } @@ -1142,7 +1138,7 @@ fn blinded_path_retries() { macro_rules! fail_payment_back { ($intro_node: expr) => { nodes[3].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_conditions( + expect_htlc_failure_conditions( nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); @@ -1162,7 +1158,7 @@ fn blinded_path_retries() { do_commitment_signed_dance(&nodes[0], &$intro_node, &updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 1); match events[0] { Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { assert_eq!(payment_hash, ev_payment_hash); @@ -1170,10 +1166,6 @@ fn blinded_path_retries() { }, _ => panic!("Unexpected event"), } - match events[1] { - Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event"), - } nodes[0].node.process_pending_htlc_forwards(); } } @@ -1262,7 +1254,7 @@ fn min_htlc() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); check_added_monitors!(nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] @@ -1455,7 +1447,7 @@ fn fails_receive_tlvs_authentication() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); diff --git a/lightning/src/ln/bolt11_payment_tests.rs b/lightning/src/ln/bolt11_payment_tests.rs index 3a18c719022..560dbe66b19 100644 --- a/lightning/src/ln/bolt11_payment_tests.rs +++ b/lightning/src/ln/bolt11_payment_tests.rs @@ -78,7 +78,7 @@ fn payment_metadata_end_to_end_for_invoice_with_amount() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -146,7 +146,7 @@ fn payment_metadata_end_to_end_for_invoice_with_no_amount() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ef8f256ed5e..18b8e9eee56 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -208,7 +208,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events_3 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); @@ -646,7 +646,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); @@ -781,7 +781,7 @@ fn test_monitor_update_fail_cs() { nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); check_added_monitors!(nodes[1], 1); - 
expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -853,7 +853,7 @@ fn test_monitor_update_fail_no_rebroadcast() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -938,12 +938,12 @@ fn test_monitor_update_raa_while_paused() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); @@ -972,7 +972,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] ); @@ -1005,7 +1005,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1034,7 +1034,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell // and not forwarded. 
- expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -1064,7 +1064,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -1201,7 +1201,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let events_6 = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events_6.len(), 2); @@ -1219,7 +1219,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { }; if test_ignore_second_cs { - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); @@ -1228,7 +1228,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[0].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); let events_9 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_9.len(), 1); @@ -1425,7 +1425,7 @@ fn raa_no_response_awaiting_raa_state() { // nodes[1] should be AwaitingRAA here! 
check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); // We send a third payment here, which is somewhat of a redundant test, but the @@ -1454,7 +1454,7 @@ fn raa_no_response_awaiting_raa_state() { // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -1467,7 +1467,7 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); @@ -1595,7 +1595,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); @@ -1691,7 +1691,7 @@ fn monitor_failed_no_reestablish_response() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); @@ -1779,7 +1779,8 @@ fn first_message_on_recv_ordering() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.get_and_clear_pending_events(); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -1792,7 +1793,7 @@ fn first_message_on_recv_ordering() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); @@ -1855,7 +1856,7 @@ fn test_monitor_update_fail_claim() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); let id_3 = PaymentId(payment_hash_3.0); @@ -1890,7 +1891,7 @@ fn test_monitor_update_fail_claim() { nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); 
nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -1969,7 +1970,7 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] ); @@ -1994,7 +1995,7 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -2011,23 +2012,19 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); - if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] { + assert_eq!(events.len(), 2); + if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[0] { assert_eq!(payment_hash, payment_hash_1); assert!(payment_failed_permanently); } else { panic!("Unexpected event!"); } - match events[2] { + match events[1] { Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(payment_hash_1)); }, _ => panic!("Unexpected event"), } - match events[0] { - Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event"), - }; nodes[0].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000); @@ -2084,7 +2081,7 @@ fn monitor_update_claim_fail_no_response() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -2452,7 +2449,7 @@ fn test_pending_update_fee_ack_on_reconnect() { ); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000); claim_payment(&nodes[1], &[&nodes[0]], payment_preimage); @@ -2501,7 +2498,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); @@ -2803,19 +2800,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false); let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 1); match events[0] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - }; - match events[1] { Event::PaymentPathSuccessful { .. 
} => {}, _ => panic!("Unexpected event"), }; @@ -2891,7 +2884,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f msgs::UpdateFulfillHTLC { channel_id: chan_id_2, htlc_id: 0, payment_preimage }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }] ); @@ -2940,7 +2933,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_fails.0 = 1; reconnect_nodes(reconnect_args); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); @@ -2953,7 +2946,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if htlc_status == HTLCStatusAtDupClaim::HoldingCell { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); // We finally receive the second payment, but don't claim it bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 94bc9972a30..4a8276a5949 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -962,12 +962,6 @@ impl MsgHandleErrInternal { } } -/// We hold back HTLCs we intend to relay for a random interval greater than this (see -/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited). -/// This provides some limited amount of privacy. Ideally this would range from somewhere like one -/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. -pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; - /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should /// be sent in the order they appear in the return value, however sometimes the order needs to be /// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order @@ -2726,6 +2720,10 @@ pub struct ChannelManager< /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously. pending_events_processor: AtomicBool, + /// A simple atomic flag to ensure only one task at a time can be processing HTLC forwards via + /// [`Self::process_pending_htlc_forwards`]. 
+ pending_htlc_forwards_processor: AtomicBool, + /// If we are running during init (either directly during the deserialization method or in /// block connection methods which run after deserialization but before normal operation) we /// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow - @@ -3790,6 +3788,7 @@ where pending_events: Mutex::new(VecDeque::new()), pending_events_processor: AtomicBool::new(false), + pending_htlc_forwards_processor: AtomicBool::new(false), pending_background_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), background_events_processed_since_startup: AtomicBool::new(false), @@ -6148,7 +6147,6 @@ where Ok(()) } - #[rustfmt::skip] pub(crate) fn process_pending_update_add_htlcs(&self) { let mut decode_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); @@ -6156,11 +6154,12 @@ where let get_htlc_failure_type = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { if let Some(outgoing_scid) = outgoing_scid_opt { match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) { - Some((outgoing_counterparty_node_id, outgoing_channel_id)) => + Some((outgoing_counterparty_node_id, outgoing_channel_id)) => { HTLCHandlingFailureType::Forward { node_id: Some(*outgoing_counterparty_node_id), channel_id: *outgoing_channel_id, - }, + } + }, None => HTLCHandlingFailureType::InvalidForward { requested_forward_scid: outgoing_scid, }, @@ -6171,18 +6170,28 @@ where }; 'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs { - let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut FundedChannel| { - let counterparty_node_id = chan.context.get_counterparty_node_id(); - let channel_id = chan.context.channel_id(); - let funding_txo = chan.funding.get_funding_txo().unwrap(); - let user_channel_id = chan.context.get_user_id(); - let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs; - (counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs) - }); + let incoming_channel_details_opt = + self.do_funded_channel_callback(incoming_scid, |chan: &mut FundedChannel| { + let counterparty_node_id = chan.context.get_counterparty_node_id(); + let channel_id = chan.context.channel_id(); + let funding_txo = chan.funding.get_funding_txo().unwrap(); + let user_channel_id = chan.context.get_user_id(); + let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs; + ( + counterparty_node_id, + channel_id, + funding_txo, + user_channel_id, + accept_underpaying_htlcs, + ) + }); let ( - incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo, - incoming_user_channel_id, incoming_accept_underpaying_htlcs - ) = if let Some(incoming_channel_details) = incoming_channel_details_opt { + incoming_counterparty_node_id, + incoming_channel_id, + incoming_funding_txo, + incoming_user_channel_id, + incoming_accept_underpaying_htlcs, + ) = if let Some(incoming_channel_details) = incoming_channel_details_opt { incoming_channel_details } else { // The incoming channel no longer exists, HTLCs should be resolved onchain instead. 
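The `pending_htlc_forwards_processor` flag added above serves the same purpose as the existing `pending_events_processor` flag: `process_pending_htlc_forwards` now compare-exchanges it so that the background processor's batch timer and any direct caller cannot run the forwarding body concurrently, while the inner `internal_process_pending_htlc_forwards` reports whether any work was done so a no-op pass can skip persistence. Below is a minimal, self-contained sketch of that guard pattern, with hypothetical names (`ForwardGuard`, `drain_forwards`) standing in for the real `ChannelManager` internals:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Hypothetical stand-in for the piece of `ChannelManager` state relevant here:
/// the new single-processor flag.
struct ForwardGuard {
    processing: AtomicBool,
}

impl ForwardGuard {
    fn new() -> Self {
        Self { processing: AtomicBool::new(false) }
    }

    /// Only one caller at a time runs the forwarding body; concurrent callers
    /// return immediately instead of blocking.
    fn process_pending_forwards(&self) {
        // Try to flip `false -> true`. If another task already holds the flag,
        // it is draining the queue right now, so this call becomes a no-op.
        if self
            .processing
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            return;
        }
        self.drain_forwards();
        // Release the flag so the next batch-timer tick (or direct call) can run.
        self.processing.store(false, Ordering::Release);
    }

    fn drain_forwards(&self) {
        // Placeholder for the actual batched HTLC forwarding work.
    }
}

fn main() {
    let guard = ForwardGuard::new();
    // If these two calls raced from different threads, only one forwarding body
    // would run; the loser would return immediately.
    guard.process_pending_forwards();
    guard.process_pending_forwards();
}
```

The `Acquire`/`Release` orderings mirror the diff: a successful acquire synchronizes with the previous holder's release, and a failed exchange simply returns rather than blocking, which is acceptable because the batch-delay timer will trigger another pass shortly.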
@@ -6192,40 +6201,53 @@ where let mut htlc_forwards = Vec::new(); let mut htlc_fails = Vec::new(); for update_add_htlc in &update_add_htlcs { - let (next_hop, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion( - &update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx - ) { - Ok(decoded_onion) => decoded_onion, + let (next_hop, next_packet_details_opt) = + match decode_incoming_update_add_htlc_onion( + &update_add_htlc, + &*self.node_signer, + &*self.logger, + &self.secp_ctx, + ) { + Ok(decoded_onion) => decoded_onion, - Err((htlc_fail, reason)) => { - htlc_fails.push((htlc_fail, HTLCHandlingFailureType::InvalidOnion, reason.into())); - continue; - }, - }; + Err((htlc_fail, reason)) => { + let failure_type = HTLCHandlingFailureType::InvalidOnion; + htlc_fails.push((htlc_fail, failure_type, reason.into())); + continue; + }, + }; let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward(); - let outgoing_scid_opt = next_packet_details_opt.as_ref().and_then(|d| { - match d.outgoing_connector { - HopConnector::ShortChannelId(scid) => { Some(scid) } - HopConnector::Trampoline(_) => { None } - } - }); + let outgoing_scid_opt = + next_packet_details_opt.as_ref().and_then(|d| match d.outgoing_connector { + HopConnector::ShortChannelId(scid) => Some(scid), + HopConnector::Trampoline(_) => None, + }); let shared_secret = next_hop.shared_secret().secret_bytes(); // Process the HTLC on the incoming channel. - match self.do_funded_channel_callback(incoming_scid, |chan: &mut FundedChannel| { - let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash)); - chan.can_accept_incoming_htlc( - update_add_htlc, &self.fee_estimator, &logger, - ) - }) { + match self.do_funded_channel_callback( + incoming_scid, + |chan: &mut FundedChannel| { + let logger = WithChannelContext::from( + &self.logger, + &chan.context, + Some(update_add_htlc.payment_hash), + ); + chan.can_accept_incoming_htlc(update_add_htlc, &self.fee_estimator, &logger) + }, + ) { Some(Ok(_)) => {}, Some(Err(reason)) => { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, reason, - is_intro_node_blinded_forward, &shared_secret, + &update_add_htlc, + &incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, ); - let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); htlc_fails.push((htlc_fail, failure_type, reason.into())); continue; }, @@ -6235,28 +6257,41 @@ where // Now process the HTLC on the outgoing channel if it's a forward. 
if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - if let Err(reason) = self.can_forward_htlc( - &update_add_htlc, next_packet_details - ) { + if let Err(reason) = + self.can_forward_htlc(&update_add_htlc, next_packet_details) + { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, reason, - is_intro_node_blinded_forward, &shared_secret, + &update_add_htlc, + &incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, ); - let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); htlc_fails.push((htlc_fail, failure_type, reason.into())); continue; } } match self.get_pending_htlc_info( - &update_add_htlc, shared_secret, next_hop, incoming_accept_underpaying_htlcs, + &update_add_htlc, + shared_secret, + next_hop, + incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), Err(inbound_err) => { - let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); let htlc_failure = inbound_err.reason.into(); - let htlc_fail = self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err); + let htlc_fail = self.construct_pending_htlc_fail_msg( + &update_add_htlc, + &incoming_counterparty_node_id, + shared_secret, + inbound_err, + ); htlc_fails.push((htlc_fail, failure_type, htlc_failure)); }, } @@ -6265,8 +6300,12 @@ where // Process all of the forwards and failures for the channel in which the HTLCs were // proposed to as a batch. let pending_forwards = ( - incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo, - incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect() + incoming_scid, + Some(incoming_counterparty_node_id), + incoming_funding_txo, + incoming_channel_id, + incoming_user_channel_id, + htlc_forwards.drain(..).collect(), ); self.forward_htlcs_without_forward_event(&mut [pending_forwards]); for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) { @@ -6275,40 +6314,60 @@ where htlc_id: fail_htlc.htlc_id, err_packet: fail_htlc.into(), }, - HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC { - htlc_id: fail_malformed_htlc.htlc_id, - sha256_of_onion: fail_malformed_htlc.sha256_of_onion, - failure_code: fail_malformed_htlc.failure_code.into(), + HTLCFailureMsg::Malformed(fail_malformed_htlc) => { + HTLCForwardInfo::FailMalformedHTLC { + htlc_id: fail_malformed_htlc.htlc_id, + sha256_of_onion: fail_malformed_htlc.sha256_of_onion, + failure_code: fail_malformed_htlc.failure_code.into(), + } }, }; self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure); - self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed { - prev_channel_id: incoming_channel_id, - failure_type, - failure_reason: Some(failure_reason), - }, None)); + self.pending_events.lock().unwrap().push_back(( + events::Event::HTLCHandlingFailed { + prev_channel_id: incoming_channel_id, + failure_type, + failure_reason: Some(failure_reason), + }, + None, + )); } } } /// Processes HTLCs which are pending waiting on random forward delay. 
/// - /// Should only really ever be called in response to a PendingHTLCsForwardable event. - /// Will likely generate further events. - #[rustfmt::skip] + /// Will regularly be called by the background processor. pub fn process_pending_htlc_forwards(&self) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + if self + .pending_htlc_forwards_processor + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_err() + { + return; + } + + let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { + self.internal_process_pending_htlc_forwards() + }); + self.pending_htlc_forwards_processor.store(false, Ordering::Release); + } + + // Returns whether or not we need to re-persist. + fn internal_process_pending_htlc_forwards(&self) -> NotifyOption { + let mut should_persist = NotifyOption::SkipPersistNoEvents; self.process_pending_update_add_htlcs(); let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); - let mut phantom_receives: Vec<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); + let mut phantom_receives: Vec = Vec::new(); { let mut forward_htlcs = new_hash_map(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); for (short_chan_id, mut pending_forwards) in forward_htlcs { + should_persist = NotifyOption::DoPersist; if short_chan_id != 0 { let mut forwarding_counterparty = None; macro_rules! forwarding_channel_not_found { @@ -6421,13 +6480,14 @@ where } } } - let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned(); + let chan_info_opt = + self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned(); let (counterparty_node_id, forward_chan_id) = match chan_info_opt { Some((cp_id, chan_id)) => (cp_id, chan_id), None => { forwarding_channel_not_found!(pending_forwards.drain(..)); continue; - } + }, }; forwarding_counterparty = Some(counterparty_node_id); let per_peer_state = self.per_peer_state.read().unwrap(); @@ -6442,35 +6502,56 @@ where while let Some(forward_info) = draining_pending_forwards.next() { let queue_fail_htlc_res = match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { - incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { - ref onion_packet, blinded, incoming_cltv_expiry, .. - }, skimmed_fee_msat, .. - }, + prev_short_channel_id, + prev_htlc_id, + prev_channel_id, + prev_funding_outpoint, + prev_user_channel_id, + prev_counterparty_node_id, + forward_info: + PendingHTLCInfo { + incoming_shared_secret, + payment_hash, + outgoing_amt_msat, + outgoing_cltv_value, + routing: + PendingHTLCRouting::Forward { + ref onion_packet, + blinded, + incoming_cltv_expiry, + .. + }, + skimmed_fee_msat, + .. + }, }) => { - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - user_channel_id: Some(prev_user_channel_id), - counterparty_node_id: prev_counterparty_node_id, - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. 
- phantom_shared_secret: None, - blinded_failure: blinded.map(|b| b.failure), - cltv_expiry: incoming_cltv_expiry, - }); + let htlc_source = + HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), + counterparty_node_id: prev_counterparty_node_id, + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. + phantom_shared_secret: None, + blinded_failure: blinded.map(|b| b.failure), + cltv_expiry: incoming_cltv_expiry, + }); let next_blinding_point = blinded.and_then(|b| { b.next_blinding_override.or_else(|| { - let encrypted_tlvs_ss = self.node_signer.ecdh( - Recipient::Node, &b.inbound_blinding_point, None - ).unwrap().secret_bytes(); + let encrypted_tlvs_ss = self + .node_signer + .ecdh(Recipient::Node, &b.inbound_blinding_point, None) + .unwrap() + .secret_bytes(); onion_utils::next_hop_pubkey( - &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss - ).ok() + &self.secp_ctx, + b.inbound_blinding_point, + &encrypted_tlvs_ss, + ) + .ok() }) }); @@ -6478,61 +6559,94 @@ where // applying non-strict forwarding. // The channel with the least amount of outbound liquidity will be used to maximize the // probability of being able to successfully forward a subsequent HTLC. - let maybe_optimal_channel = peer_state.channel_by_id.values_mut() + let maybe_optimal_channel = peer_state + .channel_by_id + .values_mut() .filter_map(Channel::as_funded_mut) .filter_map(|chan| { - let balances = chan.get_available_balances(&self.fee_estimator); - if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && - outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat && - chan.context.is_usable() { + let balances = + chan.get_available_balances(&self.fee_estimator); + let is_in_range = (balances.next_outbound_htlc_minimum_msat + ..=balances.next_outbound_htlc_limit_msat) + .contains(&outgoing_amt_msat); + if is_in_range && chan.context.is_usable() { Some((chan, balances)) } else { None } }) - .min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c); + .min_by_key(|(_, balances)| { + balances.next_outbound_htlc_limit_msat + }) + .map(|(c, _)| c); let optimal_channel = match maybe_optimal_channel { Some(chan) => chan, None => { // Fall back to the specified channel to return an appropriate error. 
- if let Some(chan) = peer_state.channel_by_id + if let Some(chan) = peer_state + .channel_by_id .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { chan } else { - forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + let fwd_iter = core::iter::once(forward_info) + .chain(draining_pending_forwards); + forwarding_channel_not_found!(fwd_iter); break; } - } + }, }; - let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash)); - let channel_description = if optimal_channel.funding.get_short_channel_id() == Some(short_chan_id) { - "specified" - } else { - "alternate" - }; + let logger = WithChannelContext::from( + &self.logger, + &optimal_channel.context, + Some(payment_hash), + ); + let channel_description = + if optimal_channel.funding.get_short_channel_id() + == Some(short_chan_id) + { + "specified" + } else { + "alternate" + }; log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); - if let Err((reason, msg)) = optimal_channel.queue_add_htlc(outgoing_amt_msat, - payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator, - &&logger) - { + if let Err((reason, msg)) = optimal_channel.queue_add_htlc( + outgoing_amt_msat, + payment_hash, + outgoing_cltv_value, + htlc_source.clone(), + onion_packet.clone(), + skimmed_fee_msat, + next_blinding_point, + &self.fee_estimator, + &&logger, + ) { log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg); - if let Some(chan) = peer_state.channel_by_id + if let Some(chan) = peer_state + .channel_by_id .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { let data = self.get_htlc_inbound_temp_fail_data(reason); - failed_forwards.push((htlc_source, payment_hash, + let failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(chan.context.get_counterparty_node_id()), + channel_id: forward_chan_id, + }; + failed_forwards.push(( + htlc_source, + payment_hash, HTLCFailReason::reason(reason, data), - HTLCHandlingFailureType::Forward { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } + failure_type, )); } else { - forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + forwarding_channel_not_found!(core::iter::once( + forward_info + ) + .chain(draining_pending_forwards)); break; } } @@ -6542,31 +6656,47 @@ where panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); }, HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => { - if let Some(chan) = peer_state.channel_by_id + if let Some(chan) = peer_state + .channel_by_id .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let logger = + WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id)) + Some(( + chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), + htlc_id, + )) } else { - 
forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + forwarding_channel_not_found!(core::iter::once(forward_info) + .chain(draining_pending_forwards)); break; } }, - HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { - if let Some(chan) = peer_state.channel_by_id + HTLCForwardInfo::FailMalformedHTLC { + htlc_id, + failure_code, + sha256_of_onion, + } => { + if let Some(chan) = peer_state + .channel_by_id .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let logger = + WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); let res = chan.queue_fail_malformed_htlc( - htlc_id, failure_code, sha256_of_onion, &&logger + htlc_id, + failure_code, + sha256_of_onion, + &&logger, ); Some((res, htlc_id)) } else { - forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + forwarding_channel_not_found!(core::iter::once(forward_info) + .chain(draining_pending_forwards)); break; } }, @@ -6574,11 +6704,16 @@ where if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { if let Err(e) = queue_fail_htlc_res { if let ChannelError::Ignore(msg) = e { - if let Some(chan) = peer_state.channel_by_id + if let Some(chan) = peer_state + .channel_by_id .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let logger = WithChannelContext::from( + &self.logger, + &chan.context, + None, + ); log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); } } else { @@ -6594,46 +6729,92 @@ where 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { - routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, - skimmed_fee_msat, .. - } + prev_short_channel_id, + prev_htlc_id, + prev_channel_id, + prev_funding_outpoint, + prev_user_channel_id, + prev_counterparty_node_id, + forward_info: + PendingHTLCInfo { + routing, + incoming_shared_secret, + payment_hash, + incoming_amt_msat, + outgoing_amt_msat, + skimmed_fee_msat, + .. 
+ }, }) => { let blinded_failure = routing.blinded_failure(); let ( - cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret, - mut onion_fields, has_recipient_created_payment_secret, invoice_request_opt + cltv_expiry, + onion_payload, + payment_data, + payment_context, + phantom_shared_secret, + mut onion_fields, + has_recipient_created_payment_secret, + invoice_request_opt, ) = match routing { PendingHTLCRouting::Receive { - payment_data, payment_metadata, payment_context, - incoming_cltv_expiry, phantom_shared_secret, custom_tlvs, - requires_blinded_error: _ + payment_data, + payment_metadata, + payment_context, + incoming_cltv_expiry, + phantom_shared_secret, + custom_tlvs, + requires_blinded_error: _, } => { let _legacy_hop_data = Some(payment_data.clone()); - let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret), - payment_metadata, custom_tlvs }; - (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, - Some(payment_data), payment_context, phantom_shared_secret, onion_fields, - true, None) + let onion_fields = RecipientOnionFields { + payment_secret: Some(payment_data.payment_secret), + payment_metadata, + custom_tlvs, + }; + ( + incoming_cltv_expiry, + OnionPayload::Invoice { _legacy_hop_data }, + Some(payment_data), + payment_context, + phantom_shared_secret, + onion_fields, + true, + None, + ) }, PendingHTLCRouting::ReceiveKeysend { - payment_data, payment_preimage, payment_metadata, - incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _, - has_recipient_created_payment_secret, payment_context, invoice_request, + payment_data, + payment_preimage, + payment_metadata, + incoming_cltv_expiry, + custom_tlvs, + requires_blinded_error: _, + has_recipient_created_payment_secret, + payment_context, + invoice_request, } => { let onion_fields = RecipientOnionFields { - payment_secret: payment_data.as_ref().map(|data| data.payment_secret), + payment_secret: payment_data + .as_ref() + .map(|data| data.payment_secret), payment_metadata, custom_tlvs, }; - (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), - payment_data, payment_context, None, onion_fields, - has_recipient_created_payment_secret, invoice_request) + ( + incoming_cltv_expiry, + OnionPayload::Spontaneous(payment_preimage), + payment_data, + payment_context, + None, + onion_fields, + has_recipient_created_payment_secret, + invoice_request, + ) }, _ => { panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); - } + }, }; let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { @@ -6655,7 +6836,11 @@ where sender_intended_value: outgoing_amt_msat, timer_ticks: 0, total_value_received: None, - total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat }, + total_msat: if let Some(data) = &payment_data { + data.total_msat + } else { + outgoing_amt_msat + }, cltv_expiry, onion_payload, counterparty_skimmed_fee_msat: skimmed_fee_msat, @@ -6666,29 +6851,52 @@ where macro_rules! 
fail_htlc { ($htlc: expr, $payment_hash: expr) => { debug_assert!(!committed_to_claimable); - let err_data = invalid_payment_err_data($htlc.value, self.best_block.read().unwrap().height); - failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: $htlc.prev_hop.short_channel_id, - user_channel_id: $htlc.prev_hop.user_channel_id, - counterparty_node_id: $htlc.prev_hop.counterparty_node_id, - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: $htlc.prev_hop.htlc_id, - incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, + let err_data = invalid_payment_err_data( + $htlc.value, + self.best_block.read().unwrap().height, + ); + let short_channel_id = $htlc.prev_hop.short_channel_id; + let user_channel_id = $htlc.prev_hop.user_channel_id; + let counterparty_node_id = + $htlc.prev_hop.counterparty_node_id; + let channel_id = prev_channel_id; + let outpoint = prev_funding_outpoint; + let htlc_id = $htlc.prev_hop.htlc_id; + let incoming_packet_shared_secret = + $htlc.prev_hop.incoming_packet_shared_secret; + let cltv_expiry = Some(cltv_expiry); + failed_forwards.push(( + HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id, + user_channel_id, + counterparty_node_id, + channel_id, + outpoint, + htlc_id, + incoming_packet_shared_secret, phantom_shared_secret, blinded_failure, - cltv_expiry: Some(cltv_expiry), - }), payment_hash, - HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, err_data), - HTLCHandlingFailureType::Receive { payment_hash: $payment_hash }, + cltv_expiry, + }), + payment_hash, + HTLCFailReason::reason( + LocalHTLCFailureReason::IncorrectPaymentDetails, + err_data, + ), + HTLCHandlingFailureType::Receive { + payment_hash: $payment_hash, + }, )); continue 'next_forwardable_htlc; - } + }; } - let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret; + let phantom_shared_secret = + claimable_htlc.prev_hop.phantom_shared_secret; let mut receiver_node_id = self.our_network_pubkey; if phantom_shared_secret.is_some() { - receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode) + receiver_node_id = self + .node_signer + .get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); } @@ -6790,15 +6998,27 @@ where // associated with the same payment_hash pending or not. 
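The `fail_htlc!` macro reformatted above queues a failure whose error data is produced by `invalid_payment_err_data($htlc.value, best_block.height)` and wrapped in `HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, err_data)`. For context, a minimal standalone sketch of roughly what that data looks like, assuming the BOLT 4 `incorrect_or_unknown_payment_details` layout (HTLC amount in msat as a big-endian u64 followed by the current block height as a big-endian u32); the function name below is illustrative, not LDK's API:

```rust
// Illustrative only: approximate shape of the failure data handed to
// HTLCFailReason::reason(.., IncorrectPaymentDetails) by the fail_htlc! macro,
// assuming the BOLT 4 incorrect_or_unknown_payment_details layout.
fn example_invalid_payment_err_data(amt_msat: u64, current_height: u32) -> Vec<u8> {
    let mut data = Vec::with_capacity(12);
    // HTLC amount the sender delivered, big-endian.
    data.extend_from_slice(&amt_msat.to_be_bytes());
    // Current chain height, big-endian.
    data.extend_from_slice(&current_height.to_be_bytes());
    data
}

fn main() {
    let data = example_invalid_payment_err_data(10_000, 800_000);
    assert_eq!(data.len(), 12);
    println!("{:02x?}", data);
}
```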
let payment_preimage = if has_recipient_created_payment_secret { if let Some(ref payment_data) = payment_data { - let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { - Ok(result) => result, - Err(()) => { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash); - fail_htlc!(claimable_htlc, payment_hash); - } - }; - if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta { - let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64; + let (payment_preimage, min_final_cltv_expiry_delta) = + match inbound_payment::verify( + payment_hash, + &payment_data, + self.highest_seen_timestamp.load(Ordering::Acquire) + as u64, + &self.inbound_payment_key, + &self.logger, + ) { + Ok(result) => result, + Err(()) => { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash); + fail_htlc!(claimable_htlc, payment_hash); + }, + }; + if let Some(min_final_cltv_expiry_delta) = + min_final_cltv_expiry_delta + { + let expected_min_expiry_height = + (self.current_best_block().height + + min_final_cltv_expiry_delta as u32) as u64; if (cltv_expiry as u64) < expected_min_expiry_height { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})", &payment_hash, cltv_expiry, expected_min_expiry_height); @@ -6806,8 +7026,12 @@ where } } payment_preimage - } else { fail_htlc!(claimable_htlc, payment_hash); } - } else { None }; + } else { + fail_htlc!(claimable_htlc, payment_hash); + } + } else { + None + }; match claimable_htlc.onion_payload { OnionPayload::Invoice { .. 
} => { let payment_data = payment_data.unwrap(); @@ -6824,9 +7048,12 @@ where check_total_value!(purpose); }, OnionPayload::Spontaneous(keysend_preimage) => { - let purpose = if let Some(PaymentContext::AsyncBolt12Offer( - AsyncBolt12OfferContext { offer_nonce } - )) = payment_context { + let purpose = if let Some( + PaymentContext::AsyncBolt12Offer( + AsyncBolt12OfferContext { offer_nonce }, + ), + ) = payment_context + { let payment_data = match payment_data { Some(data) => data, None => { @@ -6836,48 +7063,64 @@ where }; let verified_invreq = match invoice_request_opt - .and_then(|invreq| invreq.verify_using_recipient_data( - offer_nonce, &self.inbound_payment_key, &self.secp_ctx - ).ok()) - { + .and_then(|invreq| { + invreq + .verify_using_recipient_data( + offer_nonce, + &self.inbound_payment_key, + &self.secp_ctx, + ) + .ok() + }) { Some(verified_invreq) => { - if let Some(invreq_amt_msat) = verified_invreq.amount_msats() { - if payment_data.total_msat < invreq_amt_msat { - fail_htlc!(claimable_htlc, payment_hash); + if let Some(invreq_amt_msat) = + verified_invreq.amount_msats() + { + if payment_data.total_msat < invreq_amt_msat + { + fail_htlc!( + claimable_htlc, + payment_hash + ); } } verified_invreq }, None => { fail_htlc!(claimable_htlc, payment_hash); - } + }, }; - let payment_purpose_context = PaymentContext::Bolt12Offer(Bolt12OfferContext { - offer_id: verified_invreq.offer_id, - invoice_request: verified_invreq.fields(), - }); + let payment_purpose_context = + PaymentContext::Bolt12Offer(Bolt12OfferContext { + offer_id: verified_invreq.offer_id, + invoice_request: verified_invreq.fields(), + }); match events::PaymentPurpose::from_parts( - Some(keysend_preimage), payment_data.payment_secret, + Some(keysend_preimage), + payment_data.payment_secret, Some(payment_purpose_context), ) { Ok(purpose) => purpose, Err(()) => { fail_htlc!(claimable_htlc, payment_hash); - } + }, } } else if payment_context.is_some() { log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context); fail_htlc!(claimable_htlc, payment_hash); } else { - events::PaymentPurpose::SpontaneousPayment(keysend_preimage) + events::PaymentPurpose::SpontaneousPayment( + keysend_preimage, + ) }; check_total_value!(purpose); - } + }, } }, - HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => { + HTLCForwardInfo::FailHTLC { .. } + | HTLCForwardInfo::FailMalformedHTLC { .. } => { panic!("Got pending fail of our own HTLC"); - } + }, } } } @@ -6885,12 +7128,28 @@ where } let best_block_height = self.best_block.read().unwrap().height; - self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), - || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.pending_events, &self.logger, |args| self.send_payment_along_path(args)); + let needs_persist = self.pending_outbound_payments.check_retry_payments( + &self.router, + || self.list_usable_channels(), + || self.compute_inflight_htlcs(), + &self.entropy_source, + &self.node_signer, + best_block_height, + &self.pending_events, + &self.logger, + |args| self.send_payment_along_path(args), + ); + if needs_persist { + should_persist = NotifyOption::DoPersist; + } for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) 
{ - self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); + self.fail_htlc_backwards_internal( + &htlc_source, + &payment_hash, + &failure_reason, + destination, + ); } self.forward_htlcs(&mut phantom_receives); @@ -6898,11 +7157,18 @@ where // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's // nice to do the work now if we can rather than while we're trying to get messages in the // network stack. - self.check_free_holding_cells(); + if self.check_free_holding_cells() { + should_persist = NotifyOption::DoPersist; + } - if new_events.is_empty() { return } + if new_events.is_empty() { + return should_persist; + } let mut events = self.pending_events.lock().unwrap(); events.append(&mut new_events); + should_persist = NotifyOption::DoPersist; + + should_persist } /// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors. @@ -6998,7 +7264,8 @@ where for (chan_id, chan) in peer_state.channel_by_id.iter_mut() .filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan))) { - let new_feerate = if chan.funding.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + let is_anchors_chan = chan.funding.get_channel_type().supports_anchors_zero_fee_htlc_tx(); + let new_feerate = if is_anchors_chan { anchor_feerate } else { non_anchor_feerate @@ -7033,13 +7300,16 @@ where /// /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`ChannelConfig`]: crate::util::config::ChannelConfig - #[rustfmt::skip] pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(self, || { let mut should_persist = NotifyOption::SkipPersistNoEvents; - let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); - let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee); + let non_anchor_feerate = self + .fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); + let anchor_feerate = self + .fee_estimator + .bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee); let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); @@ -7053,98 +7323,165 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; let counterparty_node_id = *counterparty_node_id; - peer_state.channel_by_id.retain(|chan_id, chan| { - match chan.as_funded_mut() { - Some(funded_chan) => { - let new_feerate = if funded_chan.funding.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - anchor_feerate - } else { - non_anchor_feerate - }; - let chan_needs_persist = self.update_channel_fee(chan_id, funded_chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + peer_state.channel_by_id.retain(|chan_id, chan| match chan.as_funded_mut() { + Some(funded_chan) => { + let new_feerate = if funded_chan + .funding + .get_channel_type() + .supports_anchors_zero_fee_htlc_tx() + { + anchor_feerate + } else { + non_anchor_feerate + }; + let chan_needs_persist = + self.update_channel_fee(chan_id, funded_chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { + should_persist = NotifyOption::DoPersist; + } - if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_channel_err!(self, peer_state, e, funded_chan, chan_id, FUNDED_CHANNEL); - 
handle_errors.push((Err(err), counterparty_node_id)); - if needs_close { return false; } + if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_channel_err!( + self, + peer_state, + e, + funded_chan, + chan_id, + FUNDED_CHANNEL + ); + handle_errors.push((Err(err), counterparty_node_id)); + if needs_close { + return false; } + } - match funded_chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !funded_chan.context.is_live() => funded_chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), - ChannelUpdateStatus::Disabled if funded_chan.context.is_live() => funded_chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), - ChannelUpdateStatus::DisabledStaged(_) if funded_chan.context.is_live() - => funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged(_) if !funded_chan.context.is_live() - => funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged(mut n) if !funded_chan.context.is_live() => { - n += 1; - if n >= DISABLE_GOSSIP_TICKS { - funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) { - let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - } else { - funded_chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n)); + match funded_chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !funded_chan.context.is_live() => { + funded_chan.set_channel_update_status( + ChannelUpdateStatus::DisabledStaged(0), + ) + }, + ChannelUpdateStatus::Disabled if funded_chan.context.is_live() => { + funded_chan.set_channel_update_status( + ChannelUpdateStatus::EnabledStaged(0), + ) + }, + ChannelUpdateStatus::DisabledStaged(_) + if funded_chan.context.is_live() => + { + funded_chan + .set_channel_update_status(ChannelUpdateStatus::Enabled) + }, + ChannelUpdateStatus::EnabledStaged(_) + if !funded_chan.context.is_live() => + { + funded_chan + .set_channel_update_status(ChannelUpdateStatus::Disabled) + }, + ChannelUpdateStatus::DisabledStaged(mut n) + if !funded_chan.context.is_live() => + { + n += 1; + if n >= DISABLE_GOSSIP_TICKS { + funded_chan.set_channel_update_status( + ChannelUpdateStatus::Disabled, + ); + if let Ok(update) = + self.get_channel_update_for_broadcast(&funded_chan) + { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push( + MessageSendEvent::BroadcastChannelUpdate { + msg: update, + }, + ); } - }, - ChannelUpdateStatus::EnabledStaged(mut n) if funded_chan.context.is_live() => { - n += 1; - if n >= ENABLE_GOSSIP_TICKS { - funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) { - let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - } else { - funded_chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n)); + should_persist = NotifyOption::DoPersist; + } else { + funded_chan.set_channel_update_status( + ChannelUpdateStatus::DisabledStaged(n), + ); + } + }, + 
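The `DisabledStaged` arm above and the `EnabledStaged` arm below implement a hysteresis: a channel that stops being live is only gossiped as disabled after `DISABLE_GOSSIP_TICKS` consecutive timer ticks, and re-enabling is staged symmetrically via `ENABLE_GOSSIP_TICKS`. A small standalone sketch of that state machine, using illustrative tick constants and a local enum rather than LDK's types (the real code also broadcasts a `ChannelUpdate` and flags persistence when a terminal state is reached):

```rust
// Standalone sketch of the staged enable/disable hysteresis applied per timer tick.
const DISABLE_TICKS: u8 = 3; // illustrative; LDK uses DISABLE_GOSSIP_TICKS
const ENABLE_TICKS: u8 = 1; // illustrative; LDK uses ENABLE_GOSSIP_TICKS

#[derive(Debug, PartialEq)]
enum Status {
    Enabled,
    EnabledStaged(u8),
    Disabled,
    DisabledStaged(u8),
}

fn on_timer_tick(status: Status, is_live: bool) -> Status {
    match status {
        Status::Enabled if !is_live => Status::DisabledStaged(0),
        Status::Disabled if is_live => Status::EnabledStaged(0),
        Status::DisabledStaged(_) if is_live => Status::Enabled,
        Status::EnabledStaged(_) if !is_live => Status::Disabled,
        Status::DisabledStaged(n) if !is_live => {
            if n + 1 >= DISABLE_TICKS {
                Status::Disabled
            } else {
                Status::DisabledStaged(n + 1)
            }
        },
        Status::EnabledStaged(n) if is_live => {
            if n + 1 >= ENABLE_TICKS {
                Status::Enabled
            } else {
                Status::EnabledStaged(n + 1)
            }
        },
        other => other,
    }
}

fn main() {
    let mut status = Status::Enabled;
    for _ in 0..4 {
        status = on_timer_tick(status, false);
    }
    assert_eq!(status, Status::Disabled);
}
```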
ChannelUpdateStatus::EnabledStaged(mut n) + if funded_chan.context.is_live() => + { + n += 1; + if n >= ENABLE_GOSSIP_TICKS { + funded_chan.set_channel_update_status( + ChannelUpdateStatus::Enabled, + ); + if let Ok(update) = + self.get_channel_update_for_broadcast(&funded_chan) + { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push( + MessageSendEvent::BroadcastChannelUpdate { + msg: update, + }, + ); } - }, - _ => {}, - } + should_persist = NotifyOption::DoPersist; + } else { + funded_chan.set_channel_update_status( + ChannelUpdateStatus::EnabledStaged(n), + ); + } + }, + _ => {}, + } - funded_chan.context.maybe_expire_prev_config(); + funded_chan.context.maybe_expire_prev_config(); - if peer_state.is_connected { - if funded_chan.should_disconnect_peer_awaiting_response() { - let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}", + if peer_state.is_connected { + if funded_chan.should_disconnect_peer_awaiting_response() { + let logger = WithChannelContext::from( + &self.logger, + &funded_chan.context, + None, + ); + log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}", counterparty_node_id, chan_id); - pending_msg_events.push(MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::DisconnectPeerWithWarning { - msg: msgs::WarningMessage { - channel_id: *chan_id, - data: "Disconnecting due to timeout awaiting response".to_owned(), - }, + pending_msg_events.push(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: msgs::ErrorAction::DisconnectPeerWithWarning { + msg: msgs::WarningMessage { + channel_id: *chan_id, + data: + "Disconnecting due to timeout awaiting response" + .to_owned(), }, - }); - } + }, + }); } + } - true - }, - None => { - chan.context_mut().maybe_expire_prev_config(); - let unfunded_context = chan.unfunded_context_mut().expect("channel should be unfunded"); - if unfunded_context.should_expire_unfunded_channel() { - let context = chan.context(); - let logger = WithChannelContext::from(&self.logger, context, None); - log_error!(logger, + true + }, + None => { + chan.context_mut().maybe_expire_prev_config(); + let unfunded_context = + chan.unfunded_context_mut().expect("channel should be unfunded"); + if unfunded_context.should_expire_unfunded_channel() { + let context = chan.context(); + let logger = WithChannelContext::from(&self.logger, context, None); + log_error!(logger, "Force-closing pending channel with ID {} for not establishing in a timely manner", context.channel_id()); - let mut close_res = chan.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }); - let (funding, context) = chan.funding_and_context_mut(); - locked_close_channel!(self, peer_state, context, funding, close_res); - shutdown_channels.push(close_res); - pending_msg_events.push(MessageSendEvent::HandleError { + let mut close_res = chan.force_shutdown( + false, + ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(false), + }, + ); + let (funding, context) = chan.funding_and_context_mut(); + locked_close_channel!( + self, peer_state, context, funding, close_res + ); + shutdown_channels.push(close_res); + pending_msg_events.push(MessageSendEvent::HandleError { node_id: context.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: 
msgs::ErrorMessage { @@ -7153,29 +7490,40 @@ where }, }, }); - false - } else { - true - } - }, - } + false + } else { + true + } + }, }); for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() { - if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 { - let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None); + if { + req.ticks_remaining -= 1; + req.ticks_remaining + } <= 0 + { + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(*chan_id), + None, + ); log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id); - peer_state.pending_msg_events.push( - MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() } + peer_state.pending_msg_events.push(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { + channel_id: chan_id.clone(), + data: "Channel force-closed".to_owned(), }, - } - ); + }, + }); } } - peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0); + peer_state + .inbound_channel_request_by_id + .retain(|_, req| req.ticks_remaining > 0); if peer_state.ok_to_remove(true) { pending_peers_awaiting_removal.push(counterparty_node_id); @@ -7205,37 +7553,43 @@ where entry.remove_entry(); } }, - hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ } + hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ + }, } } } - self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| { - if payment.htlcs.is_empty() { - // This should be unreachable - debug_assert!(false); - return false; - } - if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload { - // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). - // In this case we're not going to handle any timeouts of the parts here. - // This condition determining whether the MPP is complete here must match - // exactly the condition used in `process_pending_htlc_forwards`. - if payment.htlcs[0].total_msat <= payment.htlcs.iter() - .fold(0, |total, htlc| total + htlc.sender_intended_value) - { - return true; - } else if payment.htlcs.iter_mut().any(|htlc| { - htlc.timer_ticks += 1; - return htlc.timer_ticks >= MPP_TIMEOUT_TICKS - }) { - timed_out_mpp_htlcs.extend(payment.htlcs.drain(..) - .map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash))); + self.claimable_payments.lock().unwrap().claimable_payments.retain( + |payment_hash, payment| { + if payment.htlcs.is_empty() { + // This should be unreachable + debug_assert!(false); return false; } - } - true - }); + if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload { + // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat). + // In this case we're not going to handle any timeouts of the parts here. + // This condition determining whether the MPP is complete here must match + // exactly the condition used in `process_pending_htlc_forwards`. 
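The condition referenced in the comment above treats an MPP payment as complete once the parts' `sender_intended_value`s sum to at least `total_msat` (the sum computed just below); otherwise every part is aged on each timer tick and the whole set is failed back once any part reaches `MPP_TIMEOUT_TICKS`. A small standalone sketch of that per-tick decision, with an illustrative struct and tick constant rather than LDK's types:

```rust
// Standalone sketch of the MPP-timeout decision made per claimable payment on a timer tick.
const MPP_TIMEOUT_TICKS: u8 = 3; // illustrative value

struct Part {
    sender_intended_value: u64,
    timer_ticks: u8,
}

/// Returns true if the payment should be kept, false if its parts should be failed back.
fn keep_payment(total_msat: u64, parts: &mut [Part]) -> bool {
    let received: u64 = parts.iter().map(|p| p.sender_intended_value).sum();
    if total_msat <= received {
        // All parts arrived; claiming is handled by the normal path, not the timer.
        return true;
    }
    // Incomplete: age each part and time the whole set out once any part is too old.
    !parts.iter_mut().any(|p| {
        p.timer_ticks += 1;
        p.timer_ticks >= MPP_TIMEOUT_TICKS
    })
}

fn main() {
    let mut parts = vec![Part { sender_intended_value: 600, timer_ticks: 0 }];
    assert!(keep_payment(1_000, &mut parts)); // first tick: still waiting for more parts
    assert!(keep_payment(1_000, &mut parts)); // second tick
    assert!(!keep_payment(1_000, &mut parts)); // third tick: timed out, fail back
}
```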
+ let htlc_total_msat = + payment.htlcs.iter().map(|h| h.sender_intended_value).sum(); + if payment.htlcs[0].total_msat <= htlc_total_msat { + return true; + } else if payment.htlcs.iter_mut().any(|htlc| { + htlc.timer_ticks += 1; + return htlc.timer_ticks >= MPP_TIMEOUT_TICKS; + }) { + let htlcs = payment + .htlcs + .drain(..) + .map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)); + timed_out_mpp_htlcs.extend(htlcs); + return false; + } + } + true + }, + ); for htlc_source in timed_out_mpp_htlcs.drain(..) { let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); @@ -7259,12 +7613,11 @@ where .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH"); #[cfg(not(feature = "std"))] let duration_since_epoch = Duration::from_secs( - self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64 + self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64, ); - self.pending_outbound_payments.remove_stale_payments( - duration_since_epoch, &self.pending_events - ); + self.pending_outbound_payments + .remove_stale_payments(duration_since_epoch, &self.pending_events); #[cfg(async_payments)] self.check_refresh_async_receive_offer_cache(true); @@ -7371,10 +7724,9 @@ where // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to // be surfaced to the user. - #[rustfmt::skip] fn fail_holding_cell_htlcs( &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId, - counterparty_node_id: &PublicKey + counterparty_node_id: &PublicKey, ) { let (failure_reason, onion_failure_data) = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -7393,14 +7745,21 @@ where (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } }, - hash_map::Entry::Vacant(_) => (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) + hash_map::Entry::Vacant(_) => { + (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) + }, } - } else { (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } + } else { + (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) + } }; for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id.clone()), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id.clone()), + channel_id, + }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } @@ -7409,21 +7768,20 @@ where &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCHandlingFailureType, ) { - let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event( + self.fail_htlc_backwards_internal_without_forward_event( source, payment_hash, onion_error, destination, ); - if push_forward_event { - self.push_pending_forwards_ev(); - } } /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. 
- #[rustfmt::skip] - fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, failure_type: HTLCHandlingFailureType) -> bool { + fn fail_htlc_backwards_internal_without_forward_event( + &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, + failure_type: HTLCHandlingFailureType, + ) { // Ensure that no peer state channel storage lock is held when calling this function. // This ensures that future code doesn't introduce a lock-order requirement for // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling @@ -7441,66 +7799,85 @@ where // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. - let mut push_forward_event; match source { HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => { - push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path, - session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx, - &self.pending_events, &self.logger); + self.pending_outbound_payments.fail_htlc( + source, + payment_hash, + onion_error, + path, + session_priv, + payment_id, + self.probing_cookie_secret, + &self.secp_ctx, + &self.pending_events, + &self.logger, + ); }, HTLCSource::PreviousHopData(HTLCPreviousHopData { - ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, - ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, .. + ref short_channel_id, + ref htlc_id, + ref incoming_packet_shared_secret, + ref phantom_shared_secret, + outpoint: _, + ref blinded_failure, + ref channel_id, + .. 
}) => { log_trace!( WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)), "Failing {}HTLC with payment_hash {} backwards from us: {:?}", - if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error + if blinded_failure.is_some() { "blinded " } else { "" }, + &payment_hash, + onion_error ); let failure = match blinded_failure { Some(BlindedFailure::FromIntroductionNode) => { - let blinded_onion_error = HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]); + let blinded_onion_error = HTLCFailReason::reason( + LocalHTLCFailureReason::InvalidOnionBlinding, + vec![0; 32], + ); let err_packet = blinded_onion_error.get_encrypted_failure_packet( - incoming_packet_shared_secret, phantom_shared_secret + incoming_packet_shared_secret, + phantom_shared_secret, ); HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet } }, - Some(BlindedFailure::FromBlindedNode) => { - HTLCForwardInfo::FailMalformedHTLC { - htlc_id: *htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - sha256_of_onion: [0; 32] - } + Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC { + htlc_id: *htlc_id, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], }, None => { let err_packet = onion_error.get_encrypted_failure_packet( - incoming_packet_shared_secret, phantom_shared_secret + incoming_packet_shared_secret, + phantom_shared_secret, ); HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet } - } + }, }; - push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty(); let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - push_forward_event &= forward_htlcs.is_empty(); match forward_htlcs.entry(*short_channel_id) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(failure); }, hash_map::Entry::Vacant(entry) => { - entry.insert(vec!(failure)); - } + entry.insert(vec![failure]); + }, } mem::drop(forward_htlcs); let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::HTLCHandlingFailed { - prev_channel_id: *channel_id, - failure_type, - failure_reason: Some(onion_error.into()), - }, None)); + pending_events.push_back(( + events::Event::HTLCHandlingFailed { + prev_channel_id: *channel_id, + failure_type, + failure_reason: Some(onion_error.into()), + }, + None, + )); }, } - push_forward_event } /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any @@ -9641,9 +10018,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec)) { - let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty(); let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); - push_forward_event &= decode_update_add_htlcs.is_empty(); let scid = update_add_htlcs.0; match decode_update_add_htlcs.entry(scid) { hash_map::Entry::Occupied(mut e) => { @@ -9653,25 +10028,26 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ e.insert(update_add_htlcs.1); }, } - if push_forward_event { - self.push_pending_forwards_ev(); - } } #[inline] fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) { - let push_forward_event = - self.forward_htlcs_without_forward_event(per_source_pending_forwards); - if push_forward_event { - self.push_pending_forwards_ev() - } + self.forward_htlcs_without_forward_event(per_source_pending_forwards); } #[inline] - #[rustfmt::skip] - fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool { - let mut push_forward_event = false; - for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { + fn forward_htlcs_without_forward_event( + &self, per_source_pending_forwards: &mut [PerSourcePendingForward], + ) { + for &mut ( + prev_short_channel_id, + prev_counterparty_node_id, + prev_funding_outpoint, + prev_channel_id, + prev_user_channel_id, + ref mut pending_forwards, + ) in per_source_pending_forwards + { let mut new_intercept_events = VecDeque::new(); let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { @@ -9685,74 +10061,130 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Pull this now to avoid introducing a lock order with `forward_htlcs`. let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); - let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty(); let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - let forward_htlcs_empty = forward_htlcs.is_empty(); match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, - prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info + prev_short_channel_id, + prev_counterparty_node_id, + prev_funding_outpoint, + prev_channel_id, + prev_htlc_id, + prev_user_channel_id, + forward_info, })); }, hash_map::Entry::Vacant(entry) => { - if !is_our_scid && forward_info.incoming_amt_msat.is_some() && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash) - { - let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array()); - let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + if !is_our_scid + && forward_info.incoming_amt_msat.is_some() + && fake_scid::is_valid_intercept( + &self.fake_scid_rand_bytes, + scid, + &self.chain_hash, + ) { + let intercept_id = InterceptId( + Sha256::hash(&forward_info.incoming_shared_secret) + .to_byte_array(), + ); + let mut pending_intercepts = + self.pending_intercepted_htlcs.lock().unwrap(); match pending_intercepts.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { - new_intercept_events.push_back((events::Event::HTLCIntercepted { - requested_next_hop_scid: scid, - payment_hash: forward_info.payment_hash, - inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(), - expected_outbound_amount_msat: forward_info.outgoing_amt_msat, - intercept_id - }, None)); + new_intercept_events.push_back(( + events::Event::HTLCIntercepted { + requested_next_hop_scid: scid, + payment_hash: forward_info.payment_hash, + inbound_amount_msat: 
forward_info + .incoming_amt_msat + .unwrap(), + expected_outbound_amount_msat: forward_info + .outgoing_amt_msat, + intercept_id, + }, + None, + )); entry.insert(PendingAddHTLCInfo { - prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, - prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info + prev_short_channel_id, + prev_counterparty_node_id, + prev_funding_outpoint, + prev_channel_id, + prev_htlc_id, + prev_user_channel_id, + forward_info, }); }, hash_map::Entry::Occupied(_) => { - let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash)); - log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - user_channel_id: Some(prev_user_channel_id), - counterparty_node_id: prev_counterparty_node_id, - outpoint: prev_funding_outpoint, - channel_id: prev_channel_id, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: forward_info.incoming_shared_secret, - phantom_shared_secret: None, - blinded_failure: forward_info.routing.blinded_failure(), - cltv_expiry: forward_info.routing.incoming_cltv_expiry(), - }); + let logger = WithContext::from( + &self.logger, + None, + Some(prev_channel_id), + Some(forward_info.payment_hash), + ); + log_info!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", + scid + ); + let htlc_source = + HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), + counterparty_node_id: prev_counterparty_node_id, + outpoint: prev_funding_outpoint, + channel_id: prev_channel_id, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: forward_info + .incoming_shared_secret, + phantom_shared_secret: None, + blinded_failure: forward_info + .routing + .blinded_failure(), + cltv_expiry: forward_info + .routing + .incoming_cltv_expiry(), + }); - failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, - HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer), - HTLCHandlingFailureType::InvalidForward { requested_forward_scid: scid }, + let payment_hash = forward_info.payment_hash; + let reason = HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::UnknownNextPeer, + ); + let failure_type = + HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: scid, + }; + failed_intercept_forwards.push(( + htlc_source, + payment_hash, + reason, + failure_type, )); - } + }, } } else { - // We don't want to generate a PendingHTLCsForwardable event if only intercepted - // payments are being processed. - push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty; - entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, - prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info - }))); + entry.insert(vec![HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, + prev_counterparty_node_id, + prev_funding_outpoint, + prev_channel_id, + prev_htlc_id, + prev_user_channel_id, + forward_info, + })]); } - } + }, } } } - for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) 
{ - push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination); + for (htlc_source, payment_hash, failure_reason, destination) in + failed_intercept_forwards.drain(..) + { + self.fail_htlc_backwards_internal_without_forward_event( + &htlc_source, + &payment_hash, + &failure_reason, + destination, + ); } if !new_intercept_events.is_empty() { @@ -9760,30 +10192,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ events.append(&mut new_intercept_events); } } - push_forward_event - } - - fn push_pending_forwards_ev(&self) { - let mut pending_events = self.pending_events.lock().unwrap(); - let is_processing_events = self.pending_events_processor.load(Ordering::Acquire); - let num_forward_events = pending_events - .iter() - .filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. })) - .count(); - // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing - // events is done in batches and they are not removed until we're done processing each - // batch. Since handling a `PendingHTLCsForwardable` event will call back into the - // `ChannelManager`, we'll still see the original forwarding event not removed. Phantom - // payments will need an additional forwarding event before being claimed to make them look - // real by taking more time. - if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 { - pending_events.push_back(( - Event::PendingHTLCsForwardable { - time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS), - }, - None, - )); - } } /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote @@ -10374,19 +10782,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ has_pending_monitor_events } - /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without - /// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor - /// update events as a separate process method here. - #[cfg(fuzzing)] - pub fn process_monitor_events(&self) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.process_pending_monitor_events(); - } - /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. - #[rustfmt::skip] fn check_free_holding_cells(&self) -> bool { let mut has_monitor_update = false; let mut failed_htlcs = Vec::new(); @@ -10401,22 +10799,36 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ 'chan_loop: loop { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state: &mut PeerState<_> = &mut *peer_state_lock; - for (channel_id, chan) in peer_state.channel_by_id - .iter_mut() - .filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan))) - { + for (channel_id, chan) in + peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, chan)| { + chan.as_funded_mut().map(|chan| (chan_id, chan)) + }) { let counterparty_node_id = chan.context.get_counterparty_node_id(); let funding_txo = chan.funding.get_funding_txo(); - let (monitor_opt, holding_cell_failed_htlcs) = - chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None)); + let (monitor_opt, holding_cell_failed_htlcs) = chan + .maybe_free_holding_cell_htlcs( + &self.fee_estimator, + &&WithChannelContext::from(&self.logger, &chan.context, None), + ); if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id)); + failed_htlcs.push(( + holding_cell_failed_htlcs, + *channel_id, + counterparty_node_id, + )); } if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!( + self, + funding_txo.unwrap(), + monitor_update, + peer_state_lock, + peer_state, + per_peer_state, + chan + ); continue 'peer_loop; } } @@ -14649,102 +15061,145 @@ where MR::Target: MessageRouter, L::Target: Logger, { - #[rustfmt::skip] - fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result { + fn read( + reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, + ) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let chain_hash: ChainHash = Readable::read(reader)?; let best_block_height: u32 = Readable::read(reader)?; let best_block_hash: BlockHash = Readable::read(reader)?; - let empty_peer_state = || { - PeerState { - channel_by_id: new_hash_map(), - inbound_channel_request_by_id: new_hash_map(), - latest_features: InitFeatures::empty(), - pending_msg_events: Vec::new(), - in_flight_monitor_updates: BTreeMap::new(), - monitor_update_blocked_actions: BTreeMap::new(), - actions_blocking_raa_monitor_updates: BTreeMap::new(), - closed_channel_monitor_update_ids: BTreeMap::new(), - peer_storage: Vec::new(), - is_connected: false, - } + let empty_peer_state = || PeerState { + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), + latest_features: InitFeatures::empty(), + pending_msg_events: Vec::new(), + in_flight_monitor_updates: BTreeMap::new(), + monitor_update_blocked_actions: BTreeMap::new(), + actions_blocking_raa_monitor_updates: BTreeMap::new(), + closed_channel_monitor_update_ids: BTreeMap::new(), + peer_storage: Vec::new(), + is_connected: false, }; let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); - let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); + let mut per_peer_state = hash_map_with_capacity(cmp::min( + channel_count as usize, + MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex>)>(), + )); let mut short_to_chan_info = 
hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); for _ in 0..channel_count { - let mut channel: FundedChannel = FundedChannel::read(reader, ( - &args.entropy_source, &args.signer_provider, &provided_channel_type_features(&args.default_config) - ))?; + let mut channel: FundedChannel = FundedChannel::read( + reader, + ( + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.default_config), + ), + )?; let logger = WithChannelContext::from(&args.logger, &channel.context, None); let channel_id = channel.context.channel_id(); channel_id_set.insert(channel_id); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { - if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || - channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || - channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || - channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + if channel.get_cur_holder_commitment_transaction_number() + > monitor.get_cur_holder_commitment_number() + || channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + || channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + || channel.context.get_latest_monitor_update_id() + < monitor.get_latest_update_id() + { // But if the channel is behind of the monitor, close the channel: - log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); + log_error!( + logger, + "A ChannelManager is stale compared to the current ChannelMonitor!" 
+ ); log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); - if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + if channel.context.get_latest_monitor_update_id() + < monitor.get_latest_update_id() + { log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); } - if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() { + if channel.get_cur_holder_commitment_transaction_number() + > monitor.get_cur_holder_commitment_number() + { log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); } - if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() { + if channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + { log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); } - if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() { + if channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + { log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let mut shutdown_result = channel.context.force_shutdown(&channel.funding, true, ClosureReason::OutdatedChannelManager); + let mut shutdown_result = channel.context.force_shutdown( + &channel.funding, + true, + ClosureReason::OutdatedChannelManager, + ); if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } - if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update { + if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = + shutdown_result.monitor_update + { // Our channel information is out of sync with the `ChannelMonitor`, so // force the update to use the `ChannelMonitor`'s update_id for the close // update. 
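As the comment above notes, the regenerated force-close update is given an `update_id` strictly greater than anything the `ChannelMonitor` has seen, and the highest id queued per closed channel is remembered via `and_modify`/`or_insert`. A minimal standalone sketch of that bookkeeping, with illustrative types (a plain map key stands in for the peer/channel entries):

```rust
use std::collections::BTreeMap;

// Sketch of the id bookkeeping when regenerating a close update on startup: the update
// must sort after the monitor's latest id, and we never lower the recorded maximum.
fn queue_close_update(
    closed_channel_update_ids: &mut BTreeMap<u64, u64>, // channel key -> highest queued id
    channel_key: u64,
    monitor_latest_update_id: u64,
) -> u64 {
    let latest_update_id = monitor_latest_update_id.saturating_add(1);
    closed_channel_update_ids
        .entry(channel_key)
        .and_modify(|v| *v = (*v).max(latest_update_id))
        .or_insert(latest_update_id);
    latest_update_id
}

fn main() {
    let mut ids = BTreeMap::new();
    assert_eq!(queue_close_update(&mut ids, 1, 7), 8);
    // A later regeneration against an older monitor id must not lower the recorded value.
    queue_close_update(&mut ids, 1, 3);
    assert_eq!(ids[&1], 8);
}
```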
let latest_update_id = monitor.get_latest_update_id().saturating_add(1); update.update_id = latest_update_id; - per_peer_state.entry(counterparty_node_id) + per_peer_state + .entry(counterparty_node_id) .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock().unwrap() - .closed_channel_monitor_update_ids.entry(channel_id) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); + .lock() + .unwrap() + .closed_channel_monitor_update_ids + .entry(channel_id) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); - close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, channel_id, update - }); + close_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + }, + ); } failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs); - channel_closures.push_back((events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, None)); + channel_closures.push_back(( + events::Event::ChannelClosed { + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), + reason: ClosureReason::OutdatedChannelManager, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.funding.get_value_satoshis()), + channel_funding_txo: channel.funding.get_funding_txo(), + last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), + }, + None, + )); for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { let mut found_htlc = false; for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { - if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; } + if *channel_htlc_source == monitor_htlc_source { + found_htlc = true; + break; + } } if !found_htlc { // If we have some HTLCs in the channel which are not present in the newer @@ -14754,50 +15209,91 @@ where // claim update ChannelMonitor updates were persisted prior to persising // the ChannelMonitor update for the forward leg, so attempting to fail the // backwards leg of the HTLC will simply be rejected. 
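The comment above describes the reconciliation done per channel: any HTLC source the (stale) `ChannelManager` still tracks but the newer `ChannelMonitor` no longer reports is queued to be failed backwards. A standalone sketch of that set difference, with strings standing in for LDK's `HTLCSource`/`PaymentHash`:

```rust
use std::collections::HashSet;

// Sketch: collect HTLCs present in the stale manager but missing from the newer monitor.
fn htlcs_to_fail_back<'a>(
    manager_inflight: &'a [(&'a str, &'a str)], // (htlc_source, payment_hash)
    monitor_outbound: &HashSet<&'a str>,        // htlc sources the monitor still knows
) -> Vec<(&'a str, &'a str)> {
    manager_inflight
        .iter()
        .filter(|(source, _)| !monitor_outbound.contains(source))
        .copied()
        .collect()
}

fn main() {
    let manager = [("src-a", "hash-a"), ("src-b", "hash-b")];
    let monitor: HashSet<_> = ["src-a"].into_iter().collect();
    // src-b was removed by the monitor, so its backwards leg should be failed.
    assert_eq!(htlcs_to_fail_back(&manager, &monitor), vec![("src-b", "hash-b")]);
}
```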
- let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash)); + let logger = WithChannelContext::from( + &args.logger, + &channel.context, + Some(*payment_hash), + ); log_info!(logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", &channel.context.channel_id(), &payment_hash); - failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id())); + failed_htlcs.push(( + channel_htlc_source.clone(), + *payment_hash, + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + )); } } } else { - channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id()); + channel.on_startup_drop_completed_blocked_mon_updates_through( + &logger, + monitor.get_latest_update_id(), + ); log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); if let Some(short_channel_id) = channel.funding.get_short_channel_id() { - short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); + short_to_chan_info.insert( + short_channel_id, + ( + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + ), + ); } for short_channel_id in channel.context.historical_scids() { - short_to_chan_info.insert(*short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); + short_to_chan_info.insert( + *short_channel_id, + ( + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + ), + ); } - per_peer_state.entry(channel.context.get_counterparty_node_id()) + per_peer_state + .entry(channel.context.get_counterparty_node_id()) .or_insert_with(|| Mutex::new(empty_peer_state())) - .get_mut().unwrap() - .channel_by_id.insert(channel.context.channel_id(), Channel::from(channel)); + .get_mut() + .unwrap() + .channel_by_id + .insert(channel.context.channel_id(), Channel::from(channel)); } } else if channel.is_awaiting_initial_mon_persist() { // If we were persisted and shut down while the initial ChannelMonitor persistence // was in-progress, we never broadcasted the funding transaction and can still // safely discard the channel. 
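The branch that follows is the "safe discard" case described in the comment above. Taken together with the earlier branches in this block, the per-channel logic during deserialization forms a small decision tree: force-close if the manager is behind its monitor, load normally if they are consistent, discard a never-broadcast channel whose initial monitor persist never completed, and otherwise refuse to load. A standalone sketch of that decision, with illustrative names (the "abort" case maps to returning `DecodeError::InvalidValue`):

```rust
// Sketch of the per-channel decision applied while reading a ChannelManager from disk.
#[derive(Debug, PartialEq)]
enum LoadAction {
    ForceCloseStale,    // monitor exists but the manager is behind it
    Load,               // monitor exists and is consistent with the manager
    DiscardUnbroadcast, // no monitor, but the funding tx was never broadcast
    Abort,              // no monitor and continuing would risk funds
}

fn classify(
    has_monitor: bool, manager_behind_monitor: bool, awaiting_initial_persist: bool,
) -> LoadAction {
    match (has_monitor, manager_behind_monitor, awaiting_initial_persist) {
        (true, true, _) => LoadAction::ForceCloseStale,
        (true, false, _) => LoadAction::Load,
        (false, _, true) => LoadAction::DiscardUnbroadcast,
        (false, _, false) => LoadAction::Abort,
    }
}

fn main() {
    assert_eq!(classify(false, false, true), LoadAction::DiscardUnbroadcast);
    assert_eq!(classify(true, true, false), LoadAction::ForceCloseStale);
    assert_eq!(classify(true, false, false), LoadAction::Load);
}
```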
- let _ = channel.context.force_shutdown(&channel.funding, false, ClosureReason::DisconnectedPeer); - channel_closures.push_back((events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::DisconnectedPeer, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, None)); + let _ = channel.context.force_shutdown( + &channel.funding, + false, + ClosureReason::DisconnectedPeer, + ); + channel_closures.push_back(( + events::Event::ChannelClosed { + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), + reason: ClosureReason::DisconnectedPeer, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.funding.get_value_satoshis()), + channel_funding_txo: channel.funding.get_funding_txo(), + last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), + }, + None, + )); } else { - log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id()); + log_error!( + logger, + "Missing ChannelMonitor for channel {} needed by ChannelManager.", + &channel.context.channel_id() + ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds."); + log_error!( + logger, + " Without the ChannelMonitor we cannot continue without risking funds." 
+ ); log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); return Err(DecodeError::InvalidValue); } @@ -14820,12 +15316,15 @@ where if should_queue_fc_update { latest_update_id += 1; } - per_peer_state.entry(counterparty_node_id) + per_peer_state + .entry(counterparty_node_id) .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock().unwrap() - .closed_channel_monitor_update_ids.entry(monitor.channel_id()) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); + .lock() + .unwrap() + .closed_channel_monitor_update_ids + .entry(monitor.channel_id()) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); } if !should_queue_fc_update { @@ -14834,11 +15333,16 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed", - &channel_id); + log_info!( + logger, + "Queueing monitor update to ensure missing channel {} is force closed", + &channel_id + ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), - updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { + should_broadcast: true, + }], channel_id: Some(monitor.channel_id()), }; let funding_txo = monitor.get_funding_txo(); @@ -14858,7 +15362,10 @@ where for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::())); + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); for _ in 0..pending_forwards_count { pending_forwards.push(Readable::read(reader)?); } @@ -14866,11 +15373,15 @@ where } let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); + let mut claimable_htlcs_list = + Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); for _ in 0..claimable_htlcs_count { let payment_hash = Readable::read(reader)?; let previous_hops_len: u64 = Readable::read(reader)?; - let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::())); + let mut previous_hops = Vec::with_capacity(cmp::min( + previous_hops_len as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); for _ in 0..previous_hops_len { previous_hops.push(::read(reader)?); } @@ -14888,7 +15399,10 @@ where let event_count: u64 = Readable::read(reader)?; let mut pending_events_read: VecDeque<(events::Event, Option)> = - VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option)>())); + VecDeque::with_capacity(cmp::min( + event_count as usize, + MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option)>(), + )); for _ in 0..event_count { match MaybeReadable::read(reader)? { Some(event) => pending_events_read.push_back((event, None)), @@ -14905,7 +15419,7 @@ where // on-startup monitor updates. 
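The reads above repeatedly cap pre-allocation with `cmp::min(count as usize, MAX_ALLOC_SIZE / mem::size_of::<T>())` so that a corrupt or hostile length prefix cannot force a huge allocation; the `Vec` simply grows later if the count turns out to be legitimate. A minimal standalone sketch of that pattern, with an illustrative `MAX_ALLOC_SIZE` rather than LDK's constant:

```rust
use core::mem;

// Sketch: never trust a length prefix read from disk; clamp the initial capacity.
const MAX_ALLOC_SIZE: usize = 1024 * 64; // illustrative bound

fn bounded_capacity<T>(claimed_count: u64) -> usize {
    core::cmp::min(claimed_count as usize, MAX_ALLOC_SIZE / mem::size_of::<T>())
}

fn main() {
    // A sane count is used as-is; an absurd one is clamped to the allocation budget.
    assert_eq!(bounded_capacity::<u64>(100), 100);
    assert_eq!(bounded_capacity::<u64>(u64::MAX), MAX_ALLOC_SIZE / 8);
    let v: Vec<u64> = Vec::with_capacity(bounded_capacity::<u64>(u64::MAX));
    assert!(v.capacity() >= MAX_ALLOC_SIZE / 8);
}
```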
let _: OutPoint = Readable::read(reader)?; let _: ChannelMonitorUpdate = Readable::read(reader)?; - } + }, _ => return Err(DecodeError::InvalidValue), } } @@ -14919,38 +15433,53 @@ where let payment_hash: PaymentHash = Readable::read(reader)?; let logger = WithContext::from(&args.logger, None, None, Some(payment_hash)); let inbound: PendingInboundPayment = Readable::read(reader)?; - log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound); + log_warn!( + logger, + "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", + payment_hash, + inbound + ); } let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; let mut pending_outbound_payments_compat: HashMap = - hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + hash_map_with_capacity(cmp::min( + pending_outbound_payments_count_compat as usize, + MAX_ALLOC_SIZE / 32, + )); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; let payment = PendingOutboundPayment::Legacy { session_privs: hash_set_from_iter([session_priv]), }; if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { - return Err(DecodeError::InvalidValue) + return Err(DecodeError::InvalidValue); }; } // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. - let mut pending_outbound_payments_no_retry: Option>> = None; + let mut pending_outbound_payments_no_retry: Option>> = + None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = Some(new_hash_map()); + let mut pending_intercepted_htlcs: Option> = + Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; let mut claimable_htlc_onion_fields = None; let mut pending_claiming_payments = Some(new_hash_map()); - let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); + let mut monitor_update_blocked_actions_per_peer: Option>)>> = + Some(Vec::new()); let mut events_override = None; - let mut legacy_in_flight_monitor_updates: Option>> = None; + let mut legacy_in_flight_monitor_updates: Option< + HashMap<(PublicKey, OutPoint), Vec>, + > = None; // We use this one over the legacy since they represent the same data, just with a different // key. We still need to read the legacy one as it's an even TLV. - let mut in_flight_monitor_updates: Option>> = None; + let mut in_flight_monitor_updates: Option< + HashMap<(PublicKey, ChannelId), Vec>, + > = None; let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; let mut peer_storage_dir: Option)>> = None; @@ -15020,7 +15549,8 @@ where return Err(DecodeError::InvalidValue); } if in_flight_monitor_updates.is_none() { - let in_flight_upds = in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); + let in_flight_upds = + in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { // All channels with legacy in flight monitor updates are v1 channels. let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); @@ -15104,22 +15634,40 @@ where // Channels that were persisted have to be funded, otherwise they should have been // discarded. 
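A recurring pattern in the deserialization hunks above is capping the initial allocation with `cmp::min(count, MAX_ALLOC_SIZE / mem::size_of::<T>())` before reading a length-prefixed collection, which bounds the up-front allocation even if the serialized count is corrupt. A minimal, self-contained sketch of that idea follows; the `MAX_ALLOC_SIZE` value, the `Entry` type, and `read_bounded_vec` are illustrative stand-ins rather than the crate's actual definitions.

```rust
use std::cmp;
use std::io::{self, Read};

// Illustrative cap on speculative preallocation (the real constant lives in channelmanager.rs).
const MAX_ALLOC_SIZE: usize = 16 * 1024;

// Hypothetical fixed-size record standing in for the HTLC structs deserialized above.
struct Entry([u8; 32]);

fn read_bounded_vec<R: Read>(reader: &mut R) -> io::Result<Vec<Entry>> {
	let mut len_bytes = [0u8; 8];
	reader.read_exact(&mut len_bytes)?;
	let count = u64::from_be_bytes(len_bytes);

	// Never trust the serialized count for the up-front allocation; the Vec still grows
	// normally as elements are actually read.
	let cap = cmp::min(count as usize, MAX_ALLOC_SIZE / core::mem::size_of::<Entry>());
	let mut out = Vec::with_capacity(cap);
	for _ in 0..count {
		let mut buf = [0u8; 32];
		reader.read_exact(&mut buf)?;
		out.push(Entry(buf));
	}
	Ok(out)
}
```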
- let monitor = args.channel_monitors.get(chan_id) + let monitor = args + .channel_monitors + .get(chan_id) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); if let Some(in_flight_upds) = &mut in_flight_monitor_updates { - if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, *chan_id)) { - max_in_flight_update_id = cmp::max(max_in_flight_update_id, - handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds, - monitor, peer_state, logger, "")); + if let Some(mut chan_in_flight_upds) = + in_flight_upds.remove(&(*counterparty_id, *chan_id)) + { + max_in_flight_update_id = cmp::max( + max_in_flight_update_id, + handle_in_flight_updates!( + *counterparty_id, + chan_in_flight_upds, + monitor, + peer_state, + logger, + "" + ), + ); } } - if funded_chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { + if funded_chan.get_latest_unblocked_monitor_update_id() + > max_in_flight_update_id + { // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", chan_id, monitor.get_latest_update_id(), max_in_flight_update_id); - log_error!(logger, " but the ChannelManager is at update_id {}.", funded_chan.get_latest_unblocked_monitor_update_id()); + log_error!( + logger, + " but the ChannelManager is at update_id {}.", + funded_chan.get_latest_unblocked_monitor_update_id() + ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -15137,25 +15685,40 @@ where if let Some(in_flight_upds) = in_flight_monitor_updates { for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds { - let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); + let logger = + WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); if let Some(monitor) = args.channel_monitors.get(&channel_id) { // Now that we've removed all the in-flight monitor updates for channels that are // still open, we need to replay any monitor updates that are for closed channels, // creating the neccessary peer_state entries as we go. - let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| { - Mutex::new(empty_peer_state()) - }); + let peer_state_mutex = per_peer_state + .entry(counterparty_id) + .or_insert_with(|| Mutex::new(empty_peer_state())); let mut peer_state = peer_state_mutex.lock().unwrap(); - handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, monitor, - peer_state, logger, "closed "); + handle_in_flight_updates!( + counterparty_id, + chan_in_flight_updates, + monitor, + peer_state, + logger, + "closed " + ); } else { log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! 
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id); + log_error!( + logger, + " The ChannelMonitor for channel {} is missing.", + channel_id + ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates); + log_error!( + logger, + " Pending in-flight updates are: {:?}", + chan_in_flight_updates + ); return Err(DecodeError::InvalidValue); } } @@ -15166,22 +15729,34 @@ where pending_background_events.reserve(close_background_events.len()); 'each_bg_event: for mut new_event in close_background_events { if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, channel_id, update, - } = &mut new_event { + counterparty_node_id, + funding_txo, + channel_id, + update, + } = &mut new_event + { debug_assert_eq!(update.updates.len(), 1); - debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + debug_assert!(matches!( + update.updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); let mut updated_id = false; for pending_event in pending_background_events.iter() { if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: pending_cp, funding_txo: pending_funding, - channel_id: pending_chan_id, update: pending_update, - } = pending_event { + counterparty_node_id: pending_cp, + funding_txo: pending_funding, + channel_id: pending_chan_id, + update: pending_update, + } = pending_event + { let for_same_channel = counterparty_node_id == pending_cp && funding_txo == pending_funding && channel_id == pending_chan_id; if for_same_channel { debug_assert!(update.update_id >= pending_update.update_id); - if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) { + if pending_update.updates.iter().any(|upd| { + matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. }) + }) { // If the background event we're looking at is just // force-closing the channel which already has a pending // force-close update, no need to duplicate it. 
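The loop above exists so that a channel which already has a pending force-close update does not get a duplicate `ChannelForceClosed` update queued for it. A compressed, self-contained sketch of that dedup idea, using hypothetical `ChannelId`/`BgEvent` types rather than the crate's, might look like:

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct ChannelId([u8; 32]);

// Hypothetical, heavily simplified stand-in for the regenerated startup events above.
enum BgEvent {
	ForceClose { channel_id: ChannelId },
	Other,
}

// Keep only the first force-close event seen for each channel; later duplicates are dropped.
fn dedup_force_closes(events: Vec<BgEvent>) -> Vec<BgEvent> {
	let mut force_closed = HashSet::new();
	events
		.into_iter()
		.filter(|ev| match ev {
			BgEvent::ForceClose { channel_id } => force_closed.insert(*channel_id),
			BgEvent::Other => true,
		})
		.collect()
}
```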
@@ -15192,18 +15767,23 @@ where } } } - let mut per_peer_state = per_peer_state.get(counterparty_node_id) + let mut per_peer_state = per_peer_state + .get(counterparty_node_id) .expect("If we have pending updates for a channel it must have an entry") - .lock().unwrap(); + .lock() + .unwrap(); if updated_id { per_peer_state - .closed_channel_monitor_update_ids.entry(*channel_id) + .closed_channel_monitor_update_ids + .entry(*channel_id) .and_modify(|v| *v = cmp::max(update.update_id, *v)) .or_insert(update.update_id); } - let in_flight_updates = &mut per_peer_state.in_flight_monitor_updates + let in_flight_updates = &mut per_peer_state + .in_flight_monitor_updates .entry(*channel_id) - .or_insert_with(|| (*funding_txo, Vec::new())).1; + .or_insert_with(|| (*funding_txo, Vec::new())) + .1; debug_assert!(!in_flight_updates.iter().any(|upd| upd == update)); in_flight_updates.push(update.clone()); } @@ -15234,9 +15814,17 @@ where } if is_channel_closed { - for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() { - let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); - if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source { + for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); + if let HTLCSource::OutboundRoute { + payment_id, session_priv, path, .. + } = htlc_source + { if path.hops.is_empty() { log_error!(logger, "Got an empty path for a pending payment"); return Err(DecodeError::InvalidValue); @@ -15245,17 +15833,28 @@ where let mut session_priv_bytes = [0; 32]; session_priv_bytes[..].copy_from_slice(&session_priv[..]); pending_outbounds.insert_from_monitor_on_startup( - payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger + payment_id, + htlc.payment_hash, + session_priv_bytes, + &path, + best_block_height, + logger, ); } } - for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() { - let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); + for (htlc_source, (htlc, preimage_opt)) in + monitor.get_all_current_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint && - info.prev_htlc_id == prev_hop_data.htlc_id + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id }; // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we @@ -15299,7 +15898,13 @@ where } else { true } }); }, - HTLCSource::OutboundRoute { payment_id, session_priv, path, bolt12_invoice, .. } => { + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. 
+ } => { if let Some(preimage) = preimage_opt { let pending_events = Mutex::new(pending_events_read); // Note that we set `from_onchain` to "false" here, @@ -15316,8 +15921,17 @@ where channel_id: monitor.channel_id(), counterparty_node_id: path.hops[0].pubkey, }; - pending_outbounds.claim_htlc(payment_id, preimage, bolt12_invoice, session_priv, - path, false, compl_action, &pending_events, &&logger); + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + false, + compl_action, + &pending_events, + &&logger, + ); pending_events_read = pending_events.into_inner().unwrap(); } }, @@ -15440,17 +16054,6 @@ where } } - if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() { - // If we have pending HTLCs to forward, assume we either dropped a - // `PendingHTLCsForwardable` or the user received it but never processed it as they - // shut down before the timer hit. Either way, set the time_forwardable to a small - // constant as enough time has likely passed that we should simply handle the forwards - // now, or at least after the user gets a chance to reconnect to our peers. - pending_events_read.push_back((events::Event::PendingHTLCsForwardable { - time_forwardable: Duration::from_secs(2), - }, None)); - } - let expanded_inbound_key = args.node_signer.get_inbound_payment_key(); let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); @@ -15462,20 +16065,29 @@ where if onion_fields.len() != claimable_htlcs_list.len() { return Err(DecodeError::InvalidValue); } - for (purpose, (onion, (payment_hash, htlcs))) in - purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) + for (purpose, (onion, (payment_hash, htlcs))) in purposes + .into_iter() + .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) { - let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment { - purpose, htlcs, onion_fields: onion, - }); - if existing_payment.is_some() { return Err(DecodeError::InvalidValue); } + let existing_payment = claimable_payments.insert( + payment_hash, + ClaimablePayment { purpose, htlcs, onion_fields: onion }, + ); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } } } else { - for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) { - let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment { - purpose, htlcs, onion_fields: None, - }); - if existing_payment.is_some() { return Err(DecodeError::InvalidValue); } + for (purpose, (payment_hash, htlcs)) in + purposes.into_iter().zip(claimable_htlcs_list.into_iter()) + { + let existing_payment = claimable_payments.insert( + payment_hash, + ClaimablePayment { purpose, htlcs, onion_fields: None }, + ); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } } } } else { @@ -15489,26 +16101,31 @@ where OnionPayload::Invoice { _legacy_hop_data } => { if let Some(hop_data) = _legacy_hop_data { events::PaymentPurpose::Bolt11InvoicePayment { - payment_preimage: - match inbound_payment::verify( - payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger - ) { - Ok((payment_preimage, _)) => payment_preimage, - Err(()) => { - log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); - return Err(DecodeError::InvalidValue); - } + 
payment_preimage: match inbound_payment::verify( + payment_hash, + &hop_data, + 0, + &expanded_inbound_key, + &args.logger, + ) { + Ok((payment_preimage, _)) => payment_preimage, + Err(()) => { + log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); + return Err(DecodeError::InvalidValue); }, + }, payment_secret: hop_data.payment_secret, } - } else { return Err(DecodeError::InvalidValue); } + } else { + return Err(DecodeError::InvalidValue); + } + }, + OnionPayload::Spontaneous(payment_preimage) => { + events::PaymentPurpose::SpontaneousPayment(*payment_preimage) }, - OnionPayload::Spontaneous(payment_preimage) => - events::PaymentPurpose::SpontaneousPayment(*payment_preimage), }; - claimable_payments.insert(payment_hash, ClaimablePayment { - purpose, htlcs, onion_fields: None, - }); + claimable_payments + .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); } } @@ -15544,7 +16161,7 @@ where let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { Ok(key) => key, - Err(()) => return Err(DecodeError::InvalidValue) + Err(()) => return Err(DecodeError::InvalidValue), }; if let Some(network_pubkey) = received_network_pubkey { if network_pubkey != our_network_pubkey { @@ -15564,21 +16181,44 @@ where let mut outbound_scid_alias; loop { outbound_scid_alias = fake_scid::Namespace::OutboundAlias - .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source); - if outbound_scid_aliases.insert(outbound_scid_alias) { break; } + .get_fake_scid( + best_block_height, + &chain_hash, + fake_scid_rand_bytes.as_ref().unwrap(), + &args.entropy_source, + ); + if outbound_scid_aliases.insert(outbound_scid_alias) { + break; + } } funded_chan.context.set_outbound_scid_alias(outbound_scid_alias); - } else if !outbound_scid_aliases.insert(funded_chan.context.outbound_scid_alias()) { + } else if !outbound_scid_aliases + .insert(funded_chan.context.outbound_scid_alias()) + { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. - log_error!(logger, "Got duplicate outbound SCID alias; {}", funded_chan.context.outbound_scid_alias()); + log_error!( + logger, + "Got duplicate outbound SCID alias; {}", + funded_chan.context.outbound_scid_alias() + ); return Err(DecodeError::InvalidValue); } if funded_chan.context.is_usable() { - if short_to_chan_info.insert(funded_chan.context.outbound_scid_alias(), (funded_chan.context.get_counterparty_node_id(), *chan_id)).is_some() { + if short_to_chan_info + .insert( + funded_chan.context.outbound_scid_alias(), + (funded_chan.context.get_counterparty_node_id(), *chan_id), + ) + .is_some() + { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. 
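The alias-assignment loop above keeps drawing fake SCIDs until `HashSet::insert` reports a value that has not been handed out yet, which is why a colliding alias on an older channel is possible but rare. A tiny self-contained sketch of that retry-until-unique idiom follows; the toy `draw_candidate` generator is a stand-in for the `fake_scid` helper used in the hunk.

```rust
use std::collections::HashSet;

// Toy deterministic generator; the real code derives candidates via the fake_scid helpers.
fn draw_candidate(state: &mut u64) -> u64 {
	*state = state.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
	*state
}

// Retry until HashSet::insert returns true, i.e. the candidate was not already assigned.
fn assign_unique_alias(assigned: &mut HashSet<u64>, state: &mut u64) -> u64 {
	loop {
		let candidate = draw_candidate(state);
		if assigned.insert(candidate) {
			return candidate;
		}
	}
}

fn main() {
	let mut assigned = HashSet::new();
	let mut state = 1u64;
	for _ in 0..5 {
		let alias = assign_unique_alias(&mut assigned, &mut state);
		assert!(assigned.contains(&alias));
	}
	assert_eq!(assigned.len(), 5);
}
```

In the diff itself, a collision with a previously-read channel is treated as `DecodeError::InvalidValue`, as the following lines show.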
- log_error!(logger, "Got duplicate outbound SCID alias; {}", funded_chan.context.outbound_scid_alias()); + log_error!( + logger, + "Got duplicate outbound SCID alias; {}", + funded_chan.context.outbound_scid_alias() + ); return Err(DecodeError::InvalidValue); } } @@ -15593,10 +16233,13 @@ where let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator); - for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() { + for (node_id, monitor_update_blocked_actions) in + monitor_update_blocked_actions_per_peer.unwrap() + { if let Some(peer_state) = per_peer_state.get(&node_id) { for (channel_id, actions) in monitor_update_blocked_actions.iter() { - let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); + let logger = + WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); for action in actions.iter() { if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: @@ -15605,15 +16248,21 @@ where funding_txo: _, channel_id: blocked_channel_id, blocking_action, - }), .. - } = action { + }), + .. + } = action + { if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) { log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", blocked_channel_id); - blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates + blocked_peer_state + .lock() + .unwrap() + .actions_blocking_raa_monitor_updates .entry(*blocked_channel_id) - .or_insert_with(Vec::new).push(blocking_action.clone()); + .or_insert_with(Vec::new) + .push(blocking_action.clone()); } else { // If the channel we were blocking has closed, we don't need to // worry about it - the blocked monitor update should never have @@ -15622,7 +16271,10 @@ where // anymore. } } - if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action { + if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + .. + } = action + { debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue"); } } @@ -15632,7 +16284,8 @@ where // `ChannelManager` was serialized. In that case, we'll run the post-update // actions as soon as we get going. } - peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions; + peer_state.lock().unwrap().monitor_update_blocked_actions = + monitor_update_blocked_actions; } else { for actions in monitor_update_blocked_actions.values() { for action in actions.iter() { @@ -15646,7 +16299,12 @@ where // which we ignore here. 
} else { let logger = WithContext::from(&args.logger, Some(node_id), None, None); - log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id); + log_error!( + logger, + "Got blocked actions {:?} without a per-peer-state for {}", + monitor_update_blocked_actions, + node_id + ); return Err(DecodeError::InvalidValue); } } @@ -15656,10 +16314,15 @@ where let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( - chain_hash, best_block, our_network_pubkey, - highest_seen_timestamp, expanded_inbound_key, - secp_ctx.clone(), args.message_router - ).with_async_payments_offers_cache(async_receive_offer_cache); + chain_hash, + best_block, + our_network_pubkey, + highest_seen_timestamp, + expanded_inbound_key, + secp_ctx.clone(), + args.message_router, + ) + .with_async_payments_offers_cache(async_receive_offer_cache); let channel_manager = ChannelManager { chain_hash, @@ -15677,7 +16340,10 @@ where forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), - claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }), + claimable_payments: Mutex::new(ClaimablePayments { + claimable_payments, + pending_claiming_payments: pending_claiming_payments.unwrap(), + }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), short_to_chan_info: FairRwLock::new(short_to_chan_info), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), @@ -15697,6 +16363,7 @@ where pending_events: Mutex::new(pending_events_read), pending_events_processor: AtomicBool::new(false), + pending_htlc_forwards_processor: AtomicBool::new(false), pending_background_events: Mutex::new(pending_background_events), total_consistency_lock: RwLock::new(()), background_events_processed_since_startup: AtomicBool::new(false), @@ -15706,7 +16373,6 @@ where funding_batch_states: Mutex::new(BTreeMap::new()), - pending_broadcast_messages: Mutex::new(Vec::new()), entropy_source: args.entropy_source, @@ -15724,7 +16390,8 @@ where let mut processed_claims: HashSet> = new_hash_set(); for (_, monitor) in args.channel_monitors.iter() { - for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() { + for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() + { if !payment_claims.is_empty() { for payment_claim in payment_claims { if processed_claims.contains(&payment_claim.mpp_parts) { @@ -15740,8 +16407,12 @@ where { let payments = channel_manager.claimable_payments.lock().unwrap(); if !payments.claimable_payments.contains_key(&payment_hash) { - if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) { - if payment.payment_id == payment_claim.claiming_payment.payment_id { + if let Some(payment) = + payments.pending_claiming_payments.get(&payment_hash) + { + if payment.payment_id + == payment_claim.claiming_payment.payment_id + { // If this payment already exists and was marked as // being-claimed then the serialized state must contain all // of the pending `ChannelMonitorUpdate`s required to get @@ -15753,8 +16424,16 @@ where } } - let mut channels_without_preimage = payment_claim.mpp_parts.iter() - .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id)) + let mut channels_without_preimage = payment_claim + .mpp_parts + .iter() + .map(|htlc_info| { + ( + htlc_info.counterparty_node_id, + 
htlc_info.funding_txo, + htlc_info.channel_id, + ) + }) .collect::>(); // If we have multiple MPP parts which were received over the same channel, // we only track it once as once we get a preimage durably in the @@ -15775,16 +16454,26 @@ where // preimages eventually timing out from ChannelMonitors to prevent us from // doing so forever. - let claim_found = - channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment( - payment_hash, &channel_manager.node_signer, &channel_manager.logger, - &channel_manager.inbound_payment_id_secret, true, + let claim_found = channel_manager + .claimable_payments + .lock() + .unwrap() + .begin_claiming_payment( + payment_hash, + &channel_manager.node_signer, + &channel_manager.logger, + &channel_manager.inbound_payment_id_secret, + true, ); if claim_found.is_err() { - let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap(); + let mut claimable_payments = + channel_manager.claimable_payments.lock().unwrap(); match claimable_payments.pending_claiming_payments.entry(payment_hash) { hash_map::Entry::Occupied(_) => { - debug_assert!(false, "Entry was added in begin_claiming_payment"); + debug_assert!( + false, + "Entry was added in begin_claiming_payment" + ); return Err(DecodeError::InvalidValue); }, hash_map::Entry::Vacant(entry) => { @@ -15794,22 +16483,34 @@ where } for part in payment_claim.mpp_parts.iter() { - let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| ( - part.counterparty_node_id, part.channel_id, - PendingMPPClaimPointer(Arc::clone(&ptr)) - )); - let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| + let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| { + ( + part.counterparty_node_id, + part.channel_id, + PendingMPPClaimPointer(Arc::clone(&ptr)), + ) + }); + let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| { RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)), } - ); + }); // Note that we don't need to pass the `payment_info` here - its // already (clearly) durably on disk in the `ChannelMonitor` so there's // no need to worry about getting it into others. 
channel_manager.claim_mpp_part( - part.into(), payment_preimage, None, - |_, _| - (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr) + part.into(), + payment_preimage, + None, + |_, _| { + ( + Some(MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + }), + pending_claim_ptr, + ) + }, ); } processed_claims.insert(payment_claim.mpp_parts); @@ -15825,7 +16526,9 @@ where let mut receiver_node_id = Some(our_network_pubkey); let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; if phantom_shared_secret.is_some() { - let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode) + let phantom_pubkey = channel_manager + .node_signer + .get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = Some(phantom_pubkey) } @@ -15853,17 +16556,27 @@ where let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap(); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(channel) = peer_state.channel_by_id + if let Some(channel) = peer_state + .channel_by_id .get_mut(&previous_channel_id) .and_then(Channel::as_funded_mut) { - let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash)); - channel.claim_htlc_while_disconnected_dropping_mon_update_legacy( - claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger + let logger = WithChannelContext::from( + &channel_manager.logger, + &channel.context, + Some(payment_hash), ); + channel + .claim_htlc_while_disconnected_dropping_mon_update_legacy( + claimable_htlc.prev_hop.htlc_id, + payment_preimage, + &&logger, + ); } } - if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) { + if let Some(previous_hop_monitor) = + args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) + { // Note that this is unsafe as we no longer require the // `ChannelMonitor`s to be re-persisted prior to this // `ChannelManager` being persisted after we get started running. @@ -15877,23 +16590,37 @@ where // for nodes during upgrade, and we explicitly require the old // persistence semantics on upgrade in the release notes. 
previous_hop_monitor.provide_payment_preimage_unsafe_legacy( - &payment_hash, &payment_preimage, &channel_manager.tx_broadcaster, - &channel_manager.fee_estimator, &channel_manager.logger + &payment_hash, + &payment_preimage, + &channel_manager.tx_broadcaster, + &channel_manager.fee_estimator, + &channel_manager.logger, ); } } let mut pending_events = channel_manager.pending_events.lock().unwrap(); - let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap()); - pending_events.push_back((events::Event::PaymentClaimed { - receiver_node_id, - payment_hash, - purpose: payment.purpose, - amount_msat: claimable_amt_msat, - htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(), - sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat), - onion_fields: payment.onion_fields, - payment_id: Some(payment_id), - }, None)); + let payment_id = + payment.inbound_payment_id(&inbound_payment_id_secret.unwrap()); + pending_events.push_back(( + events::Event::PaymentClaimed { + receiver_node_id, + payment_hash, + purpose: payment.purpose, + amount_msat: claimable_amt_msat, + htlcs: payment + .htlcs + .iter() + .map(events::ClaimedHTLC::from) + .collect(), + sender_intended_total_msat: payment + .htlcs + .first() + .map(|htlc| htlc.total_msat), + onion_fields: payment.onion_fields, + payment_id: Some(payment_id), + }, + None, + )); } } } @@ -15902,18 +16629,38 @@ where for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay { + for ( + source, + preimage, + downstream_value, + downstream_closed, + downstream_node_id, + downstream_funding, + downstream_channel_id, + ) in pending_claims_to_replay + { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. 
- channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None, - downstream_closed, true, downstream_node_id, downstream_funding, - downstream_channel_id, None + channel_manager.claim_funds_internal( + source, + preimage, + Some(downstream_value), + None, + downstream_closed, + true, + downstream_node_id, + downstream_funding, + downstream_channel_id, + None, ); } @@ -16072,8 +16819,8 @@ mod tests { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -16292,8 +17039,8 @@ mod tests { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -16337,8 +17084,8 @@ mod tests { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -16384,8 +17131,8 @@ mod tests { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -16442,7 +17189,7 @@ mod tests { assert!(updates.update_fee.is_none()); 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -17149,7 +17896,7 @@ mod tests { assert!(deserialized_fwd_htlcs.is_empty()); core::mem::drop(deserialized_fwd_htlcs); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); } } @@ -17360,7 +18107,7 @@ pub mod bench { $node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &cs); $node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id())); - expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b }); + $node_b.process_pending_htlc_forwards(); expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000); $node_b.claim_funds(payment_preimage); expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 245479e1df8..afb34dc5601 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2333,24 +2333,12 @@ impl SendEvent { } } -#[macro_export] -/// Don't use this, use the identically-named function instead. -macro_rules! expect_pending_htlcs_forwardable_conditions { - ($node: expr, $expected_failures: expr) => { - $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions( - $node.node.get_and_clear_pending_events(), - &$expected_failures, - ); - }; -} - #[macro_export] macro_rules! expect_htlc_handling_failed_destinations { ($events: expr, $expected_failures: expr) => {{ let mut num_expected_failures = $expected_failures.len(); for event in $events { match event { - $crate::events::Event::PendingHTLCsForwardable { .. } => {}, $crate::events::Event::HTLCHandlingFailed { ref failure_type, .. } => { assert!($expected_failures.contains(&failure_type)); num_expected_failures -= 1; @@ -2362,70 +2350,22 @@ macro_rules! expect_htlc_handling_failed_destinations { }}; } -/// Checks that an [`Event::PendingHTLCsForwardable`] is available in the given events and, if -/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCHandlingFailureType`] is included in the -/// `expected_failures` set. -pub fn expect_pending_htlcs_forwardable_conditions( +/// Checks that, if there are any [`Event::HTLCHandlingFailed`] events, their +/// [`HTLCHandlingFailureType`] is included in the `expected_failures` set. +pub fn expect_htlc_failure_conditions( events: Vec, expected_failures: &[HTLCHandlingFailureType], ) { - let count = expected_failures.len() + 1; - assert_eq!(events.len(), count); - assert!(events - .iter() - .find(|event| matches!(event, Event::PendingHTLCsForwardable { .. 
})) - .is_some()); + assert_eq!(events.len(), expected_failures.len()); if expected_failures.len() > 0 { expect_htlc_handling_failed_destinations!(events, expected_failures) } } #[macro_export] -/// Clears (and ignores) a PendingHTLCsForwardable event -/// -/// Don't use this, call [`expect_pending_htlcs_forwardable_conditions()`] with an empty failure -/// set instead. -macro_rules! expect_pending_htlcs_forwardable_ignore { - ($node: expr) => { - $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions( - $node.node.get_and_clear_pending_events(), - &[], - ); - }; -} - -#[macro_export] -/// Clears (and ignores) PendingHTLCsForwardable and HTLCHandlingFailed events -/// -/// Don't use this, call [`expect_pending_htlcs_forwardable_conditions()`] instead. -macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore { - ($node: expr, $expected_failures: expr) => { - $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions( - $node.node.get_and_clear_pending_events(), - &$expected_failures, - ); - }; -} - -#[macro_export] -/// Handles a PendingHTLCsForwardable event -macro_rules! expect_pending_htlcs_forwardable { - ($node: expr) => {{ - $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions( - $node.node.get_and_clear_pending_events(), - &[], - ); - $node.node.process_pending_htlc_forwards(); - - // Ensure process_pending_htlc_forwards is idempotent. - $node.node.process_pending_htlc_forwards(); - }}; -} - -#[macro_export] -/// Handles a PendingHTLCsForwardable and HTLCHandlingFailed event -macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed { +/// Processes any HTLC forwards and handles an expected [`Event::HTLCHandlingFailed`]. +macro_rules! process_htlcs_and_expect_htlc_handling_failed { ($node: expr, $expected_failures: expr) => {{ - $crate::ln::functional_test_utils::expect_pending_htlcs_forwardable_conditions( + $crate::ln::functional_test_utils::expect_htlc_failure_conditions( $node.node.get_and_clear_pending_events(), &$expected_failures, ); @@ -2436,23 +2376,6 @@ macro_rules! expect_pending_htlcs_forwardable_and_htlc_handling_failed { }}; } -#[cfg(any(test, feature = "_externalize_tests"))] -macro_rules! expect_pending_htlcs_forwardable_from_events { - ($node: expr, $events: expr, $ignore: expr) => {{ - assert_eq!($events.len(), 1); - match $events[0] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - }; - if $ignore { - $node.node.process_pending_htlc_forwards(); - - // Ensure process_pending_htlc_forwards is idempotent. - $node.node.process_pending_htlc_forwards(); - } - }}; -} - #[macro_export] /// Performs the "commitment signed dance" - the series of message exchanges which occur after a /// commitment update. 
@@ -2600,7 +2523,7 @@ pub fn do_commitment_signed_dance( } if fail_backwards { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( node_a, [crate::events::HTLCHandlingFailureType::Forward { node_id: Some(node_b_id), @@ -3156,7 +3079,7 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( payment_failed_events: Vec, expected_payment_hash: PaymentHash, expected_payment_failed_permanently: bool, conditions: PaymentFailedConditions<'e>, ) { - if conditions.expected_mpp_parts_remain { + if conditions.expected_mpp_parts_remain || conditions.retry_expected { assert_eq!(payment_failed_events.len(), 1); } else { assert_eq!(payment_failed_events.len(), 2); @@ -3226,11 +3149,6 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( }, _ => panic!("Unexpected second event"), } - } else if conditions.retry_expected { - match &payment_failed_events[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected second event"), - } } } @@ -3388,11 +3306,11 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option if is_last_hop && is_probe { commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(node); + node.node.process_pending_htlc_forwards(); check_added_monitors(node, 1); } else { commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(node); + node.node.process_pending_htlc_forwards(); } if is_last_hop && clear_recipient_events { @@ -3482,11 +3400,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option } event = Some(events_2[0].clone()); } else if let Some(ref failure) = expected_failure { - // If we successfully decode the HTLC onion but then fail later in - // process_pending_htlc_forwards, then we'll queue the failure and generate a new - // `ProcessPendingHTLCForwards` event. If we fail during the process of decoding the HTLC, - // we'll fail it immediately with no intermediate forwarding event. 
- assert!(events_2.len() == 1 || events_2.len() == 2); + assert!(events_2.len() == 1); expect_htlc_handling_failed_destinations!(events_2, &[failure]); node.node.process_pending_htlc_forwards(); check_added_monitors!(node, 1); @@ -3972,7 +3886,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>( repeat(HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }) .take(expected_paths.len()) .collect(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( expected_paths[0].last().unwrap(), expected_destinations ); @@ -4054,7 +3968,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( update_next_node ); if !update_next_node { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( node, [HTLCHandlingFailureType::Forward { node_id: Some(prev_node.node.get_our_node_id()), diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8ca290ef165..ae2505ea8df 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -871,7 +871,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); // Check that nodes[1] fails the HTLC upstream - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -892,7 +892,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout connect_blocks(&nodes[1], ANTI_REORG_DELAY); // Expect handling another fail back event, but the HTLC is already gone - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), @@ -921,7 +921,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac }, PostFailBackAction::FailOffChain => { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }] ); @@ -2102,7 +2102,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors(&nodes[2], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }] ); @@ -2179,7 +2179,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -2267,7 +2267,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -2360,7 +2360,7 @@ fn 
do_test_commitment_revoked_fail_backward_exhaustive( let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }] ); @@ -2377,7 +2377,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }] ); @@ -2397,7 +2397,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( check_added_monitors(&nodes[2], 1); nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }] ); @@ -2435,15 +2435,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( // commitment transaction for nodes[0] until process_pending_htlc_forwards(). check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 1); match events[0] { Event::HTLCHandlingFailed { .. } => {}, _ => panic!("Unexpected event"), } - match events[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - }; // Deliberately don't process the pending fail-back so they all fail back at once after // block connection just like the !deliver_bs_raa case } @@ -2456,7 +2452,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() }); + assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 3 + nodes.len() }); assert!(events.iter().any(|ev| matches!( ev, Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } @@ -2816,7 +2812,7 @@ pub fn test_force_close_fail_back() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -3176,21 +3172,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_1 = nodes[1].node.get_and_clear_pending_events(); if messages_delivered == 0 { - assert_eq!(events_1.len(), 2); + assert_eq!(events_1.len(), 1); match events_1[0] { Event::ChannelReady { .. } => {}, _ => panic!("Unexpected event"), }; - match events_1[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - }; } else { - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event"), - }; + assert_eq!(events_1.len(), 0); } nodes[0].node.peer_disconnected(node_b_id); @@ -3620,7 +3608,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors(&nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); @@ -3718,7 +3706,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] ); @@ -3787,7 +3775,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); } check_added_monitors(&nodes[1], 0); @@ -3799,7 +3787,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { if forwarded_htlc { let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors(&nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -4582,7 +4570,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Mine the HTLC timeout transaction on node B. 
mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -4805,7 +4793,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno HTLCHandlingFailureType::Receive { payment_hash: hash_5 }, HTLCHandlingFailureType::Receive { payment_hash: hash_6 }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); + process_htlcs_and_expect_htlc_handling_failed!(nodes[4], failed_destinations); check_added_monitors(&nodes[4], 1); let four_removes = get_htlc_update_msgs!(nodes[4], node_d_id); @@ -4824,7 +4812,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno HTLCHandlingFailureType::Receive { payment_hash: hash_2 }, HTLCHandlingFailureType::Receive { payment_hash: hash_4 }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); + process_htlcs_and_expect_htlc_handling_failed!(nodes[5], failed_destinations_2); check_added_monitors(&nodes[5], 1); let two_removes = get_htlc_update_msgs!(nodes[5], node_d_id); @@ -4843,7 +4831,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 }, HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); + process_htlcs_and_expect_htlc_handling_failed!(nodes[3], failed_destinations_3); check_added_monitors(&nodes[3], 1); let six_removes = get_htlc_update_msgs!(nodes[3], node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[0]); @@ -4879,7 +4867,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } let events = nodes[2].node.get_and_clear_pending_events(); let close_event = if deliver_last_raa { - assert_eq!(events.len(), 2 + 6); + assert_eq!(events.len(), 2 + 5); events.last().clone().unwrap() } else { assert_eq!(events.len(), 1); @@ -4893,7 +4881,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); check_closed_broadcast!(nodes[2], true); if deliver_last_raa { - expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); + nodes[2].node.process_pending_htlc_forwards(); let expected_destinations: Vec = repeat(HTLCHandlingFailureType::Forward { @@ -4923,7 +4911,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno .collect() }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); + process_htlcs_and_expect_htlc_handling_failed!(nodes[2], expected_destinations); } check_added_monitors(&nodes[2], 3); @@ -5404,7 +5392,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] ); @@ -5797,12 +5785,6 @@ pub fn test_free_and_fail_holding_cell_htlcs() { check_added_monitors(&nodes[1], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - } nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -5891,7 +5873,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); @@ -5917,15 +5899,9 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { check_added_monitors(&nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); - // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process. + // Call ChannelManager's process_pending_htlc_forwards let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(process_htlc_forwards_event.len(), 2); - match &process_htlc_forwards_event[1] { - &Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event"), - } - - // In response, we call ChannelManager's process_pending_htlc_forwards + assert_eq!(process_htlc_forwards_event.len(), 1); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); @@ -6003,7 +5979,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); check_added_monitors(&nodes[1], 1); @@ -6015,7 +5991,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); check_added_monitors(&nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion] @@ -6053,7 +6029,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -6114,7 +6090,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); payment_event = SendEvent::from_node(&nodes[1]); assert_eq!(payment_event.msgs.len(), 1); @@ -6124,7 +6100,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); check_added_monitors(&nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion] @@ -6145,7 +6121,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { _ => panic!("Unexpected event"), } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -6227,7 +6203,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors(&nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }] ); @@ -6592,8 +6568,8 
@@ pub fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] ); @@ -6937,8 +6913,8 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { ); connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_conditions( - events[0..2].to_vec(), + expect_htlc_failure_conditions( + events[0..1].to_vec(), &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }], ); match events.last().unwrap() { @@ -7248,9 +7224,9 @@ pub fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[0], - [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }] + expect_htlc_failure_conditions( + nodes[0].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }], ); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); @@ -7803,7 +7779,7 @@ pub fn test_onion_value_mpp_set_calculation() { node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors(&node, 0); commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(node); + node.node.process_pending_htlc_forwards(); if idx == 0 { let mut events_2 = node.node.get_and_clear_pending_msg_events(); @@ -7979,7 +7955,7 @@ pub fn test_preimage_storage() { } // Note that after leaving the above scope we have no knowledge of any arguments or return // values from previous calls. - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -8024,8 +8000,8 @@ pub fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: $payment_hash }] ); @@ -9440,7 +9416,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t &[(0, htlc_tx)], conf_height + 1, ); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }] ); @@ -9470,7 +9446,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to // avoid the A<->B channel closing (even though it already has). This will generate a // spurious HTLCHandlingFailed event. 
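A sketch of the two-pass receive-then-fail flow the comments above describe, using the macro names introduced in this diff (assuming the standard test harness; the `our_payment_hash` binding is the test-local one):

```rust
// Pass 1: process the receipt of the HTLC; it is found unacceptable (e.g. an
// underpayment or a bad payment secret) and a backwards failure is queued.
nodes[1].node.process_pending_htlc_forwards();
// Pass 2: process the queued failure, asserting the HTLCHandlingFailed event and
// generating the update_fail_htlc for the upstream peer.
process_htlcs_and_expect_htlc_handling_failed!(
	nodes[1],
	[HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]
);
check_added_monitors(&nodes[1], 1);
let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert_eq!(fail_updates.update_fail_htlcs.len(), 1);
```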
- expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }] ); @@ -9513,7 +9489,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); } - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000); { @@ -9533,20 +9509,20 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { // the first HTLC delivered above. } - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); if test_for_second_fail_panic { // Now we go fail back the first HTLC from the user end. nodes[1].node.fail_htlc_backwards(&our_payment_hash); - let expected_destinations = vec![ + let expected_destinations = &[ HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - expected_destinations + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + expected_destinations, ); nodes[1].node.process_pending_htlc_forwards(); @@ -9578,9 +9554,9 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); @@ -9693,7 +9669,7 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -9708,10 +9684,10 @@ pub fn test_inconsistent_mpp_params() { // amount. It will assume the second is a privacy attack (no longer particularly relevant // post-payment_secrets) and fail back the new HTLC. 
} - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); nodes[3].node.process_pending_htlc_forwards(); let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], [fail_type]); + expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[fail_type]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); @@ -9720,7 +9696,7 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }] ); @@ -9795,7 +9771,7 @@ pub fn test_double_partial_claim() { HTLCHandlingFailureType::Receive { payment_hash: hash }, HTLCHandlingFailureType::Receive { payment_hash: hash }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); + process_htlcs_and_expect_htlc_handling_failed!(nodes[3], failed_destinations); let reason = PaymentFailureReason::RecipientRejected; pass_failed_payment_back(&nodes[0], paths, false, hash, reason); @@ -10056,7 +10032,7 @@ fn do_test_max_dust_htlc_exposure( let payment_event = SendEvent::from_event(events.remove(0)); nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] @@ -10323,7 +10299,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let payment_event = SendEvent::from_event(events.remove(0)); nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] @@ -10376,7 +10352,8 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { nodes[0].node.handle_update_add_htlc(node_c_id, &send.msgs[0]); commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); + nodes[0].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[0], 1); let node_id_1 = node_b_id; expect_htlc_handling_failed_destinations!( @@ -10498,7 +10475,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) let payment_event = SendEvent::from_event(events.remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] @@ -10770,7 +10747,7 @@ fn 
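The `_ignore!` macro replacements above all follow the same shape: assert which (if any) HTLCHandlingFailed events are pending, then drive the forwarding pass explicitly. A sketch of that shape, assuming the `expect_htlc_failure_conditions` helper these hunks rely on and the test-local names from `test_inconsistent_mpp_params`:

```rust
// No failures expected yet: the pending-events queue should contain none.
expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]);
nodes[3].node.process_pending_htlc_forwards();

// Once the second, inconsistent HTLC has been processed, exactly one receive
// failure should be pending before the next pass fails it back.
let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[fail_type]);
nodes[3].node.process_pending_htlc_forwards();
check_added_monitors(&nodes[3], 1);
```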
do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash let mut payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); if valid_delta { let preimage = if use_user_hash { None } else { Some(payment_preimage) }; @@ -10779,7 +10756,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); } else { let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 4f1f5c581df..3749eaad28a 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -342,13 +342,13 @@ pub fn test_channel_reserve_holding_cell_htlcs() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors(&nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let ref payment_event_11 = expect_forward!(nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_11.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1); // flush the htlcs in the holding cell @@ -356,7 +356,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[1]); commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let ref payment_event_3 = expect_forward!(nodes[1]); assert_eq!(payment_event_3.msgs.len(), 2); @@ -364,7 +364,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[1]); commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -552,7 +552,7 @@ pub fn channel_reserve_in_flight_removes() { check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000); // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't @@ -602,7 +602,7 @@ pub fn channel_reserve_in_flight_removes() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[0], 1); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); 
expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000); claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4); @@ -674,9 +674,9 @@ pub fn holding_cell_htlc_counting() { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail]); check_added_monitors(&nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -720,7 +720,7 @@ pub fn holding_cell_htlc_counting() { nodes[2].node.handle_revoke_and_ack(node_b_id, &as_final_raa); check_added_monitors(&nodes[2], 1); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), payments.len()); @@ -917,7 +917,7 @@ pub fn test_fee_spike_violation_fails_htlc() { next_local_nonce: None, }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] @@ -1439,7 +1439,7 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); } let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -2035,7 +2035,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion] diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index f2e8284a617..509cb2e3b7b 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -1329,23 +1329,23 @@ mod test { // Note that we have to "forward pending HTLCs" twice before we see the PaymentClaimable as // this "emulates" the payment taking two hops, providing some privacy to make phantom node // payments "look real" by taking more time. - let other_events = RefCell::new(Vec::new()); - let forward_event_handler = |event: Event| { - if let Event::PendingHTLCsForwardable { .. 
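The same two-pass idea applies at a forwarding node, as in `holding_cell_htlc_counting` above: the first pass attempts the forward (which fails here because the outbound channel cannot accept more HTLCs), and the second pass fails the HTLC back. Sketch, using the surrounding test's names:

```rust
// Pass 1: try to forward the HTLC toward nodes[2]; the attempt fails and queues a
// backwards failure against the nodes[1] <-> nodes[2] channel.
nodes[1].node.process_pending_htlc_forwards();
// Pass 2: process that failure and assert it is reported against the right channel,
// producing the update_fail_htlc that is then relayed back to nodes[0] as usual.
let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 };
process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail]);
check_added_monitors(&nodes[1], 1);
let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
```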
} = event { - nodes[fwd_idx].node.process_pending_htlc_forwards(); - } else { - other_events.borrow_mut().push(event); - } + nodes[fwd_idx].node.process_pending_htlc_forwards(); + nodes[fwd_idx].node.process_pending_htlc_forwards(); + + let events = RefCell::new(Vec::new()); + let event_handler = |event: Event| { + events.borrow_mut().push(event); Ok(()) }; - nodes[fwd_idx].node.process_pending_events(&forward_event_handler); - nodes[fwd_idx].node.process_pending_events(&forward_event_handler); + + nodes[fwd_idx].node.process_pending_events(&event_handler); + nodes[fwd_idx].node.process_pending_events(&event_handler); let payment_preimage_opt = if user_generated_pmt_hash { None } else { Some(payment_preimage) }; - assert_eq!(other_events.borrow().len(), 1); + assert_eq!(events.borrow().len(), 1); check_payment_claimable( - &other_events.borrow()[0], + &events.borrow()[0], payment_hash, payment_secret, payment_amt, diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 0a89899f118..6f5480686a5 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -76,7 +76,7 @@ fn chanmon_fail_from_stale_commitment() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -89,7 +89,7 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -902,7 +902,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 10_000_000); let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 20_000_000); @@ -914,7 +914,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); nodes[1].node.claim_funds(payment_preimage_2); get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1219,8 +1219,8 @@ fn test_no_preimage_inbound_htlc_balances() { let as_htlc_timeout_claim = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_htlc_timeout_claim.len(), 
1); check_spends!(as_htlc_timeout_claim[0], as_txn[0]); - expect_pending_htlcs_forwardable_conditions!(nodes[0], - [HTLCHandlingFailureType::Receive { payment_hash: to_a_failed_payment_hash }]); + expect_htlc_failure_conditions(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: to_a_failed_payment_hash }]); + nodes[0].node.process_pending_htlc_forwards(); assert_eq!(as_pre_spend_claims, sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); @@ -1237,8 +1237,8 @@ fn test_no_preimage_inbound_htlc_balances() { // The next few blocks for B look the same as for A, though for the opposite HTLC nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1)); - expect_pending_htlcs_forwardable_conditions!(nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash: to_b_failed_payment_hash }]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: to_b_failed_payment_hash }]); + nodes[1].node.process_pending_htlc_forwards(); let bs_htlc_timeout_claim = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_timeout_claim.len(), 1); check_spends!(bs_htlc_timeout_claim[0], as_txn[0]); @@ -1414,7 +1414,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 6); + assert_eq!(events.len(), 5); let mut failed_payments: HashSet<_> = [timeout_payment_hash, dust_payment_hash, live_payment_hash, missing_htlc_payment_hash] .iter().map(|a| *a).collect(); @@ -1433,8 +1433,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ } }); assert!(failed_payments.is_empty()); - if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); } - match &events[1] { + match &events[0] { Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {}, _ => panic!(), } @@ -1739,8 +1738,8 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { // `COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE` blocks, making us consider all the HTLCs // pinnable claims, which the remainder of the test assumes. connect_blocks(&nodes[0], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], - [HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); + expect_htlc_failure_conditions(nodes[0].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); @@ -2594,18 +2593,16 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { check_closed_broadcast(&nodes[0], 1, true); let a_events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(a_events.len(), if have_htlcs { 3 } else { 1 }); + assert_eq!(a_events.len(), if have_htlcs { 2 } else { 1 }); if have_htlcs { - assert!(a_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); assert!(a_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); } assert!(a_events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. 
}))); check_closed_broadcast(&nodes[1], 1, true); let b_events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(b_events.len(), if have_htlcs { 3 } else { 1 }); + assert_eq!(b_events.len(), if have_htlcs { 2 } else { 1 }); if have_htlcs { - assert!(b_events.iter().any(|ev| matches!(ev, Event::PendingHTLCsForwardable { .. }))); assert!(b_events.iter().any(|ev| matches!(ev, Event::HTLCHandlingFailed { .. }))); } assert!(b_events.iter().any(|ev| matches!(ev, Event::ChannelClosed { .. }))); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 416a4e130a9..b7443e903d9 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -122,7 +122,6 @@ fn run_onion_failure_test_with_fail_intercept( macro_rules! expect_htlc_forward { ($node: expr) => {{ - expect_event!($node, Event::PendingHTLCsForwardable); $node.node.process_pending_htlc_forwards(); }}; } @@ -149,7 +148,7 @@ fn run_onion_failure_test_with_fail_intercept( let update_1_0 = match test_case { 0 | 100 => { // intermediate node failure; fail backward to 0 - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[expected_failure_type.clone().unwrap()] @@ -188,7 +187,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[2]); expect_event!(&nodes[2], Event::PaymentClaimable); callback_node(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[2], [HTLCHandlingFailureType::Receive { payment_hash: *payment_hash }] ); @@ -1564,7 +1563,7 @@ fn test_overshoot_final_cltv() { } } } - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); @@ -1572,7 +1571,7 @@ fn test_overshoot_final_cltv() { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); commitment_signed_dance!(nodes[2], nodes[1], update_1.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, 40_000); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); } @@ -2291,11 +2290,11 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash, payment_secret, payment_amount); nodes[1].node.fail_htlc_backwards_with_reason(&payment_hash, failure_code); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }] ); @@ -2439,7 +2438,7 @@ fn test_phantom_onion_hmac_failure() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); 
nodes[1].node.process_pending_update_add_htlcs(); // Modify the payload so the phantom hop's HMAC is bogus. @@ -2462,9 +2461,9 @@ fn test_phantom_onion_hmac_failure() { } }; nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2512,7 +2511,7 @@ fn test_phantom_invalid_onion_payload() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_update_add_htlcs(); // Modify the onion packet to have an invalid payment amount. @@ -2563,9 +2562,9 @@ fn test_phantom_invalid_onion_payload() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2611,7 +2610,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_update_add_htlcs(); // Modify the payload so the phantom hop's HMAC is bogus. 
@@ -2629,9 +2628,9 @@ fn test_phantom_final_incorrect_cltv_expiry() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2681,11 +2680,11 @@ fn test_phantom_failure_too_low_cltv() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2736,7 +2735,7 @@ fn test_phantom_failure_modified_cltv() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: phantom_scid }] @@ -2789,7 +2788,7 @@ fn test_phantom_failure_expires_too_soon() { connect_blocks(&nodes[1], CLTV_FAR_FAR_AWAY); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: phantom_scid }] @@ -2839,13 +2838,13 @@ fn test_phantom_failure_too_low_recv_amt() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }], ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2908,7 +2907,7 @@ fn 
do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: phantom_scid }] @@ -2959,9 +2958,9 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!( nodes[1], @@ -2972,9 +2971,9 @@ fn test_phantom_failure_reject_payment() { route.paths[0].hops.last().unwrap().pubkey ); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( - nodes[1], - [HTLCHandlingFailureType::Receive { payment_hash }] + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[1].node.process_pending_htlc_forwards(); diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index fdbfcc089e5..612dd369271 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -1226,13 +1226,14 @@ impl OutboundPayments { ) } + // Returns whether the data changed and needs to be repersisted. 
#[rustfmt::skip] pub(super) fn check_retry_payments( &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, logger: &L, send_payment_along_path: SP, - ) + ) -> bool where R::Target: Router, ES::Target: EntropySource, @@ -1243,6 +1244,7 @@ impl OutboundPayments { L::Target: Logger, { let _single_thread = self.retry_lock.lock().unwrap(); + let mut should_persist = false; loop { let mut outbounds = self.pending_outbound_payments.lock().unwrap(); let mut retry_id_route_params = None; @@ -1262,7 +1264,8 @@ impl OutboundPayments { } core::mem::drop(outbounds); if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params { - self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path) + self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path); + should_persist = true; } else { break } } @@ -1278,18 +1281,12 @@ impl OutboundPayments { reason: *reason, }, None)); retain = false; + should_persist = true; } } retain }); - } - - #[rustfmt::skip] - pub(super) fn needs_abandon(&self) -> bool { - let outbounds = self.pending_outbound_payments.lock().unwrap(); - outbounds.iter().any(|(_, pmt)| - !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled() && - !pmt.is_awaiting_invoice()) + should_persist } #[rustfmt::skip] @@ -2262,15 +2259,13 @@ impl OutboundPayments { }); } - // Returns a bool indicating whether a PendingHTLCsForwardable event should be generated. pub(super) fn fail_htlc( &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, path: &Path, session_priv: &SecretKey, payment_id: &PaymentId, probing_cookie_secret: [u8; 32], secp_ctx: &Secp256k1, pending_events: &Mutex)>>, logger: &L, - ) -> bool - where + ) where L::Target: Logger, { #[cfg(any(test, feature = "_test_utils"))] @@ -2298,8 +2293,6 @@ impl OutboundPayments { session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - // If any payments already need retry, there's no need to generate a redundant - // `PendingHTLCsForwardable`. 
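The `check_retry_payments` change above folds the old `needs_abandon()` poll into the retry pass itself: the method now reports whether it mutated payment state, so the caller only repersists when something actually changed. A minimal, self-contained sketch of that pattern, with illustrative stand-in types rather than the real LDK call site:

```rust
// Illustrative stand-ins only; the real method lives on OutboundPayments and is
// driven from ChannelManager with its full argument list.
struct PendingPayment {
	needs_retry: bool,
	failed_permanently: bool,
}

struct Outbounds {
	payments: Vec<PendingPayment>,
}

impl Outbounds {
	// Retry what can be retried, drop what must be abandoned, and report whether
	// anything changed so the caller knows a repersist is needed.
	fn check_retry_payments(&mut self) -> bool {
		let mut should_persist = false;
		for payment in self.payments.iter_mut().filter(|p| p.needs_retry) {
			// ...find a route and resend the payment here...
			payment.needs_retry = false;
			should_persist = true;
		}
		self.payments.retain(|p| {
			if p.failed_permanently {
				// ...queue a PaymentFailed event here...
				should_persist = true;
				false
			} else {
				true
			}
		});
		should_persist
	}
}

fn main() {
	let payments = vec![PendingPayment { needs_retry: true, failed_permanently: false }];
	let mut outbounds = Outbounds { payments };
	if outbounds.check_retry_payments() {
		// The caller persists its payment state only when something changed.
		println!("payment state changed; repersisting");
	}
}
```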
let already_awaiting_retry = outbounds.iter().any(|(_, pmt)| { let mut awaiting_retry = false; if pmt.is_auto_retryable_now() { @@ -2314,7 +2307,6 @@ impl OutboundPayments { }); let mut full_failure_ev = None; - let mut pending_retry_ev = false; let attempts_remaining = if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) { if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { @@ -2323,7 +2315,7 @@ impl OutboundPayments { "Received duplicative fail for HTLC with payment_hash {}", &payment_hash ); - return false; + return; } if payment.get().is_fulfilled() { log_trace!( @@ -2331,7 +2323,7 @@ impl OutboundPayments { "Received failure of HTLC with payment_hash {} after payment completion", &payment_hash ); - return false; + return; } let mut is_retryable_now = payment.get().is_auto_retryable_now(); if let Some(scid) = short_channel_id { @@ -2379,7 +2371,7 @@ impl OutboundPayments { "Received duplicative fail for HTLC with payment_hash {}", &payment_hash ); - return false; + return; }; core::mem::drop(outbounds); log_trace!(logger, "Failing outbound payment HTLC with payment_hash {}", &payment_hash); @@ -2405,7 +2397,6 @@ impl OutboundPayments { // payment will sit in our outbounds forever. if attempts_remaining && !already_awaiting_retry { debug_assert!(full_failure_ev.is_none()); - pending_retry_ev = true; } events::Event::PaymentPathFailed { payment_id: Some(*payment_id), @@ -2428,7 +2419,6 @@ impl OutboundPayments { if let Some(ev) = full_failure_ev { pending_events.push_back((ev, None)); } - pending_retry_ev } #[rustfmt::skip] diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9fde71ad72e..b468d8de826 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -158,9 +158,9 @@ fn mpp_retry() { commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. - expect_pending_htlcs_forwardable!(&nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[2], [fail]); let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -170,11 +170,6 @@ fn mpp_retry() { nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); - match events[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - } - events.remove(1); let conditions = PaymentFailedConditions::new().mpp_parts_remain(); expect_payment_failed_conditions_event(events, hash, false, conditions); @@ -280,9 +275,9 @@ fn mpp_retry_overpay() { commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. 
- expect_pending_htlcs_forwardable!(&nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[2], [fail]); let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); @@ -293,11 +288,6 @@ fn mpp_retry_overpay() { nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); - match events[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - } - events.remove(1); let fail_conditions = PaymentFailedConditions::new().mpp_parts_remain(); expect_payment_failed_conditions_event(events, hash, false, fail_conditions); @@ -385,7 +375,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Failed HTLC from node 3 -> 1 let fail = HTLCHandlingFailureType::Receive { payment_hash: hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[3], [fail]); let htlc_fail_updates = get_htlc_update_msgs!(nodes[3], node_b_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); @@ -397,7 +387,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Failed HTLC from node 1 -> 0 let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); @@ -469,7 +459,7 @@ fn do_test_keysend_payments(public_node: bool) { let send_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); // Previously, a refactor caused us to stop including the payment preimage in the onion which // is sent as a part of keysend payments. 
Thus, to be extra careful here, we scope the preimage // above to demonstrate that we have no way to get the preimage at this point except by @@ -572,14 +562,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(node_a_id, &update_add_0); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); let update_1 = get_htlc_update_msgs!(nodes[1], node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); commitment_signed_dance!(nodes[3], nodes[1], update_1.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); nodes[3].node.process_pending_update_add_htlcs(); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); @@ -621,14 +611,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let update_add_2 = update_2.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(node_a_id, &update_add_2); commitment_signed_dance!(nodes[2], nodes[0], &update_2.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[2], 1); let update_3 = get_htlc_update_msgs!(nodes[2], node_d_id); let update_add_3 = update_3.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3); commitment_signed_dance!(nodes[3], nodes[2], update_3.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); nodes[3].node.process_pending_update_add_htlcs(); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); @@ -654,7 +644,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } nodes[3].node.process_pending_htlc_forwards(); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[3], [fail_type]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] @@ -664,7 +654,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[2], [fail_type]); check_added_monitors!(nodes[2], 1); let update_fail_1 = get_htlc_update_msgs!(nodes[2], node_a_id); @@ -765,7 +755,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] @@ -1055,13 +1045,13 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // incoming HTLCs with the same payment hash later. 
nodes[2].node.fail_htlc_backwards(&hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[2], [fail_type]); check_added_monitors!(nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); @@ -1360,7 +1350,7 @@ fn test_fulfill_restart_failure() { nodes[1].node.fail_htlc_backwards(&payment_hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -1505,12 +1495,12 @@ fn failed_probe_yields_event() { nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); + nodes[1].node.process_pending_htlc_forwards(); // node[0] <- update_fail_htlcs -- node[1] check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], node_a_id); - // Skip the PendingHTLCsForwardable event let _events = nodes[1].node.get_and_clear_pending_events(); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); check_added_monitors!(nodes[0], 0); @@ -1557,7 +1547,7 @@ fn onchain_failed_probe_yields_event() { nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let _ = get_htlc_update_msgs!(nodes[1], node_c_id); @@ -1869,7 +1859,7 @@ fn abandoned_send_payment_idempotent() { nodes[1].node.fail_htlc_backwards(&first_payment_hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the // PaymentId. @@ -2161,7 +2151,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { }; nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); // Check that we generate the PaymentIntercepted event when an intercept forward is detected. 
let events = nodes[1].node.get_and_clear_pending_events(); @@ -2198,7 +2188,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); let fail = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], [fail]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[fail]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); @@ -2233,7 +2223,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { .node .forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt) .unwrap(); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let payment_event = { { @@ -2247,7 +2237,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { }; nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let preimage = Some(nodes[2].node.get_payment_preimage(hash, payment_secret).unwrap()); expect_payment_claimable!(&nodes[2], hash, payment_secret, amt_msat, preimage, node_c_id); @@ -2283,7 +2273,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { } let fail_type = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); let htlc_fail = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -2388,7 +2378,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let ev = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(node_a_id, &ev.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -2409,7 +2399,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { .node .forward_intercepted_htlc(intercept_id, &chan_ids[idx], node_c_id, amt) .unwrap(); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let pay_event = { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); @@ -2423,7 +2413,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { nodes[2].node.handle_update_add_htlc(node_b_id, &pay_event.msgs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &pay_event.commitment_msg, false, true); if idx == num_mpp_parts - 1 { - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); } } @@ -2541,13 +2531,15 @@ fn do_automatic_retries(test: AutoRetry) { let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(node_a_id, &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], - [HTLCHandlingFailureType::Forward { + expect_htlc_failure_conditions( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: $failing_channel_id, - }]); + }], + ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); @@ -2556,22 +2548,21 @@ fn do_automatic_retries(test: AutoRetry) { nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false); - // Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry + // Ensure the attempt fails let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + if $expect_pending_htlcs_forwardable { + assert_eq!(events.len(), 1); + } else { + assert_eq!(events.len(), 2); + } match events[0] { - Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => { assert_eq!(hash, payment_hash); assert_eq!(payment_failed_permanently, false); }, _ => panic!("Unexpected event"), } - if $expect_pending_htlcs_forwardable { - match events[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - } - } else { + if !$expect_pending_htlcs_forwardable { match events[1] { Event::PaymentFailed { payment_hash, .. } => { assert_eq!(Some(hash), payment_hash); @@ -2579,7 +2570,7 @@ fn do_automatic_retries(test: AutoRetry) { _ => panic!("Unexpected event"), } } - } + }; } if test == AutoRetry::Success { @@ -2702,8 +2693,7 @@ fn do_automatic_retries(test: AutoRetry) { let mon_ser = get_monitor!(nodes[0], channel_id_1).encode(); reload_node!(nodes[0], node_encoded, &[&mon_ser], persister, chain_monitor, node_a_reload); - let mut events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true); + nodes[0].node.process_pending_htlc_forwards(); // Make sure we don't retry again. 
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 0); @@ -2934,7 +2924,7 @@ fn auto_retry_partial_failure() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.claim_funds(payment_preimage); @@ -3095,12 +3085,12 @@ fn fails_paying_after_rejected_by_payee() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); let reason = PaymentFailureReason::RecipientRejected; pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, reason); } @@ -3461,7 +3451,7 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!( @@ -3489,7 +3479,7 @@ fn no_extra_retries_on_back_to_back_fail() { // Because we now retry payments as a batch, we simply return a single-path route in the // second, batched, request, have that fail, ensure the payment was abandoned. let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 2); match events[0] { Event::PaymentPathFailed { payment_hash: ev_payment_hash, @@ -3502,10 +3492,6 @@ fn no_extra_retries_on_back_to_back_fail() { _ => panic!("Unexpected event"), } match events[1] { - Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event"), - } - match events[2] { Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, @@ -3523,7 +3509,7 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()] @@ -3695,7 +3681,7 @@ fn test_simple_partial_retry() { nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!( @@ -3729,7 +3715,7 @@ fn test_simple_partial_retry() { } let mut events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 1); match events[0] { Event::PaymentPathFailed { payment_hash: ev_payment_hash, @@ -3741,10 +3727,6 @@ fn test_simple_partial_retry() { }, _ => panic!("Unexpected event"), } - match events[1] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - } nodes[0].node.process_pending_htlc_forwards(); let retry_htlc_updates = SendEvent::from_node(&nodes[0]); @@ -3753,14 +3735,14 @@ fn test_simple_partial_retry() { nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let bs_second_forward = get_htlc_update_msgs!(nodes[1], node_c_id); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); } @@ -3899,9 +3881,7 @@ fn test_threaded_payment_retries() { let _ = &node_ref; let node_a = unsafe { &*node_ref.0 }; while Instant::now() < end_time { - node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable - // Ignore if we have any pending events, just always pretend we just got a - // PendingHTLCsForwardable + node_a.node.get_and_clear_pending_events(); node_a.node.process_pending_htlc_forwards(); } } @@ -3923,7 +3903,7 @@ fn test_threaded_payment_retries() { nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), @@ -3965,7 +3945,7 @@ fn test_threaded_payment_retries() { } // Make sure we have some events to handle when we go around... 
- nodes[0].node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable + nodes[0].node.get_and_clear_pending_events(); nodes[0].node.process_pending_htlc_forwards(); send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); check_added_monitors!(nodes[0], 2); @@ -4192,9 +4172,9 @@ fn do_claim_from_closed_chan(fail_payment: bool) { // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead // and expire both immediately, though, by connecting another 4 blocks. let reason = HTLCHandlingFailureType::Receive { payment_hash: hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[3], [reason.clone()]); connect_blocks(&nodes[3], 4); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[3], [reason]); let reason = PaymentFailureReason::RecipientRejected; pass_failed_payment_back(&nodes[0], &[path_a, path_b], false, hash, reason); @@ -4323,7 +4303,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -4352,7 +4332,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { (false, true) => { nodes[1].node.claim_funds(preimage); let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [fail_type]); let reason = PaymentFailureReason::RecipientRejected; pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, hash, reason); }, @@ -4401,9 +4381,9 @@ fn test_retry_custom_tlvs() { commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); // Attempt to forward the payment and complete the path's failure. - expect_pending_htlcs_forwardable!(&nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[1], [fail]); check_added_monitors!(nodes[1], 1); let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -4413,11 +4393,6 @@ fn test_retry_custom_tlvs() { commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); - match events[1] { - Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event"), - } - events.remove(1); let conditions = PaymentFailedConditions::new().mpp_parts_remain(); expect_payment_failed_conditions_event(events, hash, false, conditions); @@ -4564,7 +4539,7 @@ fn do_test_custom_tlvs_consistency( nodes[2].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -4575,7 +4550,7 @@ fn do_test_custom_tlvs_consistency( check_added_monitors!(nodes[3], 0); commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true); } - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); nodes[3].node.process_pending_htlc_forwards(); if let Some(expected_tlvs) = expected_receive_tlvs { @@ -4597,7 +4572,7 @@ fn do_test_custom_tlvs_consistency( } else { // Expect fail back let expected_destinations = [HTLCHandlingFailureType::Receive { payment_hash: hash }]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); + process_htlcs_and_expect_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id); @@ -4606,7 +4581,7 @@ fn do_test_custom_tlvs_consistency( let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[2], [fail]); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); @@ -4683,13 +4658,13 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &b_recv_ev.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); let b_forward_ev = SendEvent::from_node(&nodes[1]); nodes[3].node.handle_update_add_htlc(node_b_id, &b_forward_ev.msgs[0]); commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[3]); + nodes[3].node.process_pending_htlc_forwards(); // Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which // will result in nodes[2] failing the HTLC back. 
@@ -4698,7 +4673,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_add_htlc(node_a_id, &c_recv_ev.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_id_cd }] @@ -4710,15 +4685,11 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true); let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(payment_fail_retryable_evs.len(), 2); + assert_eq!(payment_fail_retryable_evs.len(), 1); if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] { } else { panic!(); } - if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] { - } else { - panic!(); - } // Before we allow the HTLC to be retried, optionally change the payment_metadata we have // stored for our payment. @@ -4752,7 +4723,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_add_htlc(node_a_id, &as_resend.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[2], 1); let cs_forward = SendEvent::from_node(&nodes[2]); let cd_chan_id = cs_forward.msgs[0].channel_id; @@ -4763,9 +4734,9 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { // the payment metadata was modified, failing only the one modified HTLC and retaining the // other. if do_modify { - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_conditions( + expect_htlc_failure_conditions( nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }], ); @@ -4779,9 +4750,9 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let events = nodes[2].node.get_and_clear_pending_events(); let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_chan_id }; - expect_pending_htlcs_forwardable_conditions(events, &[fail_type]); + expect_htlc_failure_conditions(events, &[fail_type]); } else { - expect_pending_htlcs_forwardable!(nodes[3]); + nodes[3].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); let route: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, payment_preimage)); @@ -4880,9 +4851,9 @@ fn test_htlc_forward_considers_anchor_outputs_value() { }; // The forwarding node should reject forwarding it as expected. 
- expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[1], [fail]); check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -5029,7 +5000,7 @@ fn test_non_strict_forwarding() { nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -5043,7 +5014,7 @@ fn test_non_strict_forwarding() { nodes[2].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); assert!(matches!(events[0], Event::PaymentClaimable { .. })); @@ -5069,7 +5040,8 @@ fn test_non_strict_forwarding() { nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let routed_scid = route.paths[0].hops[1].short_channel_id; let routed_chan_id = match routed_scid { @@ -5081,7 +5053,7 @@ fn test_non_strict_forwarding() { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_chan_id }; - expect_pending_htlcs_forwardable_conditions(events, &[fail]); + expect_htlc_failure_conditions(events, &[fail]); let updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 623774acb5e..704a3406156 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -87,7 +87,7 @@ fn test_priv_forwarding_rejection() { SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] @@ -605,7 +605,7 @@ fn test_inbound_scid_privacy() { assert_eq!(node_b_id, payment_event.node_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), 
&[HTLCHandlingFailureType::Forward { @@ -701,8 +701,8 @@ fn test_scid_alias_returned() { nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1].node.process_pending_htlc_forwards(); + process_htlcs_and_expect_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), @@ -739,7 +739,7 @@ fn test_scid_alias_returned() { nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { @@ -971,13 +971,14 @@ fn test_0conf_channel_with_async_monitor() { .channel_monitor_updated(bs_raa.channel_id, latest_update) .unwrap(); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.get_and_clear_pending_events(); + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let bs_send = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_send.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], bs_send.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + nodes[2].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, 1_000_000); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 99342cb970f..776638c72b1 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -141,7 +141,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { get_event_msg!(local_node, MessageSendEvent::SendRevokeAndACK, remote_node_id); remote_node.node.handle_revoke_and_ack(local_node_id, &last_revoke_and_ack); check_added_monitors(remote_node, 1); - expect_pending_htlcs_forwardable!(remote_node); + remote_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( remote_node.node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] @@ -376,7 +376,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { let update_add = get_htlc_update_msgs!(&nodes[0], node_id_1); nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]); commitment_signed_dance!(&nodes[1], &nodes[0], update_add.commitment_signed, false); - expect_pending_htlcs_forwardable!(&nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash2, payment_secret2, payment_amount); // Have nodes[1] attempt to fail/claim nodes[0]'s payment. 
Since nodes[1] already sent out @@ -384,7 +384,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { if fail_htlc { nodes[1].node.fail_htlc_backwards(&payment_hash2); let failed_payment = HTLCHandlingFailureType::Receive { payment_hash: payment_hash2 }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [failed_payment]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[1], [failed_payment]); } else { nodes[1].node.claim_funds(payment_preimage2); check_added_monitors(&nodes[1], 1); @@ -417,8 +417,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // The payment from nodes[0] should now be seen as failed/successful. let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); - assert!(events.iter().find(|e| matches!(e, Event::PendingHTLCsForwardable { .. })).is_some()); + assert_eq!(events.len(), 2); if fail_htlc { assert!(events.iter().find(|e| matches!(e, Event::PaymentFailed { .. })).is_some()); assert!(events.iter().find(|e| matches!(e, Event::PaymentPathFailed { .. })).is_some()); @@ -434,7 +433,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { if fail_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash1); let failed_payment = HTLCHandlingFailureType::Receive { payment_hash: payment_hash1 }; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], [failed_payment]); + process_htlcs_and_expect_htlc_handling_failed!(&nodes[0], [failed_payment]); } else { nodes[0].node.claim_funds(payment_preimage1); } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 17028b36ae5..8637bde8fb9 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -559,7 +559,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, let raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); check_added_monitors(&nodes[1], 1); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); } } else { send_payment(&nodes[0], &[&nodes[1]], 8000000); @@ -709,82 +709,6 @@ fn test_data_loss_protect() { do_test_data_loss_protect(false, false, false); } -#[test] -fn test_forwardable_regen() { - // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the - // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive - // HTLCs. - // We test it for both payment receipt and payment forwarding. 
- - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let persister; - let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes_1_deserialized; - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; - - // First send a payment to nodes[1] - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - - expect_pending_htlcs_forwardable_ignore!(nodes[1]); - - // Next send a payment which is forwarded by nodes[1] - let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000); - nodes[0].node.send_payment_with_route(route_2, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - - // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); - - let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); - let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); - reload_node!(nodes[1], nodes[1].node.encode(), &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); - - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - // Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated - // the commitment state. 
- let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); - reconnect_args.send_channel_ready = (true, true); - reconnect_nodes(reconnect_args); - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000); - check_added_monitors!(nodes[1], 1); - - let mut events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); - expect_payment_claimable!(nodes[2], payment_hash_2, payment_secret_2, 200_000); - - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); -} - fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_restart: bool) { // Test what happens if a node receives an MPP payment, claims it, but crashes before // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only @@ -1026,12 +950,11 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - // Store the `ChannelManager` before handling the `PendingHTLCsForwardable`/`HTLCIntercepted` - // events, expecting either event (and the HTLC itself) to be missing on reload even though its - // present when we serialized. + // Store the `ChannelManager` before handling the `HTLCIntercepted` event, expecting the event + // (and the HTLC itself) to be missing on reload even though it's present when we serialized.
let node_encoded = nodes[1].node.encode(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); let mut intercept_id = None; let mut expected_outbound_amount_msat = None; @@ -1123,7 +1046,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } if !claim_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } @@ -1189,7 +1112,7 @@ fn removed_payment_no_manager_persistence() { let node_encoded = nodes[1].node.encode(); nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1221,7 +1144,7 @@ fn removed_payment_no_manager_persistence() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1334,7 +1257,7 @@ fn test_htlc_localremoved_persistence() { let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 0ae3048215e..e55dfc74009 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -132,7 +132,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { } else { // Confirm the timeout tx and check that we fail the HTLC backwards connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new())); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + process_htlcs_and_expect_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); } check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/shutdown_tests.rs 
b/lightning/src/ln/shutdown_tests.rs index 9826b8a39cd..124e9e6f8e5 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -567,7 +567,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { check_added_monitors!(nodes[1], 1); nodes[1].node.handle_shutdown(node_a_id, &node_0_shutdown); commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false); - expect_pending_htlcs_forwardable!(nodes[1]); + nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] @@ -1557,7 +1557,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { if use_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash_opt.unwrap()); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + process_htlcs_and_expect_htlc_handling_failed!( nodes[0], [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_opt.unwrap() }] ); diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 8fa508ecf93..ff04807775f 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -136,13 +136,6 @@ pub fn test_async_inbound_update_fee() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); check_added_monitors(&nodes[0], 1); - let events_2 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - match events_2[0] { - Event::PendingHTLCsForwardable { .. } => {}, // If we actually processed we'd receive the payment - _ => panic!("Unexpected event"), - } - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); // deliver (6) check_added_monitors(&nodes[1], 1); } @@ -726,7 +719,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - expect_pending_htlcs_forwardable!(nodes[0]); + nodes[0].node.process_pending_htlc_forwards(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -1136,12 +1129,7 @@ pub fn do_cannot_afford_on_holding_cell_release( panic!(); } - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - assert!(matches!(events[0], Event::PendingHTLCsForwardable { .. })); - // Release the update_fee from its holding cell - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); if can_afford { // We could afford the update_fee, sanity check everything