diff --git a/fuzz/src/lsps_message.rs b/fuzz/src/lsps_message.rs index 299b9f07955..2bc83c3fcd6 100644 --- a/fuzz/src/lsps_message.rs +++ b/fuzz/src/lsps_message.rs @@ -21,7 +21,7 @@ use lightning::util::test_utils::{ }; use lightning_liquidity::lsps0::ser::LSPS_MESSAGE_TYPE_ID; -use lightning_liquidity::LiquidityManager; +use lightning_liquidity::LiquidityManagerSync; use core::time::Duration; @@ -77,15 +77,16 @@ pub fn do_test(data: &[u8]) { genesis_block.header.time, )); - let liquidity_manager = Arc::new(LiquidityManager::new( + let liquidity_manager = Arc::new(LiquidityManagerSync::new( Arc::clone(&keys_manager), Arc::clone(&keys_manager), Arc::clone(&manager), None::>, None, + kv_store, None, None, - )); + ).unwrap()); let mut reader = data; if let Ok(Some(msg)) = liquidity_manager.read(LSPS_MESSAGE_TYPE_ID, &mut reader) { let secp = Secp256k1::signing_only(); diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index 47d3211344b..415676f4ea1 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -31,6 +31,7 @@ possiblyrandom = { version = "0.2", path = "../possiblyrandom", default-features tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] } lightning = { version = "0.2.0", path = "../lightning", features = ["_test_utils"] } lightning-invoice = { version = "0.34.0", path = "../lightning-invoice" } +lightning-liquidity = { version = "0.2.0", path = "../lightning-liquidity", default-features = false, features = ["_test_utils"] } lightning-persister = { version = "0.2.0", path = "../lightning-persister" } [lints] diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 95adc65149d..44ce52b8291 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -69,6 +69,8 @@ use lightning::util::wakers::Sleeper; use lightning_rapid_gossip_sync::RapidGossipSync; use lightning_liquidity::ALiquidityManager; +#[cfg(feature = "std")] +use lightning_liquidity::ALiquidityManagerSync; use core::ops::Deref; use core::time::Duration; @@ -424,6 +426,31 @@ pub const NO_LIQUIDITY_MANAGER: Option< CM = &DynChannelManager, Filter = dyn chain::Filter, C = &dyn chain::Filter, + KVStore = dyn lightning::util::persist::KVStore, + K = &dyn lightning::util::persist::KVStore, + TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider, + TP = &dyn lightning_liquidity::utils::time::TimeProvider, + > + Send + + Sync, + >, +> = None; + +/// When initializing a background processor without a liquidity manager, this can be used to avoid +/// specifying a concrete `LiquidityManagerSync` type. 
+#[cfg(all(not(c_bindings), feature = "std"))] +pub const NO_LIQUIDITY_MANAGER_SYNC: Option< + Arc< + dyn ALiquidityManagerSync< + EntropySource = dyn EntropySource, + ES = &dyn EntropySource, + NodeSigner = dyn lightning::sign::NodeSigner, + NS = &dyn lightning::sign::NodeSigner, + AChannelManager = DynChannelManager, + CM = &DynChannelManager, + Filter = dyn chain::Filter, + C = &dyn chain::Filter, + KVStoreSync = dyn lightning::util::persist::KVStoreSync, + KS = &dyn lightning::util::persist::KVStoreSync, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider, TP = &dyn lightning_liquidity::utils::time::TimeProvider, > + Send @@ -544,31 +571,34 @@ pub(crate) mod futures_util { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } - enum JoinerResult> + Unpin> { + enum JoinerResult> + Unpin> { Pending(Option), - Ready(Result<(), E>), + Ready(Result<(), ERR>), } pub(crate) struct Joiner< - E, - A: Future> + Unpin, - B: Future> + Unpin, - C: Future> + Unpin, - D: Future> + Unpin, + ERR, + A: Future> + Unpin, + B: Future> + Unpin, + C: Future> + Unpin, + D: Future> + Unpin, + E: Future> + Unpin, > { - a: JoinerResult, - b: JoinerResult, - c: JoinerResult, - d: JoinerResult, + a: JoinerResult, + b: JoinerResult, + c: JoinerResult, + d: JoinerResult, + e: JoinerResult, } impl< - E, - A: Future> + Unpin, - B: Future> + Unpin, - C: Future> + Unpin, - D: Future> + Unpin, - > Joiner + ERR, + A: Future> + Unpin, + B: Future> + Unpin, + C: Future> + Unpin, + D: Future> + Unpin, + E: Future> + Unpin, + > Joiner { pub(crate) fn new() -> Self { Self { @@ -576,13 +606,14 @@ pub(crate) mod futures_util { b: JoinerResult::Pending(None), c: JoinerResult::Pending(None), d: JoinerResult::Pending(None), + e: JoinerResult::Pending(None), } } pub(crate) fn set_a(&mut self, fut: A) { self.a = JoinerResult::Pending(Some(fut)); } - pub(crate) fn set_a_res(&mut self, res: Result<(), E>) { + pub(crate) fn set_a_res(&mut self, res: Result<(), ERR>) { self.a = JoinerResult::Ready(res); } pub(crate) fn set_b(&mut self, fut: B) { @@ -594,19 +625,23 @@ pub(crate) mod futures_util { pub(crate) fn set_d(&mut self, fut: D) { self.d = JoinerResult::Pending(Some(fut)); } + pub(crate) fn set_e(&mut self, fut: E) { + self.e = JoinerResult::Pending(Some(fut)); + } } impl< - E, - A: Future> + Unpin, - B: Future> + Unpin, - C: Future> + Unpin, - D: Future> + Unpin, - > Future for Joiner + ERR, + A: Future> + Unpin, + B: Future> + Unpin, + C: Future> + Unpin, + D: Future> + Unpin, + E: Future> + Unpin, + > Future for Joiner where - Joiner: Unpin, + Joiner: Unpin, { - type Output = [Result<(), E>; 4]; + type Output = [Result<(), ERR>; 5]; fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll { let mut all_complete = true; macro_rules! 
handle { @@ -615,7 +650,7 @@ pub(crate) mod futures_util { JoinerResult::Pending(None) => { self.$val = JoinerResult::Ready(Ok(())); }, - JoinerResult::::Pending(Some(ref mut val)) => { + JoinerResult::::Pending(Some(ref mut val)) => { match Pin::new(val).poll(ctx) { Poll::Ready(res) => { self.$val = JoinerResult::Ready(res); @@ -633,9 +668,10 @@ pub(crate) mod futures_util { handle!(b); handle!(c); handle!(d); + handle!(e); if all_complete { - let mut res = [Ok(()), Ok(()), Ok(()), Ok(())]; + let mut res = [Ok(()), Ok(()), Ok(()), Ok(()), Ok(())]; if let JoinerResult::Ready(ref mut val) = &mut self.a { core::mem::swap(&mut res[0], val); } @@ -648,6 +684,9 @@ pub(crate) mod futures_util { if let JoinerResult::Ready(ref mut val) = &mut self.d { core::mem::swap(&mut res[3], val); } + if let JoinerResult::Ready(ref mut val) = &mut self.e { + core::mem::swap(&mut res[4], val); + } Poll::Ready(res) } else { Poll::Pending @@ -731,7 +770,7 @@ use futures_util::{dummy_waker, Joiner, OptionalSelector, Selector, SelectorOutp /// # type P2PGossipSync
    = lightning::routing::gossip::P2PGossipSync, Arc
      , Arc>; /// # type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager, B, FE, Logger>; /// # type OnionMessenger = lightning::onion_message::messenger::OnionMessenger, Arc, Arc, Arc>, Arc, Arc, Arc>>, Arc>, lightning::ln::peer_handler::IgnoringMessageHandler, lightning::ln::peer_handler::IgnoringMessageHandler, lightning::ln::peer_handler::IgnoringMessageHandler>; -/// # type LiquidityManager = lightning_liquidity::LiquidityManager, Arc, Arc>, Arc, Arc>; +/// # type LiquidityManager = lightning_liquidity::LiquidityManager, Arc, Arc>, Arc, Arc, Arc>; /// # type Scorer = RwLock, Arc>>; /// # type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager, B, FE, Arc
        , Logger, F, StoreSync>; /// # type OutputSweeper = lightning::util::sweep::OutputSweeper, Arc, Arc, Arc, Arc, Arc, Arc>; @@ -976,7 +1015,7 @@ where OptionalSelector { optional_future: None } }; let lm_fut = if let Some(lm) = liquidity_manager.as_ref() { - let fut = lm.get_lm().get_pending_msgs_future(); + let fut = lm.get_lm().get_pending_msgs_or_needs_persist_future(); OptionalSelector { optional_future: Some(fut) } } else { OptionalSelector { optional_future: None } @@ -1179,6 +1218,17 @@ where None => {}, } + if let Some(liquidity_manager) = liquidity_manager.as_ref() { + log_trace!(logger, "Persisting LiquidityManager..."); + let fut = async { + liquidity_manager.get_lm().persist().await.map_err(|e| { + log_error!(logger, "Persisting LiquidityManager failed: {}", e); + e + }) + }; + futures.set_e(Box::pin(fut)); + } + // Run persistence tasks in parallel and exit if any of them returns an error. for res in futures.await { res?; @@ -1450,7 +1500,7 @@ impl BackgroundProcessor { CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, - LM::Target: ALiquidityManager, + LM::Target: ALiquidityManagerSync, D::Target: ChangeDestinationSourceSync, O::Target: 'static + OutputSpender, K::Target: 'static + KVStoreSync, @@ -1535,7 +1585,7 @@ impl BackgroundProcessor { &channel_manager.get_cm().get_event_or_persistence_needed_future(), &chain_monitor.get_update_future(), &om.get_om().get_update_future(), - &lm.get_lm().get_pending_msgs_future(), + &lm.get_lm().get_pending_msgs_or_needs_persist_future(), ), (Some(om), None) => Sleeper::from_three_futures( &channel_manager.get_cm().get_event_or_persistence_needed_future(), @@ -1545,7 +1595,7 @@ impl BackgroundProcessor { (None, Some(lm)) => Sleeper::from_three_futures( &channel_manager.get_cm().get_event_or_persistence_needed_future(), &chain_monitor.get_update_future(), - &lm.get_lm().get_pending_msgs_future(), + &lm.get_lm().get_pending_msgs_or_needs_persist_future(), ), (None, None) => Sleeper::from_two_futures( &channel_manager.get_cm().get_event_or_persistence_needed_future(), @@ -1579,6 +1629,13 @@ impl BackgroundProcessor { log_trace!(logger, "Done persisting ChannelManager."); } + if let Some(liquidity_manager) = liquidity_manager.as_ref() { + log_trace!(logger, "Persisting LiquidityManager..."); + let _ = liquidity_manager.get_lm().persist().map_err(|e| { + log_error!(logger, "Persisting LiquidityManager failed: {}", e); + }); + } + // Note that we want to run a graph prune once not long after startup before // falling back to our usual hourly prunes. This avoids short-lived clients never // pruning their network graph. 
We run once 60 seconds after startup before @@ -1793,7 +1850,7 @@ mod tests { use lightning::util::test_utils; use lightning::{get_event, get_event_msg}; use lightning_liquidity::utils::time::DefaultTimeProvider; - use lightning_liquidity::LiquidityManager; + use lightning_liquidity::{ALiquidityManagerSync, LiquidityManagerSync}; use lightning_persister::fs_store::FilesystemStore; use lightning_rapid_gossip_sync::RapidGossipSync; use std::collections::VecDeque; @@ -1890,11 +1947,12 @@ mod tests { IgnoringMessageHandler, >; - type LM = LiquidityManager< + type LM = LiquidityManagerSync< Arc, Arc, Arc, Arc, + Arc, Arc, >; @@ -2342,15 +2400,19 @@ mod tests { Arc::clone(&logger), Arc::clone(&keys_manager), )); - let liquidity_manager = Arc::new(LiquidityManager::new( - Arc::clone(&keys_manager), - Arc::clone(&keys_manager), - Arc::clone(&manager), - None, - None, - None, - None, - )); + let liquidity_manager = Arc::new( + LiquidityManagerSync::new( + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&manager), + None, + None, + Arc::clone(&kv_store), + None, + None, + ) + .unwrap(), + ); let node = Node { node: manager, p2p_gossip_sync, @@ -2727,7 +2789,7 @@ mod tests { Some(Arc::clone(&nodes[0].messenger)), nodes[0].rapid_gossip_sync(), Arc::clone(&nodes[0].peer_manager), - Some(Arc::clone(&nodes[0].liquidity_manager)), + Some(nodes[0].liquidity_manager.get_lm_async()), Some(nodes[0].sweeper.sweeper_async()), Arc::clone(&nodes[0].logger), Some(Arc::clone(&nodes[0].scorer)), @@ -3236,7 +3298,7 @@ mod tests { Some(Arc::clone(&nodes[0].messenger)), nodes[0].rapid_gossip_sync(), Arc::clone(&nodes[0].peer_manager), - Some(Arc::clone(&nodes[0].liquidity_manager)), + Some(nodes[0].liquidity_manager.get_lm_async()), Some(nodes[0].sweeper.sweeper_async()), Arc::clone(&nodes[0].logger), Some(Arc::clone(&nodes[0].scorer)), @@ -3451,7 +3513,7 @@ mod tests { Some(Arc::clone(&nodes[0].messenger)), nodes[0].no_gossip_sync(), Arc::clone(&nodes[0].peer_manager), - Some(Arc::clone(&nodes[0].liquidity_manager)), + Some(nodes[0].liquidity_manager.get_lm_async()), Some(nodes[0].sweeper.sweeper_async()), Arc::clone(&nodes[0].logger), Some(Arc::clone(&nodes[0].scorer)), @@ -3500,7 +3562,7 @@ mod tests { crate::NO_ONION_MESSENGER, nodes[0].no_gossip_sync(), Arc::clone(&nodes[0].peer_manager), - crate::NO_LIQUIDITY_MANAGER, + crate::NO_LIQUIDITY_MANAGER_SYNC, Some(Arc::clone(&nodes[0].sweeper)), Arc::clone(&nodes[0].logger), Some(Arc::clone(&nodes[0].scorer)), diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index f301e4fe34c..a29fc36043a 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -18,11 +18,13 @@ default = ["std", "time"] std = ["lightning/std"] time = ["std"] backtrace = ["dep:backtrace"] +_test_utils = [] [dependencies] lightning = { version = "0.2.0", path = "../lightning", default-features = false } lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } +lightning-macros = { version = "0.2", path = "../lightning-macros" } bitcoin = { version = "0.32.2", default-features = false, features = ["serde"] } diff --git a/lightning-liquidity/src/events/event_queue.rs b/lightning-liquidity/src/events/event_queue.rs index f59d34ed34a..34ce27dbb91 100644 --- a/lightning-liquidity/src/events/event_queue.rs +++ b/lightning-liquidity/src/events/event_queue.rs @@ -1,55 +1,101 @@ 
 use super::LiquidityEvent;
+
+use crate::lsps2::event::LSPS2ServiceEvent;
+use crate::persist::{
+	LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_KEY,
+	LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE,
+	LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+};
 use crate::sync::{Arc, Mutex};

 use alloc::collections::VecDeque;
 use alloc::vec::Vec;

 use core::future::Future;
+use core::ops::Deref;
 use core::task::{Poll, Waker};

+use lightning::ln::msgs::DecodeError;
+use lightning::util::persist::KVStore;
+use lightning::util::ser::{
+	BigSize, CollectionLength, FixedLengthReader, Readable, Writeable, Writer,
+};
+use lightning::util::wakers::Notifier;
+
 /// The maximum queue size we allow before starting to drop events.
 pub const MAX_EVENT_QUEUE_SIZE: usize = 1000;

-pub(crate) struct EventQueue {
-	queue: Arc<Mutex<VecDeque<LiquidityEvent>>>,
+pub(crate) struct EventQueue<K: Deref + Clone>
+where
+	K::Target: KVStore,
+{
+	state: Arc<Mutex<QueueState>>,
 	waker: Arc<Mutex<Option<Waker>>>,
 	#[cfg(feature = "std")]
 	condvar: Arc<crate::sync::Condvar>,
+	kv_store: K,
+	persist_notifier: Arc<Notifier>,
 }

-impl EventQueue {
-	pub fn new() -> Self {
-		let queue = Arc::new(Mutex::new(VecDeque::new()));
+impl<K: Deref + Clone> EventQueue<K>
+where
+	K::Target: KVStore,
+{
+	pub fn new(
+		queue: VecDeque<LiquidityEvent>, kv_store: K, persist_notifier: Arc<Notifier>,
+	) -> Self {
+		let state = Arc::new(Mutex::new(QueueState { queue, needs_persist: false }));
 		let waker = Arc::new(Mutex::new(None));
 		Self {
-			queue,
+			state,
 			waker,
 			#[cfg(feature = "std")]
 			condvar: Arc::new(crate::sync::Condvar::new()),
+			kv_store,
+			persist_notifier,
 		}
 	}

 	pub fn next_event(&self) -> Option<LiquidityEvent> {
-		self.queue.lock().unwrap().pop_front()
+		let event_opt = {
+			let mut state_lock = self.state.lock().unwrap();
+			if state_lock.queue.is_empty() {
+				// Skip notifying below if nothing changed.
+				return None;
+			}
+
+			state_lock.needs_persist = true;
+			state_lock.queue.pop_front()
+		};
+
+		self.persist_notifier.notify();
+
+		event_opt
 	}

 	pub async fn next_event_async(&self) -> LiquidityEvent {
-		EventFuture { event_queue: Arc::clone(&self.queue), waker: Arc::clone(&self.waker) }.await
+		EventFuture {
+			queue_state: Arc::clone(&self.state),
+			waker: Arc::clone(&self.waker),
+			persist_notifier: Arc::clone(&self.persist_notifier),
+		}
+		.await
 	}

 	#[cfg(feature = "std")]
 	pub fn wait_next_event(&self) -> LiquidityEvent {
-		let mut queue = self
+		let mut state_lock = self
 			.condvar
-			.wait_while(self.queue.lock().unwrap(), |queue: &mut VecDeque<LiquidityEvent>| {
-				queue.is_empty()
+			.wait_while(self.state.lock().unwrap(), |state_lock: &mut QueueState| {
+				state_lock.queue.is_empty()
 			})
 			.unwrap();

-		let event = queue.pop_front().expect("non-empty queue");
-		let should_notify = !queue.is_empty();
+		let event = state_lock.queue.pop_front().expect("non-empty queue");
+		let should_notify = !state_lock.queue.is_empty();
+		state_lock.needs_persist = true;

-		drop(queue);
+		drop(state_lock);

 		if should_notify {
 			if let Some(waker) = self.waker.lock().unwrap().take() {
@@ -59,37 +105,98 @@ impl EventQueue {
 			self.condvar.notify_one();
 		}

+		self.persist_notifier.notify();
+
 		event
 	}

 	pub fn get_and_clear_pending_events(&self) -> Vec<LiquidityEvent> {
-		self.queue.lock().unwrap().split_off(0).into()
+		let mut state_lock = self.state.lock().unwrap();
+
+		let needs_persist = !state_lock.queue.is_empty();
+		let events = state_lock.queue.split_off(0).into();
+
+		if needs_persist {
+			state_lock.needs_persist = true;
+		}
+
+		drop(state_lock);
+
+		if needs_persist {
+			self.persist_notifier.notify();
+		}
+
+		events
 	}

 	// Returns an [`EventQueueNotifierGuard`] that will notify about new event when dropped.
-	pub fn notifier(&self) -> EventQueueNotifierGuard<'_> {
+	pub fn notifier(&self) -> EventQueueNotifierGuard<'_, K> {
 		EventQueueNotifierGuard(self)
 	}
+
+	pub async fn persist(&self) -> Result<(), lightning::io::Error> {
+		let fut = {
+			let mut state_lock = self.state.lock().unwrap();
+
+			if !state_lock.needs_persist {
+				return Ok(());
+			}
+
+			state_lock.needs_persist = false;
+			let encoded = EventQueueSerWrapper(&state_lock.queue).encode();
+
+			self.kv_store.write(
+				LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+				LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE,
+				LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_KEY,
+				encoded,
+			)
+		};
+
+		fut.await.map_err(|e| {
+			self.state.lock().unwrap().needs_persist = true;
+			e
+		})?;
+
+		Ok(())
+	}
+}
+
+struct QueueState {
+	queue: VecDeque<LiquidityEvent>,
+	needs_persist: bool,
 }

 // A guard type that will notify about new events when dropped.
 #[must_use]
-pub(crate) struct EventQueueNotifierGuard<'a>(&'a EventQueue);
-
-impl<'a> EventQueueNotifierGuard<'a> {
+pub(crate) struct EventQueueNotifierGuard<'a, K: Deref + Clone>(&'a EventQueue<K>)
+where
+	K::Target: KVStore;
+
+impl<'a, K: Deref + Clone> EventQueueNotifierGuard<'a, K>
+where
+	K::Target: KVStore,
+{
 	pub fn enqueue<E: Into<LiquidityEvent>>(&self, event: E) {
-		let mut queue = self.0.queue.lock().unwrap();
-		if queue.len() < MAX_EVENT_QUEUE_SIZE {
-			queue.push_back(event.into());
+		let mut state_lock = self.0.state.lock().unwrap();
+		if state_lock.queue.len() < MAX_EVENT_QUEUE_SIZE {
+			state_lock.queue.push_back(event.into());
+			state_lock.needs_persist = true;
 		} else {
 			return;
 		}
 	}
 }

-impl<'a> Drop for EventQueueNotifierGuard<'a> {
+impl<'a, K: Deref + Clone> Drop for EventQueueNotifierGuard<'a, K>
+where
+	K::Target: KVStore,
+{
 	fn drop(&mut self) {
-		let should_notify = !self.0.queue.lock().unwrap().is_empty();
+		let (should_notify, should_persist_notify) = {
+			let state_lock = self.0.state.lock().unwrap();
+			(!state_lock.queue.is_empty(), state_lock.needs_persist)
+		};

 		if should_notify {
 			if let Some(waker) = self.0.waker.lock().unwrap().take() {
@@ -99,12 +206,17 @@ impl<'a> Drop for EventQueueNotifierGuard<'a> {
 			#[cfg(feature = "std")]
 			self.0.condvar.notify_one();
 		}
+
+		if should_persist_notify {
+			self.0.persist_notifier.notify();
+		}
 	}
 }

 struct EventFuture {
-	event_queue: Arc<Mutex<VecDeque<LiquidityEvent>>>,
+	queue_state: Arc<Mutex<QueueState>>,
 	waker: Arc<Mutex<Option<Waker>>>,
+	persist_notifier: Arc<Notifier>,
 }

 impl Future for EventFuture {
@@ -113,12 +225,147 @@ impl Future for EventFuture {
 	fn poll(
 		self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>,
 	) -> core::task::Poll<Self::Output> {
-		if let Some(event) = self.event_queue.lock().unwrap().pop_front() {
-			Poll::Ready(event)
-		} else {
-			*self.waker.lock().unwrap() = Some(cx.waker().clone());
-			Poll::Pending
+		let (res, should_persist_notify) = {
+			let mut state_lock = self.queue_state.lock().unwrap();
+			if let Some(event) = state_lock.queue.pop_front() {
+				state_lock.needs_persist = true;
+				(Poll::Ready(event), true)
+			} else {
+				*self.waker.lock().unwrap() = Some(cx.waker().clone());
+				(Poll::Pending, false)
+			}
+		};
+
+		if should_persist_notify {
+			self.persist_notifier.notify();
+		}
+
+		res
+	}
+}
+
+pub(crate) struct EventQueueDeserWrapper(pub VecDeque<LiquidityEvent>);
+
+impl Readable for EventQueueDeserWrapper {
+	fn read<R: lightning::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		let len: CollectionLength = Readable::read(reader)?;
+		let mut queue = VecDeque::with_capacity(len.0 as usize);
+		for _ in 0..len.0 {
+			let event = match Readable::read(reader)? {
+				0u8 => {
+					// LSPS0ClientEvents are not persisted.
+					continue;
+				},
+				1u8 => {
+					// LSPS1ClientEvents are not persisted.
+					continue;
+				},
+				2u8 => {
+					// LSPS1ServiceEvents are not persisted.
+					continue;
+				},
+				3u8 => {
+					// LSPS2ClientEvents are not persisted.
+					continue;
+				},
+				4u8 => {
+					let ev = Readable::read(reader)?;
+					LiquidityEvent::LSPS2Service(ev)
+				},
+				5u8 => {
+					// LSPS5ClientEvents are not persisted.
+					continue;
+				},
+				6u8 => {
+					let ev = Readable::read(reader)?;
+					LiquidityEvent::LSPS5Service(ev)
+				},
+				x if x % 2 == 1 => {
+					// If the event is of unknown type, assume it was written with `write_tlv_fields`,
+					// which prefixes the whole thing with a length BigSize. Because the event is
+					// odd-type unknown, we should treat it as `Ok(None)` even if it has some TLV
+					// fields that are even. Thus, we avoid using `read_tlv_fields` and simply read
+					// exactly the number of bytes specified, ignoring them entirely.
+					let tlv_len: BigSize = Readable::read(reader)?;
+					FixedLengthReader::new(reader, tlv_len.0)
+						.eat_remaining()
+						.map_err(|_| DecodeError::ShortRead)?;
+					continue;
+				},
+				_ => return Err(DecodeError::InvalidValue),
+			};
+			queue.push_back(event);
+		}
+		Ok(Self(queue))
+	}
+}
+
+struct EventQueueSerWrapper<'a>(&'a VecDeque<LiquidityEvent>);
+
+impl Writeable for EventQueueSerWrapper<'_> {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), lightning::io::Error> {
+		let maybe_process_event = |event: &LiquidityEvent,
+		                           writer: Option<&mut W>|
+		 -> Result<bool, lightning::io::Error> {
+			match event {
+				LiquidityEvent::LSPS0Client(_) => {
+					// LSPS0ClientEvents are not persisted.
+					Ok(false)
+				},
+				LiquidityEvent::LSPS1Client(_) => {
+					// LSPS1ClientEvents are not persisted.
+					Ok(false)
+				},
+				#[cfg(lsps1_service)]
+				LiquidityEvent::LSPS1Service(_) => {
+					// LSPS1ServiceEvents are not persisted.
+					Ok(false)
+				},
+				LiquidityEvent::LSPS2Client(_) => {
+					// LSPS2ClientEvents are not persisted.
+					Ok(false)
+				},
+				LiquidityEvent::LSPS2Service(event) => {
+					if matches!(event, LSPS2ServiceEvent::GetInfo { .. })
+						|| matches!(event, LSPS2ServiceEvent::BuyRequest { .. })
+					{
+						// Skip persisting GetInfo and BuyRequest events as we prune the pending
+						// request state currently anyway.
+						Ok(false)
+					} else {
+						if let Some(writer) = writer {
+							4u8.write(writer)?;
+							event.write(writer)?;
+						}
+						Ok(true)
+					}
+				},
+				LiquidityEvent::LSPS5Client(_) => {
+					// LSPS5ClientEvents are not persisted.
+					Ok(false)
+				},
+				LiquidityEvent::LSPS5Service(event) => {
+					if let Some(writer) = writer {
+						6u8.write(writer)?;
+						event.write(writer)?;
+					}
+					Ok(true)
+				},
+			}
+		};
+
+		let mut persisted_events_len = 0;
+		for e in self.0.iter() {
+			if maybe_process_event(e, None)? {
+				persisted_events_len += 1;
+			}
+		}
+
+		CollectionLength(persisted_events_len).write(writer)?;
+		for e in self.0.iter() {
+			maybe_process_event(e, Some(writer))?;
 		}
+		Ok(())
 	}
 }
@@ -131,10 +378,14 @@ mod tests {
 	use crate::lsps0::event::LSPS0ClientEvent;

 	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};

 	use core::sync::atomic::{AtomicU16, Ordering};
+	use lightning::util::persist::KVStoreSyncWrapper;
+	use lightning::util::test_utils::TestStore;
 	use std::sync::Arc;
 	use std::time::Duration;

-	let event_queue = Arc::new(EventQueue::new());
+	let kv_store = Arc::new(KVStoreSyncWrapper(Arc::new(TestStore::new(false))));
+	let persist_notifier = Arc::new(Notifier::new());
+	let event_queue = Arc::new(EventQueue::new(VecDeque::new(), kv_store, persist_notifier));
 	assert_eq!(event_queue.next_event(), None);

 	let secp_ctx = Secp256k1::new();
diff --git a/lightning-liquidity/src/events/mod.rs b/lightning-liquidity/src/events/mod.rs
index 82e480a454c..c39b8b9fd59 100644
--- a/lightning-liquidity/src/events/mod.rs
+++ b/lightning-liquidity/src/events/mod.rs
@@ -17,8 +17,8 @@

 mod event_queue;

-pub(crate) use event_queue::EventQueue;
 pub use event_queue::MAX_EVENT_QUEUE_SIZE;
+pub(crate) use event_queue::{EventQueue, EventQueueDeserWrapper};

 use crate::lsps0;
 use crate::lsps1;
diff --git a/lightning-liquidity/src/lib.rs b/lightning-liquidity/src/lib.rs
index 6c26b21f3b2..ee081f720c6 100644
--- a/lightning-liquidity/src/lib.rs
+++ b/lightning-liquidity/src/lib.rs
@@ -65,6 +65,7 @@ pub mod lsps2;
 pub mod lsps5;
 mod manager;
 pub mod message_queue;
+pub mod persist;
 #[allow(dead_code)]
 #[allow(unused_imports)]
 mod sync;
@@ -73,5 +74,6 @@ mod tests;
 pub mod utils;

 pub use manager::{
-	ALiquidityManager, LiquidityClientConfig, LiquidityManager, LiquidityServiceConfig,
+	ALiquidityManager, ALiquidityManagerSync, LiquidityClientConfig, LiquidityManager,
+	LiquidityManagerSync, LiquidityServiceConfig,
 };
diff --git a/lightning-liquidity/src/lsps0/client.rs b/lightning-liquidity/src/lsps0/client.rs
index f7e01b323f3..7f26d74d707 100644
--- a/lightning-liquidity/src/lsps0/client.rs
+++ b/lightning-liquidity/src/lsps0/client.rs
@@ -18,28 +18,31 @@ use crate::utils;
 use lightning::ln::msgs::{ErrorAction, LightningError};
 use lightning::sign::EntropySource;
 use lightning::util::logger::Level;
+use lightning::util::persist::KVStore;

 use bitcoin::secp256k1::PublicKey;

 use core::ops::Deref;

 /// A message handler capable of sending and handling bLIP-50 / LSPS0 messages.
-pub struct LSPS0ClientHandler<ES: Deref>
+pub struct LSPS0ClientHandler<ES: Deref, K: Deref + Clone>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	entropy_source: ES,
 	pending_messages: Arc<MessageQueue>,
-	pending_events: Arc<EventQueue>,
+	pending_events: Arc<EventQueue<K>>,
 }

-impl<ES: Deref> LSPS0ClientHandler<ES>
+impl<ES: Deref, K: Deref + Clone> LSPS0ClientHandler<ES, K>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	/// Returns a new instance of [`LSPS0ClientHandler`].
 	pub(crate) fn new(
-		entropy_source: ES, pending_messages: Arc<MessageQueue>, pending_events: Arc<EventQueue>,
+		entropy_source: ES, pending_messages: Arc<MessageQueue>, pending_events: Arc<EventQueue<K>>,
 	) -> Self {
 		Self { entropy_source, pending_messages, pending_events }
 	}
@@ -86,9 +89,10 @@ where
 	}
 }

-impl<ES: Deref> LSPSProtocolMessageHandler for LSPS0ClientHandler<ES>
+impl<ES: Deref, K: Deref + Clone> LSPSProtocolMessageHandler for LSPS0ClientHandler<ES, K>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	type ProtocolMessage = LSPS0Message;
 	const PROTOCOL_NUMBER: Option<u16> = None;
@@ -113,10 +117,14 @@ where

 #[cfg(test)]
 mod tests {
-
+	use alloc::collections::VecDeque;
 	use alloc::string::ToString;
 	use alloc::sync::Arc;

+	use lightning::util::persist::KVStoreSyncWrapper;
+	use lightning::util::test_utils::TestStore;
+	use lightning::util::wakers::Notifier;
+
 	use crate::lsps0::ser::{LSPSMessage, LSPSRequestId};
 	use crate::tests::utils::{self, TestEntropy};
@@ -124,9 +132,12 @@ mod tests {

 	#[test]
 	fn test_list_protocols() {
-		let pending_messages = Arc::new(MessageQueue::new());
+		let notifier = Arc::new(Notifier::new());
+		let pending_messages = Arc::new(MessageQueue::new(notifier));
 		let entropy_source = Arc::new(TestEntropy {});
-		let event_queue = Arc::new(EventQueue::new());
+		let kv_store = Arc::new(KVStoreSyncWrapper(Arc::new(TestStore::new(false))));
+		let persist_notifier = Arc::new(Notifier::new());
+		let event_queue = Arc::new(EventQueue::new(VecDeque::new(), kv_store, persist_notifier));

 		let lsps0_handler = Arc::new(LSPS0ClientHandler::new(
 			entropy_source,
diff --git a/lightning-liquidity/src/lsps0/event.rs b/lightning-liquidity/src/lsps0/event.rs
index 97a3a950090..4141b51df25 100644
--- a/lightning-liquidity/src/lsps0/event.rs
+++ b/lightning-liquidity/src/lsps0/event.rs
@@ -14,6 +14,8 @@ use alloc::vec::Vec;
 use bitcoin::secp256k1::PublicKey;

 /// An event which an bLIP-50 / LSPS0 client may want to take some action in response to.
+///
+/// **Note:** This event will *not* be persisted across restarts.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum LSPS0ClientEvent {
 	/// Information from the LSP about the protocols they support.
diff --git a/lightning-liquidity/src/lsps0/ser.rs b/lightning-liquidity/src/lsps0/ser.rs
index 213e2760119..70649fe0f50 100644
--- a/lightning-liquidity/src/lsps0/ser.rs
+++ b/lightning-liquidity/src/lsps0/ser.rs
@@ -30,7 +30,7 @@ use crate::prelude::HashMap;

 use lightning::ln::msgs::{DecodeError, LightningError};
 use lightning::ln::wire;
-use lightning::util::ser::{LengthLimitedRead, LengthReadable, WithoutLength};
+use lightning::util::ser::{LengthLimitedRead, LengthReadable, Readable, WithoutLength, Writeable};

 use bitcoin::secp256k1::PublicKey;
@@ -217,6 +217,22 @@ impl wire::Type for RawLSPSMessage {
 #[serde(transparent)]
 pub struct LSPSRequestId(pub String);

+impl Writeable for LSPSRequestId {
+	fn write<W: lightning::util::ser::Writer>(
+		&self, writer: &mut W,
+	) -> Result<(), lightning::io::Error> {
+		self.0.write(writer)?;
+		Ok(())
+	}
+}
+
+impl Readable for LSPSRequestId {
+	fn read<R: lightning::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		let s: String = Readable::read(reader)?;
+		Ok(Self(s))
+	}
+}
+
 /// An object representing datetimes as described in bLIP-50 / LSPS0.
 #[derive(Clone, Debug, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)]
 #[serde(transparent)]
@@ -266,6 +282,23 @@ impl Display for LSPSDateTime {
 	}
 }

+impl Writeable for LSPSDateTime {
+	fn write<W: lightning::util::ser::Writer>(
+		&self, writer: &mut W,
+	) -> Result<(), lightning::io::Error> {
+		self.to_rfc3339().write(writer)?;
+		Ok(())
+	}
+}
+
+impl Readable for LSPSDateTime {
+	fn read<R: lightning::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		let s: String = Readable::read(reader)?;
+		let val = Self::from_str(&s).map_err(|_| lightning::ln::msgs::DecodeError::InvalidValue)?;
+		Ok(val)
+	}
+}
+
 /// An error returned in response to an JSON-RPC request.
 ///
 /// Please refer to the [JSON-RPC 2.0 specification](https://www.jsonrpc.org/specification#error_object) for
@@ -933,3 +966,19 @@ pub(crate) mod u32_fee_rate {
 		Ok(FeeRate::from_sat_per_kwu(fee_rate_sat_kwu as u64))
 	}
 }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	use lightning::io::Cursor;
+
+	#[test]
+	fn datetime_serialization() {
+		let expected_datetime = LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap();
+		let mut buf = Vec::new();
+		expected_datetime.write(&mut buf).unwrap();
+		let decoded_datetime: LSPSDateTime = Readable::read(&mut Cursor::new(buf)).unwrap();
+		assert_eq!(expected_datetime, decoded_datetime);
+	}
+}
diff --git a/lightning-liquidity/src/lsps0/service.rs b/lightning-liquidity/src/lsps0/service.rs
index 2b4e6782ce8..e71150c1fd1 100644
--- a/lightning-liquidity/src/lsps0/service.rs
+++ b/lightning-liquidity/src/lsps0/service.rs
@@ -87,13 +87,15 @@ mod tests {
 	use crate::tests::utils;
 	use alloc::string::ToString;
 	use alloc::sync::Arc;
+	use lightning::util::wakers::Notifier;

 	use super::*;

 	#[test]
 	fn test_handle_list_protocols_request() {
 		let protocols: Vec<u16> = vec![];
-		let pending_messages = Arc::new(MessageQueue::new());
+		let notifier = Arc::new(Notifier::new());
+		let pending_messages = Arc::new(MessageQueue::new(notifier));

 		let lsps0_handler = Arc::new(LSPS0ServiceHandler::new(protocols, Arc::clone(&pending_messages)));
diff --git a/lightning-liquidity/src/lsps1/client.rs b/lightning-liquidity/src/lsps1/client.rs
index 45008baaa77..5b9d373698a 100644
--- a/lightning-liquidity/src/lsps1/client.rs
+++ b/lightning-liquidity/src/lsps1/client.rs
@@ -25,6 +25,7 @@ use crate::sync::{Arc, Mutex, RwLock};
 use lightning::ln::msgs::{ErrorAction, LightningError};
 use lightning::sign::EntropySource;
 use lightning::util::logger::Level;
+use lightning::util::persist::KVStore;

 use bitcoin::secp256k1::PublicKey;
 use bitcoin::Address;
@@ -46,25 +47,27 @@ struct PeerState {
 }

 /// The main object allowing to send and receive bLIP-51 / LSPS1 messages.
-pub struct LSPS1ClientHandler<ES: Deref>
+pub struct LSPS1ClientHandler<ES: Deref, K: Deref + Clone>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	entropy_source: ES,
 	pending_messages: Arc<MessageQueue>,
-	pending_events: Arc<EventQueue>,
+	pending_events: Arc<EventQueue<K>>,
 	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 	config: LSPS1ClientConfig,
 }

-impl<ES: Deref> LSPS1ClientHandler<ES>
+impl<ES: Deref, K: Deref + Clone> LSPS1ClientHandler<ES, K>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	/// Constructs an `LSPS1ClientHandler`.
 	pub(crate) fn new(
-		entropy_source: ES, pending_messages: Arc<MessageQueue>, pending_events: Arc<EventQueue>,
-		config: LSPS1ClientConfig,
+		entropy_source: ES, pending_messages: Arc<MessageQueue>,
+		pending_events: Arc<EventQueue<K>>, config: LSPS1ClientConfig,
 	) -> Self {
 		Self {
 			entropy_source,
@@ -429,9 +432,10 @@ where
 	}
 }

-impl<ES: Deref> LSPSProtocolMessageHandler for LSPS1ClientHandler<ES>
+impl<ES: Deref, K: Deref + Clone> LSPSProtocolMessageHandler for LSPS1ClientHandler<ES, K>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	type ProtocolMessage = LSPS1Message;
 	const PROTOCOL_NUMBER: Option<u16> = Some(1);
diff --git a/lightning-liquidity/src/lsps1/event.rs b/lightning-liquidity/src/lsps1/event.rs
index 508a5a42a90..9443ad2269f 100644
--- a/lightning-liquidity/src/lsps1/event.rs
+++ b/lightning-liquidity/src/lsps1/event.rs
@@ -25,6 +25,8 @@ pub enum LSPS1ClientEvent {
 	/// You must check whether LSP supports the parameters the client wants and then call
 	/// [`LSPS1ClientHandler::create_order`] to place an order.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ClientHandler::request_supported_options`]: crate::lsps1::client::LSPS1ClientHandler::request_supported_options
 	/// [`LSPS1ClientHandler::create_order`]: crate::lsps1::client::LSPS1ClientHandler::create_order
 	SupportedOptionsReady {
@@ -43,6 +45,8 @@ pub enum LSPS1ClientEvent {
 	/// A request previously issued via [`LSPS1ClientHandler::request_supported_options`]
 	/// failed as the LSP returned an error response.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ClientHandler::request_supported_options`]: crate::lsps1::client::LSPS1ClientHandler::request_supported_options
 	SupportedOptionsRequestFailed {
 		/// The identifier of the issued bLIP-51 / LSPS1 `get_info` request, as returned by
@@ -66,6 +70,8 @@
 	/// call [`LSPS1ClientHandler::check_order_status`] with the order id
 	/// to get information from LSP about progress of the order.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status
 	OrderCreated {
 		/// The identifier of the issued bLIP-51 / LSPS1 `create_order` request, as returned by
@@ -90,6 +96,8 @@
 	///
 	/// Will be emitted in response to calling [`LSPS1ClientHandler::check_order_status`].
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status
 	OrderStatus {
 		/// The identifier of the issued bLIP-51 / LSPS1 `get_order` request, as returned by
@@ -113,6 +121,8 @@
 	/// A request previously issued via [`LSPS1ClientHandler::create_order`] or [`LSPS1ClientHandler::check_order_status`].
 	/// failed as the LSP returned an error response.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ClientHandler::create_order`]: crate::lsps1::client::LSPS1ClientHandler::create_order
 	/// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status
 	OrderRequestFailed {
@@ -142,6 +152,8 @@ pub enum LSPS1ServiceEvent {
 	/// send order parameters including the details regarding the
 	/// payment and order id for this order for the client.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ServiceHandler::send_payment_details`]: crate::lsps1::service::LSPS1ServiceHandler::send_payment_details
 	RequestForPaymentDetails {
 		/// An identifier that must be passed to [`LSPS1ServiceHandler::send_payment_details`].
@@ -160,6 +172,8 @@ pub enum LSPS1ServiceEvent {
 	/// You must call [`LSPS1ServiceHandler::update_order_status`] to update the client
 	/// regarding the status of the payment and order.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS1ServiceHandler::update_order_status`]: crate::lsps1::service::LSPS1ServiceHandler::update_order_status
 	CheckPaymentConfirmation {
 		/// An identifier that must be passed to [`LSPS1ServiceHandler::update_order_status`].
diff --git a/lightning-liquidity/src/lsps1/service.rs b/lightning-liquidity/src/lsps1/service.rs
index 1b4bdf5cf46..8afea1b4345 100644
--- a/lightning-liquidity/src/lsps1/service.rs
+++ b/lightning-liquidity/src/lsps1/service.rs
@@ -36,6 +36,7 @@ use lightning::ln::msgs::{ErrorAction, LightningError};
 use lightning::sign::EntropySource;
 use lightning::util::errors::APIError;
 use lightning::util::logger::Level;
+use lightning::util::persist::KVStore;

 use bitcoin::secp256k1::PublicKey;
@@ -131,32 +132,35 @@ impl PeerState {
 }

 /// The main object allowing to send and receive bLIP-51 / LSPS1 messages.
-pub struct LSPS1ServiceHandler<ES: Deref, CM: Deref, C: Deref>
+pub struct LSPS1ServiceHandler<ES: Deref, CM: Deref, C: Deref, K: Deref + Clone>
 where
 	ES::Target: EntropySource,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
+	K::Target: KVStore,
 {
 	entropy_source: ES,
 	channel_manager: CM,
 	chain_source: Option<C>,
 	pending_messages: Arc<MessageQueue>,
-	pending_events: Arc<EventQueue>,
+	pending_events: Arc<EventQueue<K>>,
 	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 	config: LSPS1ServiceConfig,
 }

-impl<ES: Deref, CM: Deref, C: Deref> LSPS1ServiceHandler<ES, CM, C>
+impl<ES: Deref, CM: Deref, C: Deref, K: Deref + Clone> LSPS1ServiceHandler<ES, CM, C, K>
 where
 	ES::Target: EntropySource,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	/// Constructs a `LSPS1ServiceHandler`.
 	pub(crate) fn new(
-		entropy_source: ES, pending_messages: Arc<MessageQueue>, pending_events: Arc<EventQueue>,
-		channel_manager: CM, chain_source: Option<C>, config: LSPS1ServiceConfig,
+		entropy_source: ES, pending_messages: Arc<MessageQueue>,
+		pending_events: Arc<EventQueue<K>>, channel_manager: CM, chain_source: Option<C>,
+		config: LSPS1ServiceConfig,
 	) -> Self {
 		Self {
 			entropy_source,
@@ -417,12 +421,13 @@ where
 	}
 }

-impl<ES: Deref, CM: Deref, C: Deref> LSPSProtocolMessageHandler
-	for LSPS1ServiceHandler<ES, CM, C>
+impl<ES: Deref, CM: Deref, C: Deref, K: Deref + Clone> LSPSProtocolMessageHandler
+	for LSPS1ServiceHandler<ES, CM, C, K>
 where
 	ES::Target: EntropySource,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
+	K::Target: KVStore,
 {
 	type ProtocolMessage = LSPS1Message;
 	const PROTOCOL_NUMBER: Option<u16> = Some(1);
diff --git a/lightning-liquidity/src/lsps2/client.rs b/lightning-liquidity/src/lsps2/client.rs
index 7008d42e345..71b2a2bfda7 100644
--- a/lightning-liquidity/src/lsps2/client.rs
+++ b/lightning-liquidity/src/lsps2/client.rs
@@ -10,6 +10,7 @@
 //! Contains the main bLIP-52 / LSPS2 client object, [`LSPS2ClientHandler`].

 use alloc::string::{String, ToString};
+use lightning::util::persist::KVStore;

 use core::default::Default;
 use core::ops::Deref;
@@ -67,25 +68,27 @@ impl PeerState {
 /// opened. Please refer to the [`bLIP-52 / LSPS2 specification`] for more information.
 ///
 /// [`bLIP-52 / LSPS2 specification`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models
-pub struct LSPS2ClientHandler<ES: Deref>
+pub struct LSPS2ClientHandler<ES: Deref, K: Deref + Clone>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	entropy_source: ES,
 	pending_messages: Arc<MessageQueue>,
-	pending_events: Arc<EventQueue>,
+	pending_events: Arc<EventQueue<K>>,
 	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 	config: LSPS2ClientConfig,
 }

-impl<ES: Deref> LSPS2ClientHandler<ES>
+impl<ES: Deref, K: Deref + Clone> LSPS2ClientHandler<ES, K>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	/// Constructs an `LSPS2ClientHandler`.
 	pub(crate) fn new(
-		entropy_source: ES, pending_messages: Arc<MessageQueue>, pending_events: Arc<EventQueue>,
-		config: LSPS2ClientConfig,
+		entropy_source: ES, pending_messages: Arc<MessageQueue>,
+		pending_events: Arc<EventQueue<K>>, config: LSPS2ClientConfig,
 	) -> Self {
 		Self {
 			entropy_source,
@@ -366,9 +369,10 @@ where
 	}
 }

-impl<ES: Deref> LSPSProtocolMessageHandler for LSPS2ClientHandler<ES>
+impl<ES: Deref, K: Deref + Clone> LSPSProtocolMessageHandler for LSPS2ClientHandler<ES, K>
 where
 	ES::Target: EntropySource,
+	K::Target: KVStore,
 {
 	type ProtocolMessage = LSPS2Message;
 	const PROTOCOL_NUMBER: Option<u16> = Some(2);
diff --git a/lightning-liquidity/src/lsps2/event.rs b/lightning-liquidity/src/lsps2/event.rs
index f738dc0d7bc..29cc577f293 100644
--- a/lightning-liquidity/src/lsps2/event.rs
+++ b/lightning-liquidity/src/lsps2/event.rs
@@ -16,6 +16,8 @@ use alloc::vec::Vec;

 use bitcoin::secp256k1::PublicKey;

+use lightning::impl_writeable_tlv_based_enum;
+
 /// An event which an LSPS2 client should take some action in response to.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum LSPS2ClientEvent {
@@ -24,6 +26,8 @@ pub enum LSPS2ClientEvent {
 	/// You must call [`LSPS2ClientHandler::select_opening_params`] with the fee parameter
 	/// you want to use if you wish to proceed opening a channel.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS2ClientHandler::select_opening_params`]: crate::lsps2::client::LSPS2ClientHandler::select_opening_params
 	OpeningParametersReady {
 		/// The identifier of the issued bLIP-52 / LSPS2 `get_info` request, as returned by
@@ -44,6 +48,8 @@ pub enum LSPS2ClientEvent {
 	///
 	/// When the invoice is paid, the LSP will open a channel with the previously agreed upon
 	/// parameters to you.
+	///
+	/// **Note:** This event will *not* be persisted across restarts.
 	InvoiceParametersReady {
 		/// The identifier of the issued bLIP-52 / LSPS2 `buy` request, as returned by
 		/// [`LSPS2ClientHandler::select_opening_params`].
@@ -64,6 +70,8 @@
 	/// A request previously issued via [`LSPS2ClientHandler::request_opening_params`]
 	/// failed as the LSP returned an error response.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS2ClientHandler::request_opening_params`]: crate::lsps2::client::LSPS2ClientHandler::request_opening_params
 	GetInfoFailed {
 		/// The identifier of the issued LSPS2 `get_info` request, as returned by
@@ -81,6 +89,8 @@
 	/// A request previously issued via [`LSPS2ClientHandler::select_opening_params`]
 	/// failed as the LSP returned an error response.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS2ClientHandler::select_opening_params`]: crate::lsps2::client::LSPS2ClientHandler::select_opening_params
 	BuyRequestFailed {
 		/// The identifier of the issued LSPS2 `buy` request, as returned by
@@ -108,6 +118,8 @@ pub enum LSPS2ServiceEvent {
 	/// If an unrecognized or stale token is provided you can use
 	/// `[LSPS2ServiceHandler::invalid_token_provided`] to error the request.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`LSPS2ServiceHandler::opening_fee_params_generated`]: crate::lsps2::service::LSPS2ServiceHandler::opening_fee_params_generated
 	/// [`LSPS2ServiceHandler::invalid_token_provided`]: crate::lsps2::service::LSPS2ServiceHandler::invalid_token_provided
 	GetInfo {
@@ -130,6 +142,8 @@ pub enum LSPS2ServiceEvent {
 	/// [`ChannelManager::get_intercept_scid`] for them to use and then call
 	/// [`LSPS2ServiceHandler::invoice_parameters_generated`].
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`ChannelManager::get_intercept_scid`]: lightning::ln::channelmanager::ChannelManager::get_intercept_scid
 	///
 	/// [`LSPS2ServiceHandler::invoice_parameters_generated`]: crate::lsps2::service::LSPS2ServiceHandler::invoice_parameters_generated
@@ -147,6 +161,11 @@
 	},
 	/// You should open a channel using [`ChannelManager::create_channel`].
 	///
+	/// **Note:** As this event is persisted and might get replayed after restart, you'll need to
+	/// ensure channel creation idempotency. I.e., please check if you already created a
+	/// corresponding channel based on the given `their_network_key` and `intercept_scid` and
+	/// ignore this event in case you did.
+	///
 	/// [`ChannelManager::create_channel`]: lightning::ln::channelmanager::ChannelManager::create_channel
 	OpenChannel {
 		/// The node to open channel with.
 		their_network_key: PublicKey,
@@ -161,3 +180,24 @@
 		intercept_scid: u64,
 	},
 }
+
+impl_writeable_tlv_based_enum!(LSPS2ServiceEvent,
+	(0, GetInfo) => {
+		(0, request_id, required),
+		(2, counterparty_node_id, required),
+		(4, token, option),
+	},
+	(2, BuyRequest) => {
+		(0, request_id, required),
+		(2, counterparty_node_id, required),
+		(4, opening_fee_params, required),
+		(6, payment_size_msat, option),
+	},
+	(4, OpenChannel) => {
+		(0, their_network_key, required),
+		(2, amt_to_forward_msat, required),
+		(4, opening_fee_msat, required),
+		(6, user_channel_id, required),
+		(8, intercept_scid, required),
+	}
+);
diff --git a/lightning-liquidity/src/lsps2/msgs.rs b/lightning-liquidity/src/lsps2/msgs.rs
index 699e5a31737..21e1af80c9e 100644
--- a/lightning-liquidity/src/lsps2/msgs.rs
+++ b/lightning-liquidity/src/lsps2/msgs.rs
@@ -21,6 +21,7 @@ use bitcoin::secp256k1::PublicKey;

 use serde::{Deserialize, Serialize};

+use lightning::impl_writeable_tlv_based;
 use lightning::util::scid_utils;

 use crate::lsps0::ser::{
@@ -122,6 +123,17 @@ pub struct LSPS2OpeningFeeParams {
 	pub promise: String,
 }

+impl_writeable_tlv_based!(LSPS2OpeningFeeParams, {
+	(0, min_fee_msat, required),
+	(2, proportional, required),
+	(4, valid_until, required),
+	(6, min_lifetime, required),
+	(8, max_client_to_self_delay, required),
+	(10, min_payment_size_msat, required),
+	(12, max_payment_size_msat, required),
+	(14, promise, required),
+});
+
 /// A response to a [`LSPS2GetInfoRequest`]
 #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
 pub struct LSPS2GetInfoResponse {
diff --git a/lightning-liquidity/src/lsps2/payment_queue.rs b/lightning-liquidity/src/lsps2/payment_queue.rs
index d6474dc97a0..003939d699d 100644
--- a/lightning-liquidity/src/lsps2/payment_queue.rs
+++ b/lightning-liquidity/src/lsps2/payment_queue.rs
@@ -9,6 +9,7 @@

 use alloc::vec::Vec;

+use lightning::impl_writeable_tlv_based;
 use lightning::ln::channelmanager::InterceptId;
 use lightning_types::payment::PaymentHash;
@@ -62,12 +63,21 @@ impl PaymentQueue {
 	}
 }

+impl_writeable_tlv_based!(PaymentQueue, {
+	(0,
payments, optional_vec), +}); + #[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct PaymentQueueEntry { pub(crate) payment_hash: PaymentHash, pub(crate) htlcs: Vec, } +impl_writeable_tlv_based!(PaymentQueueEntry, { + (0, payment_hash, required), + (2, htlcs, optional_vec), +}); + #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub(crate) struct InterceptedHTLC { pub(crate) intercept_id: InterceptId, @@ -75,6 +85,12 @@ pub(crate) struct InterceptedHTLC { pub(crate) payment_hash: PaymentHash, } +impl_writeable_tlv_based!(InterceptedHTLC, { + (0, intercept_id, required), + (2, expected_outbound_amount_msat, required), + (4, payment_hash, required), +}); + #[cfg(test)] mod tests { use super::*; diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 3aeca0e0937..4330bae18b4 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -9,12 +9,16 @@ //! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`]. +use alloc::boxed::Box; use alloc::string::{String, ToString}; use alloc::vec::Vec; +use lightning::util::persist::KVStore; use core::cmp::Ordering as CmpOrdering; +use core::future::Future as StdFuture; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; +use core::task; use crate::events::EventQueue; use crate::lsps0::ser::{ @@ -28,9 +32,13 @@ use crate::lsps2::utils::{ compute_opening_fee, is_expired_opening_fee_params, is_valid_opening_fee_params, }; use crate::message_queue::{MessageQueue, MessageQueueNotifierGuard}; +use crate::persist::{ + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, +}; use crate::prelude::hash_map::Entry; use crate::prelude::{new_hash_map, HashMap}; use crate::sync::{Arc, Mutex, MutexGuard, RwLock}; +use crate::utils::async_poll::dummy_waker; use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{AChannelManager, FailureCode, InterceptId}; @@ -38,6 +46,8 @@ use lightning::ln::msgs::{ErrorAction, LightningError}; use lightning::ln::types::ChannelId; use lightning::util::errors::APIError; use lightning::util::logger::Level; +use lightning::util::ser::Writeable; +use lightning::{impl_writeable_tlv_based, impl_writeable_tlv_based_enum}; use lightning_types::payment::PaymentHash; @@ -372,6 +382,29 @@ impl OutboundJITChannelState { } } +impl_writeable_tlv_based_enum!(OutboundJITChannelState, + (0, PendingInitialPayment) => { + (0, payment_queue, required), + }, + (2, PendingChannelOpen) => { + (0, payment_queue, required), + (2, opening_fee_msat, required), + }, + (4, PendingPaymentForward) => { + (0, payment_queue, required), + (2, opening_fee_msat, required), + (4, channel_id, required), + }, + (6, PendingPayment) => { + (0, payment_queue, required), + (2, opening_fee_msat, required), + (4, channel_id, required), + }, + (8, PaymentForwarded) => { + (0, channel_id, required), + }, +); + struct OutboundJITChannel { state: OutboundJITChannelState, user_channel_id: u128, @@ -379,6 +412,13 @@ struct OutboundJITChannel { payment_size_msat: Option, } +impl_writeable_tlv_based!(OutboundJITChannel, { + (0, state, required), + (2, user_channel_id, required), + (4, opening_fee_params, required), + (6, payment_size_msat, option), +}); + impl OutboundJITChannel { fn new( payment_size_msat: Option, opening_fee_params: LSPS2OpeningFeeParams, @@ -429,11 +469,12 @@ impl OutboundJITChannel { } } -struct PeerState { +pub(crate) struct PeerState { 
outbound_channels_by_intercept_scid: HashMap, intercept_scid_by_user_channel_id: HashMap, intercept_scid_by_channel_id: HashMap, pending_requests: HashMap, + needs_persist: bool, } impl PeerState { @@ -442,16 +483,19 @@ impl PeerState { let pending_requests = new_hash_map(); let intercept_scid_by_user_channel_id = new_hash_map(); let intercept_scid_by_channel_id = new_hash_map(); + let needs_persist = true; Self { outbound_channels_by_intercept_scid, pending_requests, intercept_scid_by_user_channel_id, intercept_scid_by_channel_id, + needs_persist, } } fn insert_outbound_channel(&mut self, intercept_scid: u64, channel: OutboundJITChannel) { self.outbound_channels_by_intercept_scid.insert(intercept_scid, channel); + self.needs_persist |= true; } fn prune_expired_request_state(&mut self) { @@ -470,6 +514,7 @@ impl PeerState { // We abort the flow, and prune any data kept. self.intercept_scid_by_channel_id.retain(|_, iscid| intercept_scid != iscid); self.intercept_scid_by_user_channel_id.retain(|_, iscid| intercept_scid != iscid); + self.needs_persist |= true; return false; } true @@ -492,6 +537,14 @@ impl PeerState { } } +impl_writeable_tlv_based!(PeerState, { + (0, outbound_channels_by_intercept_scid, required), + (2, intercept_scid_by_user_channel_id, required), + (4, intercept_scid_by_channel_id, required), + (_unused, pending_requests, (static_value, new_hash_map())), + (_unused, needs_persist, (static_value, false)), +}); + macro_rules! get_or_insert_peer_state_entry { ($self: ident, $outer_state_lock: expr, $message_queue_notifier: expr, $counterparty_node_id: expr) => {{ // Return an internal error and abort if we hit the maximum allowed number of total peers. @@ -526,13 +579,15 @@ macro_rules! get_or_insert_peer_state_entry { } /// The main object allowing to send and receive bLIP-52 / LSPS2 messages. -pub struct LSPS2ServiceHandler +pub struct LSPS2ServiceHandler where CM::Target: AChannelManager, + K::Target: KVStore, { channel_manager: CM, + kv_store: K, pending_messages: Arc, - pending_events: Arc, + pending_events: Arc>, per_peer_state: RwLock>>, peer_by_intercept_scid: RwLock>, peer_by_channel_id: RwLock>, @@ -540,23 +595,45 @@ where config: LSPS2ServiceConfig, } -impl LSPS2ServiceHandler +impl LSPS2ServiceHandler where CM::Target: AChannelManager, + K::Target: KVStore, { /// Constructs a `LSPS2ServiceHandler`. 
pub(crate) fn new( - pending_messages: Arc, pending_events: Arc, channel_manager: CM, + peer_states: Vec<(PublicKey, PeerState)>, pending_messages: Arc, + pending_events: Arc>, channel_manager: CM, kv_store: K, config: LSPS2ServiceConfig, ) -> Self { + let mut peer_by_intercept_scid = new_hash_map(); + let mut peer_by_channel_id = new_hash_map(); + for (node_id, peer_state) in peer_states.iter() { + for (intercept_scid, _) in peer_state.outbound_channels_by_intercept_scid.iter() { + let res = peer_by_intercept_scid.insert(*intercept_scid, *node_id); + debug_assert!(res.is_none(), "Intercept SCIDs should never collide"); + } + + for (channel_id, _) in peer_state.intercept_scid_by_channel_id.iter() { + let res = peer_by_channel_id.insert(*channel_id, *node_id); + debug_assert!(res.is_none(), "Channel IDs should never collide"); + } + } + + let per_peer_state = peer_states + .into_iter() + .map(|(k, v)| (k, Mutex::new(v))) + .collect::>>(); + Self { pending_messages, pending_events, - per_peer_state: RwLock::new(new_hash_map()), - peer_by_intercept_scid: RwLock::new(new_hash_map()), - peer_by_channel_id: RwLock::new(new_hash_map()), + per_peer_state: RwLock::new(per_peer_state), + peer_by_intercept_scid: RwLock::new(peer_by_intercept_scid), + peer_by_channel_id: RwLock::new(peer_by_channel_id), total_pending_requests: AtomicUsize::new(0), channel_manager, + kv_store, config, } } @@ -672,22 +749,27 @@ where } } - /// Used by LSP to provide the client with the intercept scid and - /// `cltv_expiry_delta` to include in their invoice. The intercept scid - /// must be retrieved from [`ChannelManager::get_intercept_scid`]. + /// Used by LSP to provide the client with the intercept scid, a unique `user_channel_id`, and `cltv_expiry_delta` to + /// include in their invoice. + /// + /// The intercept scid must be retrieved from [`ChannelManager::get_intercept_scid`]. The given + /// `user_channel_id` must be locally unique and will eventually be returned via events to be + /// used when opening the channel via [`ChannelManager::create_channel`]. /// /// Should be called in response to receiving a [`LSPS2ServiceEvent::BuyRequest`] event. 
/// + /// [`ChannelManager::create_channel`]: lightning::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::get_intercept_scid`]: lightning::ln::channelmanager::ChannelManager::get_intercept_scid /// [`LSPS2ServiceEvent::BuyRequest`]: crate::lsps2::event::LSPS2ServiceEvent::BuyRequest - pub fn invoice_parameters_generated( + #[allow(clippy::await_holding_lock)] + pub async fn invoice_parameters_generated( &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { let mut message_queue_notifier = self.pending_messages.notifier(); + let mut should_persist = false; let outer_state_lock = self.per_peer_state.read().unwrap(); - match outer_state_lock.get(counterparty_node_id) { Some(inner_state_lock) => { let mut peer_state_lock = inner_state_lock.lock().unwrap(); @@ -711,6 +793,7 @@ where .insert(user_channel_id, intercept_scid); peer_state_lock .insert_outbound_channel(intercept_scid, outbound_jit_channel); + should_persist |= peer_state_lock.needs_persist; let response = LSPS2Response::Buy(LSPS2BuyResponse { jit_channel_scid: intercept_scid.into(), @@ -719,17 +802,38 @@ where }); let msg = LSPS2Message::Response(request_id, response).into(); message_queue_notifier.enqueue(counterparty_node_id, msg); - Ok(()) }, - _ => Err(APIError::APIMisuseError { - err: format!("No pending buy request for request_id: {:?}", request_id), - }), + _ => { + return Err(APIError::APIMisuseError { + err: format!("No pending buy request for request_id: {:?}", request_id), + }) + }, } }, - None => Err(APIError::APIMisuseError { - err: format!("No state for the counterparty exists: {:?}", counterparty_node_id), - }), + None => { + return Err(APIError::APIMisuseError { + err: format!( + "No state for the counterparty exists: {:?}", + counterparty_node_id + ), + }) + }, + }; + + drop(outer_state_lock); + + if should_persist { + self.persist_peer_state(*counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!( + "Failed to persist peer state for {}: {}", + counterparty_node_id, e + ), + } + })?; } + + Ok(()) } /// Forward [`Event::HTLCIntercepted`] event parameters into this function. 
@@ -744,11 +848,13 @@ where /// /// [`Event::HTLCIntercepted`]: lightning::events::Event::HTLCIntercepted /// [`LSPS2ServiceEvent::OpenChannel`]: crate::lsps2::event::LSPS2ServiceEvent::OpenChannel - pub fn htlc_intercepted( + #[allow(clippy::await_holding_lock)] + pub async fn htlc_intercepted( &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) -> Result<(), APIError> { let event_queue_notifier = self.pending_events.notifier(); + let mut should_persist = None; let peer_by_intercept_scid = self.peer_by_intercept_scid.read().unwrap(); if let Some(counterparty_node_id) = peer_by_intercept_scid.get(&intercept_scid) { @@ -756,6 +862,10 @@ where match outer_state_lock.get(counterparty_node_id) { Some(inner_state_lock) => { let mut peer_state = inner_state_lock.lock().unwrap(); + peer_state.needs_persist |= peer_state + .outbound_channels_by_intercept_scid + .contains_key(&intercept_scid); + should_persist = Some(*counterparty_node_id); if let Some(jit_channel) = peer_state.outbound_channels_by_intercept_scid.get_mut(&intercept_scid) { @@ -823,6 +933,19 @@ where } } + drop(peer_by_intercept_scid); + + if let Some(counterparty_node_id) = should_persist { + self.persist_peer_state(counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!( + "Failed to persist peer state for {}: {}", + counterparty_node_id, e + ), + } + })?; + } + Ok(()) } @@ -833,9 +956,10 @@ where /// or if the payment queue is empty /// /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed - pub fn htlc_handling_failed( + pub async fn htlc_handling_failed( &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { + let mut should_persist = None; if let HTLCHandlingFailureType::Forward { channel_id, .. } = failure_type { let peer_by_channel_id = self.peer_by_channel_id.read().unwrap(); if let Some(counterparty_node_id) = peer_by_channel_id.get(&channel_id) { @@ -843,6 +967,9 @@ where match outer_state_lock.get(counterparty_node_id) { Some(inner_state_lock) => { let mut peer_state = inner_state_lock.lock().unwrap(); + peer_state.needs_persist |= + peer_state.intercept_scid_by_channel_id.contains_key(&channel_id); + should_persist = Some(*counterparty_node_id); if let Some(intercept_scid) = peer_state.intercept_scid_by_channel_id.get(&channel_id).copied() { @@ -889,6 +1016,17 @@ where } } + if let Some(counterparty_node_id) = should_persist { + self.persist_peer_state(counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!( + "Failed to persist peer state for {}: {}", + counterparty_node_id, e + ), + } + })?; + } + Ok(()) } @@ -903,7 +1041,9 @@ where /// greater or equal to 0.0.107. 
/// /// [`Event::PaymentForwarded`]: lightning::events::Event::PaymentForwarded - pub fn payment_forwarded(&self, next_channel_id: ChannelId) -> Result<(), APIError> { + pub async fn payment_forwarded(&self, next_channel_id: ChannelId) -> Result<(), APIError> { + let mut should_persist = None; + if let Some(counterparty_node_id) = self.peer_by_channel_id.read().unwrap().get(&next_channel_id) { @@ -911,6 +1051,9 @@ where match outer_state_lock.get(counterparty_node_id) { Some(inner_state_lock) => { let mut peer_state = inner_state_lock.lock().unwrap(); + peer_state.needs_persist |= + peer_state.intercept_scid_by_channel_id.contains_key(&next_channel_id); + should_persist = Some(*counterparty_node_id); if let Some(intercept_scid) = peer_state.intercept_scid_by_channel_id.get(&next_channel_id).copied() { @@ -953,6 +1096,17 @@ where } } + if let Some(counterparty_node_id) = should_persist { + self.persist_peer_state(counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!( + "Failed to persist peer state for {}: {}", + counterparty_node_id, e + ), + } + })?; + } + Ok(()) } @@ -971,7 +1125,8 @@ where /// open, as it only affects the local LSPS2 state and doesn't affect any channels that /// might already exist on-chain. Any pending channel open attempts must be managed /// separately. - pub fn channel_open_abandoned( + #[allow(clippy::await_holding_lock)] + pub async fn channel_open_abandoned( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { let outer_state_lock = self.per_peer_state.read().unwrap(); @@ -1015,6 +1170,16 @@ where peer_state.intercept_scid_by_user_channel_id.remove(&user_channel_id); peer_state.outbound_channels_by_intercept_scid.remove(&intercept_scid); peer_state.intercept_scid_by_channel_id.retain(|_, &mut scid| scid != intercept_scid); + peer_state.needs_persist |= true; + + drop(peer_state); + drop(outer_state_lock); + + self.persist_peer_state(*counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!("Failed to persist peer state for {}: {}", counterparty_node_id, e), + } + })?; Ok(()) } @@ -1026,9 +1191,11 @@ where /// state so that the payer may try the payment again. 
/// /// [`LSPS2ServiceEvent::OpenChannel`]: crate::lsps2::event::LSPS2ServiceEvent::OpenChannel - pub fn channel_open_failed( + #[allow(clippy::await_holding_lock)] + pub async fn channel_open_failed( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { + let mut should_persist = false; let outer_state_lock = self.per_peer_state.read().unwrap(); let inner_state_lock = @@ -1046,6 +1213,9 @@ where err: format!("Could not find a channel with user_channel_id {}", user_channel_id), })?; + peer_state.needs_persist |= + peer_state.outbound_channels_by_intercept_scid.contains_key(&intercept_scid); + should_persist |= peer_state.needs_persist; let jit_channel = peer_state .outbound_channels_by_intercept_scid .get_mut(&intercept_scid) @@ -1070,12 +1240,27 @@ where jit_channel.state = OutboundJITChannelState::PendingInitialPayment { payment_queue: PaymentQueue::new(), }; - Ok(()) } else { - Err(APIError::APIMisuseError { + return Err(APIError::APIMisuseError { err: "Channel is not in the PendingChannelOpen state.".to_string(), - }) + }); } + + drop(peer_state); + drop(outer_state_lock); + + if should_persist { + self.persist_peer_state(*counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!( + "Failed to persist peer state for {}: {}", + counterparty_node_id, e + ), + } + })?; + } + + Ok(()) } /// Forward [`Event::ChannelReady`] event parameters into this function. @@ -1084,9 +1269,11 @@ where /// we need to forward a payment over otherwise it will be ignored. /// /// [`Event::ChannelReady`]: lightning::events::Event::ChannelReady - pub fn channel_ready( + #[allow(clippy::await_holding_lock)] + pub async fn channel_ready( &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Result<(), APIError> { + let mut should_persist = false; { let mut peer_by_channel_id = self.peer_by_channel_id.write().unwrap(); peer_by_channel_id.insert(*channel_id, *counterparty_node_id); @@ -1095,6 +1282,9 @@ where match outer_state_lock.get(counterparty_node_id) { Some(inner_state_lock) => { let mut peer_state = inner_state_lock.lock().unwrap(); + peer_state.needs_persist |= + peer_state.intercept_scid_by_user_channel_id.contains_key(&user_channel_id); + should_persist |= peer_state.needs_persist; if let Some(intercept_scid) = peer_state.intercept_scid_by_user_channel_id.get(&user_channel_id).copied() { @@ -1154,6 +1344,17 @@ where }, } + if should_persist { + self.persist_peer_state(*counterparty_node_id).await.map_err(|e| { + APIError::APIMisuseError { + err: format!( + "Failed to persist peer state for {}: {}", + counterparty_node_id, e + ), + } + })?; + } + Ok(()) } @@ -1404,35 +1605,112 @@ where ); } - pub(crate) fn peer_disconnected(&self, counterparty_node_id: PublicKey) { - let mut outer_state_lock = self.per_peer_state.write().unwrap(); - let is_prunable = - if let Some(inner_state_lock) = outer_state_lock.get(&counterparty_node_id) { - let mut peer_state_lock = inner_state_lock.lock().unwrap(); - peer_state_lock.prune_expired_request_state(); - peer_state_lock.is_prunable() - } else { - return; + async fn persist_peer_state( + &self, counterparty_node_id: PublicKey, + ) -> Result<(), lightning::io::Error> { + let fut = { + let outer_state_lock = self.per_peer_state.read().unwrap(); + let encoded = match outer_state_lock.get(&counterparty_node_id) { + None => { + // We dropped the peer state by now. 
+				return Ok(());
+			},
+			Some(entry) => {
+				let mut peer_state_lock = entry.lock().unwrap();
+				if !peer_state_lock.needs_persist {
+					// We already have persisted otherwise by now.
+					return Ok(());
+				} else {
+					peer_state_lock.needs_persist = false;
+					peer_state_lock.encode()
+				}
+			},
 			};
-		if is_prunable {
-			outer_state_lock.remove(&counterparty_node_id);
+			let key = counterparty_node_id.to_string();
+
+			self.kv_store.write(
+				LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+				LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE,
+				&key,
+				encoded,
+			)
+		};
+
+		fut.await.map_err(|e| {
+			self.per_peer_state
+				.read()
+				.unwrap()
+				.get(&counterparty_node_id)
+				.map(|p| p.lock().unwrap().needs_persist = true);
+			e
+		})
+	}
+
+	pub(crate) async fn persist(&self) -> Result<(), lightning::io::Error> {
+		// TODO: We should eventually persist in parallel, however, when we do, we probably want to
+		// introduce some batching to upper-bound the number of requests inflight at any given
+		// time.
+		let need_persist: Vec = {
+			let outer_state_lock = self.per_peer_state.read().unwrap();
+			outer_state_lock
+				.iter()
+				.filter_map(|(k, v)| if v.lock().unwrap().needs_persist { Some(*k) } else { None })
+				.collect()
+		};
+
+		for counterparty_node_id in need_persist.into_iter() {
+			self.persist_peer_state(counterparty_node_id).await?;
 		}
+
+		Ok(())
 	}

-	#[allow(clippy::bool_comparison)]
-	pub(crate) fn prune_peer_state(&self) {
-		let mut outer_state_lock = self.per_peer_state.write().unwrap();
-		outer_state_lock.retain(|_, inner_state_lock| {
+	pub(crate) fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
+		let outer_state_lock = self.per_peer_state.write().unwrap();
+		if let Some(inner_state_lock) = outer_state_lock.get(&counterparty_node_id) {
 			let mut peer_state_lock = inner_state_lock.lock().unwrap();
+			// We clean up the peer state, but leave removing the peer entry to `prune_peer_state`
+			// which also removes it from the store.
 			peer_state_lock.prune_expired_request_state();
-			peer_state_lock.is_prunable() == false
-		});
+		}
+	}
+
+	#[allow(clippy::bool_comparison)]
+	pub(crate) async fn prune_peer_state(&self) {
+		let mut need_remove = Vec::new();
+
+		{
+			let mut outer_state_lock = self.per_peer_state.write().unwrap();
+			outer_state_lock.retain(|counterparty_node_id, inner_state_lock| {
+				let mut peer_state_lock = inner_state_lock.lock().unwrap();
+				peer_state_lock.prune_expired_request_state();
+				let is_prunable = peer_state_lock.is_prunable();
+				if is_prunable {
+					need_remove.push(*counterparty_node_id);
+				}
+				is_prunable == false
+			});
+		}
+
+		for counterparty_node_id in need_remove {
+			let key = counterparty_node_id.to_string();
+			let _ = self
+				.kv_store
+				.remove(
+					LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+					LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE,
+					&key,
+					true,
+				)
+				.await;
+		}
 	}
 }

-impl LSPSProtocolMessageHandler for LSPS2ServiceHandler
+impl LSPSProtocolMessageHandler for LSPS2ServiceHandler
 where
 	CM::Target: AChannelManager,
+	K::Target: KVStore,
 {
 	type ProtocolMessage = LSPS2Message;
 	const PROTOCOL_NUMBER: Option = Some(2);
@@ -1500,6 +1778,213 @@ fn calculate_amount_to_forward_per_htlc(
 	per_htlc_forwards
 }

+/// A synchronous wrapper around [`LSPS2ServiceHandler`] to be used in contexts where async is not
+/// available.
+pub struct LSPS2ServiceHandlerSync<'a, CM: Deref, K: Deref + Clone> +where + CM::Target: AChannelManager, + K::Target: KVStore, +{ + inner: &'a LSPS2ServiceHandler, +} + +impl<'a, CM: Deref, K: Deref + Clone> LSPS2ServiceHandlerSync<'a, CM, K> +where + CM::Target: AChannelManager, + K::Target: KVStore, +{ + pub(crate) fn from_inner(inner: &'a LSPS2ServiceHandler) -> Self { + Self { inner } + } + + /// Returns a reference to the used config. + /// + /// Wraps [`LSPS2ServiceHandler::config`]. + pub fn config(&self) -> &LSPS2ServiceConfig { + &self.inner.config + } + + /// Used by LSP to inform a client requesting a JIT Channel the token they used is invalid. + /// + /// Wraps [`LSPS2ServiceHandler::invalid_token_provided`]. + pub fn invalid_token_provided( + &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, + ) -> Result<(), APIError> { + self.inner.invalid_token_provided(counterparty_node_id, request_id) + } + + /// Used by LSP to provide fee parameters to a client requesting a JIT Channel. + /// + /// Wraps [`LSPS2ServiceHandler::opening_fee_params_generated`]. + pub fn opening_fee_params_generated( + &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, + opening_fee_params_menu: Vec, + ) -> Result<(), APIError> { + self.inner.opening_fee_params_generated( + counterparty_node_id, + request_id, + opening_fee_params_menu, + ) + } + + /// Used by LSP to provide the client with the intercept scid and + /// `cltv_expiry_delta` to include in their invoice. + /// + /// Wraps [`LSPS2ServiceHandler::invoice_parameters_generated`]. + pub fn invoice_parameters_generated( + &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, + cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, + ) -> Result<(), APIError> { + let mut fut = Box::pin(self.inner.invoice_parameters_generated( + counterparty_node_id, + request_id, + intercept_scid, + cltv_expiry_delta, + client_trusts_lsp, + user_channel_id, + )); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("Should not be pending in a sync context"); + }, + } + } + + /// Forward [`Event::HTLCIntercepted`] event parameters into this function. + /// + /// Wraps [`LSPS2ServiceHandler::htlc_intercepted`]. + /// + /// [`Event::HTLCIntercepted`]: lightning::events::Event::HTLCIntercepted + pub fn htlc_intercepted( + &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, + payment_hash: PaymentHash, + ) -> Result<(), APIError> { + let mut fut = Box::pin(self.inner.htlc_intercepted( + intercept_scid, + intercept_id, + expected_outbound_amount_msat, + payment_hash, + )); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("Should not be pending in a sync context"); + }, + } + } + + /// Forward [`Event::HTLCHandlingFailed`] event parameter into this function. + /// + /// Wraps [`LSPS2ServiceHandler::htlc_handling_failed`]. 
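Each sync wrapper method above drives the inner async call by pinning the future, polling it once with a dummy waker, and treating `Pending` as unreachable. That is sound only because every `.await` inside ultimately bottoms out in a `KVStoreSyncWrapper`-backed future that resolves immediately. A self-contained sketch of the driving helper (hypothetical names, not the crate's own `dummy_waker`):

```rust
use core::future::Future;
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A waker that does nothing: acceptable because we never actually suspend.
fn noop_waker() -> Waker {
    fn clone(_: *const ()) -> RawWaker {
        RawWaker::new(core::ptr::null(), &VTABLE)
    }
    fn noop(_: *const ()) {}
    static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, noop, noop, noop);
    unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &VTABLE)) }
}

// Polls a future exactly once and expects it to already be complete, which
// holds whenever every `.await` inside resolves immediately (as with futures
// backed by a synchronous store wrapper).
fn expect_ready<F: Future>(fut: F) -> F::Output {
    let mut fut = Box::pin(fut);
    let waker = noop_waker();
    let mut ctx = Context::from_waker(&waker);
    match fut.as_mut().poll(&mut ctx) {
        Poll::Ready(res) => res,
        Poll::Pending => unreachable!("future must not suspend in a sync context"),
    }
}
```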
+ /// + /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed + pub fn htlc_handling_failed( + &self, failure_type: HTLCHandlingFailureType, + ) -> Result<(), APIError> { + let mut fut = Box::pin(self.inner.htlc_handling_failed(failure_type)); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("Should not be pending in a sync context"); + }, + } + } + + /// Forward [`Event::PaymentForwarded`] event parameter into this function. + /// + /// Wraps [`LSPS2ServiceHandler::payment_forwarded`]. + /// + /// [`Event::PaymentForwarded`]: lightning::events::Event::PaymentForwarded + pub fn payment_forwarded(&self, next_channel_id: ChannelId) -> Result<(), APIError> { + let mut fut = Box::pin(self.inner.payment_forwarded(next_channel_id)); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("Should not be pending in a sync context"); + }, + } + } + + /// Abandons a pending JIT‐open flow for `user_channel_id`, removing all local state. + /// + /// Wraps [`LSPS2ServiceHandler::channel_open_abandoned`]. + pub fn channel_open_abandoned( + &self, counterparty_node_id: &PublicKey, user_channel_id: u128, + ) -> Result<(), APIError> { + let mut fut = + Box::pin(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("Should not be pending in a sync context"); + }, + } + } + + /// Used to fail intercepted HTLCs backwards when a channel open attempt ultimately fails. + /// + /// Wraps [`LSPS2ServiceHandler::channel_open_failed`]. + pub fn channel_open_failed( + &self, counterparty_node_id: &PublicKey, user_channel_id: u128, + ) -> Result<(), APIError> { + let mut fut = + Box::pin(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("Should not be pending in a sync context"); + }, + } + } + + /// Forward [`Event::ChannelReady`] event parameters into this function. + /// + /// Wraps [`LSPS2ServiceHandler::channel_ready`]. + /// + /// [`Event::ChannelReady`]: lightning::events::Event::ChannelReady + pub fn channel_ready( + &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Result<(), APIError> { + let mut fut = + Box::pin(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. 
+ unreachable!("Should not be pending in a sync context"); + }, + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/lightning-liquidity/src/lsps5/client.rs b/lightning-liquidity/src/lsps5/client.rs index 2e905454805..1c6f8b8a250 100644 --- a/lightning-liquidity/src/lsps5/client.rs +++ b/lightning-liquidity/src/lsps5/client.rs @@ -33,6 +33,7 @@ use lightning::util::logger::Level; use alloc::collections::VecDeque; use alloc::string::String; +use lightning::util::persist::KVStore; use core::ops::Deref; @@ -124,25 +125,27 @@ impl PeerState { /// [`lsps5.list_webhooks`]: super::msgs::LSPS5Request::ListWebhooks /// [`lsps5.remove_webhook`]: super::msgs::LSPS5Request::RemoveWebhook /// [`LSPS5Validator`]: super::validator::LSPS5Validator -pub struct LSPS5ClientHandler +pub struct LSPS5ClientHandler where ES::Target: EntropySource, + K::Target: KVStore, { pending_messages: Arc, - pending_events: Arc, + pending_events: Arc>, entropy_source: ES, per_peer_state: RwLock>>, _config: LSPS5ClientConfig, } -impl LSPS5ClientHandler +impl LSPS5ClientHandler where ES::Target: EntropySource, + K::Target: KVStore, { /// Constructs an `LSPS5ClientHandler`. pub(crate) fn new( - entropy_source: ES, pending_messages: Arc, pending_events: Arc, - _config: LSPS5ClientConfig, + entropy_source: ES, pending_messages: Arc, + pending_events: Arc>, _config: LSPS5ClientConfig, ) -> Self { Self { pending_messages, @@ -423,9 +426,10 @@ where } } -impl LSPSProtocolMessageHandler for LSPS5ClientHandler +impl LSPSProtocolMessageHandler for LSPS5ClientHandler where ES::Target: EntropySource, + K::Target: KVStore, { type ProtocolMessage = LSPS5Message; const PROTOCOL_NUMBER: Option = Some(5); @@ -444,6 +448,9 @@ mod tests { use crate::{lsps0::ser::LSPSRequestId, lsps5::msgs::SetWebhookResponse}; use bitcoin::{key::Secp256k1, secp256k1::SecretKey}; use core::sync::atomic::{AtomicU64, Ordering}; + use lightning::util::persist::KVStoreSyncWrapper; + use lightning::util::test_utils::TestStore; + use lightning::util::wakers::Notifier; struct UniqueTestEntropy { counter: AtomicU64, @@ -459,15 +466,19 @@ mod tests { } fn setup_test_client() -> ( - LSPS5ClientHandler>, + LSPS5ClientHandler, Arc>>>, Arc, - Arc, + Arc>>>>, PublicKey, PublicKey, ) { let test_entropy_source = Arc::new(UniqueTestEntropy { counter: AtomicU64::new(2) }); - let message_queue = Arc::new(MessageQueue::new()); - let event_queue = Arc::new(EventQueue::new()); + let notifier = Arc::new(Notifier::new()); + let message_queue = Arc::new(MessageQueue::new(notifier)); + + let kv_store = Arc::new(KVStoreSyncWrapper(Arc::new(TestStore::new(false)))); + let persist_notifier = Arc::new(Notifier::new()); + let event_queue = Arc::new(EventQueue::new(VecDeque::new(), kv_store, persist_notifier)); let client = LSPS5ClientHandler::new( test_entropy_source, Arc::clone(&message_queue), diff --git a/lightning-liquidity/src/lsps5/event.rs b/lightning-liquidity/src/lsps5/event.rs index f401c0e10ac..a9c1052250a 100644 --- a/lightning-liquidity/src/lsps5/event.rs +++ b/lightning-liquidity/src/lsps5/event.rs @@ -13,6 +13,8 @@ use crate::lsps0::ser::LSPSRequestId; use alloc::string::String; use alloc::vec::Vec; use bitcoin::secp256k1::PublicKey; + +use lightning::impl_writeable_tlv_based_enum; use lightning::util::hash_tables::HashMap; use super::msgs::LSPS5AppName; @@ -37,6 +39,8 @@ pub enum LSPS5ServiceEvent { /// when received by the client. The client verifies this signature using /// [`validate`], which guards against replay attacks and tampering. 
 	///
+	/// **Note:** This event will be persisted across restarts.
+	///
 	/// [`validate`]: super::validator::LSPS5Validator::validate
 	/// [`url`]: super::msgs::LSPS5WebhookUrl
 	/// [`notification`]: super::msgs::WebhookNotification
@@ -70,6 +74,16 @@ pub enum LSPS5ServiceEvent {
 	},
 }

+impl_writeable_tlv_based_enum!(LSPS5ServiceEvent,
+	(0, SendWebhookNotification) => {
+		(0, counterparty_node_id, required),
+		(2, app_name, required),
+		(4, url, required),
+		(6, notification, required),
+		(8, headers, required),
+	}
+);
+
 /// An event which an LSPS5 client should take some action in response to.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum LSPS5ClientEvent {
@@ -82,6 +96,8 @@
 	/// the LSP will also emit a [`SendWebhookNotification`] event with a [`webhook_registered`] notification
 	/// to notify the client about this registration.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`lsps5.set_webhook`]: super::msgs::LSPS5Request::SetWebhook
 	/// [`SendWebhookNotification`]: super::event::LSPS5ServiceEvent::SendWebhookNotification
 	/// [`webhook_registered`]: super::msgs::WebhookNotificationMethod::LSPS5WebhookRegistered
@@ -117,6 +133,8 @@
 	/// - Maximum number of webhooks per client has been reached (error [`TooManyWebhooks`]). Remove a webhook before
 	/// registering a new one.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`lsps5.set_webhook`]: super::msgs::LSPS5Request::SetWebhook
 	/// [`app_name`]: super::msgs::LSPS5AppName
 	/// [`url`]: super::msgs::LSPS5WebhookUrl
@@ -170,6 +188,8 @@
 	/// After this event, the app_name is free to be reused for a new webhook
 	/// registration if desired.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`lsps5.remove_webhook`]: super::msgs::LSPS5Request::RemoveWebhook
 	WebhookRemoved {
 		/// The node id of the LSP that confirmed the removal.
@@ -191,6 +211,8 @@
 	/// (error code [`LSPS5_APP_NAME_NOT_FOUND_ERROR_CODE`]), which indicates
 	/// the given [`app_name`] was not found in the LSP's registration database.
 	///
+	/// **Note:** This event will *not* be persisted across restarts.
+	///
 	/// [`lsps5.remove_webhook`]: super::msgs::LSPS5Request::RemoveWebhook
 	/// [`AppNameNotFound`]: super::msgs::LSPS5ProtocolError::AppNameNotFound
 	/// [`LSPS5ProtocolError::AppNameNotFound`]: super::msgs::LSPS5ProtocolError::AppNameNotFound
diff --git a/lightning-liquidity/src/lsps5/msgs.rs b/lightning-liquidity/src/lsps5/msgs.rs
index c45e1883920..341dfcddf00 100644
--- a/lightning-liquidity/src/lsps5/msgs.rs
+++ b/lightning-liquidity/src/lsps5/msgs.rs
@@ -16,6 +16,9 @@ use crate::lsps0::ser::LSPSResponseError;

 use super::url_utils::LSPSUrl;

+use lightning::ln::msgs::DecodeError;
+use lightning::util::ser::{Readable, Writeable};
+use lightning::{impl_writeable_tlv_based, impl_writeable_tlv_based_enum};
 use lightning_types::string::UntrustedString;

 use serde::de::{self, Deserializer, MapAccess, Visitor};
@@ -288,6 +291,20 @@ impl From for LSPSResponseError {
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct LSPS5AppName(UntrustedString);

+impl Writeable for LSPS5AppName {
+	fn write(
+		&self, writer: &mut W,
+	) -> Result<(), lightning::io::Error> {
+		self.0.write(writer)
+	}
+}
+
+impl Readable for LSPS5AppName {
+	fn read(reader: &mut R) -> Result {
+		Ok(Self(Readable::read(reader)?))
+	}
+}
+
 impl LSPS5AppName {
 	/// Create a new LSPS5 app name.
pub fn new(app_name: String) -> Result { @@ -430,6 +447,20 @@ impl From for String { } } +impl Writeable for LSPS5WebhookUrl { + fn write( + &self, writer: &mut W, + ) -> Result<(), lightning::io::Error> { + self.0.write(writer) + } +} + +impl Readable for LSPS5WebhookUrl { + fn read(reader: &mut R) -> Result { + Ok(Self(Readable::read(reader)?)) + } +} + /// Parameters for `lsps5.set_webhook` request. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct SetWebhookRequest { @@ -492,6 +523,16 @@ pub enum WebhookNotificationMethod { LSPS5OnionMessageIncoming, } +impl_writeable_tlv_based_enum!(WebhookNotificationMethod, + (0, LSPS5WebhookRegistered) => {}, + (2, LSPS5PaymentIncoming) => {}, + (4, LSPS5ExpirySoon) => { + (0, timeout, required), + }, + (6, LSPS5LiquidityManagementRequest) => {}, + (8, LSPS5OnionMessageIncoming) => {}, +); + /// Webhook notification payload. #[derive(Debug, Clone, PartialEq, Eq)] pub struct WebhookNotification { @@ -642,6 +683,10 @@ impl<'de> Deserialize<'de> for WebhookNotification { } } +impl_writeable_tlv_based!(WebhookNotification, { + (0, method, required), +}); + /// An LSPS5 protocol request. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS5Request { diff --git a/lightning-liquidity/src/lsps5/service.rs b/lightning-liquidity/src/lsps5/service.rs index 9f0a80254d8..e8cf7e193ec 100644 --- a/lightning-liquidity/src/lsps5/service.rs +++ b/lightning-liquidity/src/lsps5/service.rs @@ -17,16 +17,22 @@ use crate::lsps5::msgs::{ SetWebhookRequest, SetWebhookResponse, WebhookNotification, WebhookNotificationMethod, }; use crate::message_queue::MessageQueue; +use crate::persist::{ + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, +}; use crate::prelude::*; use crate::sync::{Arc, Mutex, RwLock, RwLockWriteGuard}; use crate::utils::time::TimeProvider; use bitcoin::secp256k1::PublicKey; +use lightning::impl_writeable_tlv_based; use lightning::ln::channelmanager::AChannelManager; use lightning::ln::msgs::{ErrorAction, LightningError}; use lightning::sign::NodeSigner; use lightning::util::logger::Level; +use lightning::util::persist::KVStore; +use lightning::util::ser::Writeable; use core::ops::Deref; use core::time::Duration; @@ -58,6 +64,14 @@ struct Webhook { last_notification_sent: Option, } +impl_writeable_tlv_based!(Webhook, { + (0, _app_name, required), + (2, url, required), + (4, _counterparty_node_id, required), + (6, last_used, required), + (8, last_notification_sent, option), +}); + /// Server-side configuration options for LSPS5 Webhook Registration. 
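With `Writeable`/`Readable` now implemented for `WebhookNotification` and its method enum, a round-trip test is the natural check. A hedged sketch of such a test (module placement, imports, and field visibility follow the crate's usual test conventions and may need adjusting):

```rust
#[cfg(test)]
mod ser_roundtrip {
    use crate::lsps5::msgs::{WebhookNotification, WebhookNotificationMethod};
    use lightning::util::ser::{Readable, Writeable};

    #[test]
    fn webhook_notification_roundtrip() {
        let notification = WebhookNotification {
            method: WebhookNotificationMethod::LSPS5PaymentIncoming,
        };
        // `encode` is the helper `Writeable` provides, returning a `Vec<u8>`.
        let encoded = notification.encode();
        let decoded: WebhookNotification = Readable::read(&mut &encoded[..]).unwrap();
        assert_eq!(notification, decoded);
    }
}
```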
#[derive(Clone, Debug)] pub struct LSPS5ServiceConfig { @@ -109,42 +123,49 @@ impl Default for LSPS5ServiceConfig { /// [`LSPS5ServiceEvent::SendWebhookNotification`]: super::event::LSPS5ServiceEvent::SendWebhookNotification /// [`app_name`]: super::msgs::LSPS5AppName /// [`lsps5.webhook_registered`]: super::msgs::WebhookNotificationMethod::LSPS5WebhookRegistered -pub struct LSPS5ServiceHandler +pub struct LSPS5ServiceHandler where CM::Target: AChannelManager, NS::Target: NodeSigner, + K::Target: KVStore, TP::Target: TimeProvider, { config: LSPS5ServiceConfig, per_peer_state: RwLock>, - event_queue: Arc, + event_queue: Arc>, pending_messages: Arc, time_provider: TP, channel_manager: CM, node_signer: NS, + kv_store: K, last_pruning: Mutex>, } -impl LSPS5ServiceHandler +impl LSPS5ServiceHandler where CM::Target: AChannelManager, NS::Target: NodeSigner, + K::Target: KVStore, TP::Target: TimeProvider, { /// Constructs a `LSPS5ServiceHandler` using the given time provider. pub(crate) fn new_with_time_provider( - event_queue: Arc, pending_messages: Arc, channel_manager: CM, - node_signer: NS, config: LSPS5ServiceConfig, time_provider: TP, + peer_states: Vec<(PublicKey, PeerState)>, event_queue: Arc>, + pending_messages: Arc, channel_manager: CM, kv_store: K, node_signer: NS, + config: LSPS5ServiceConfig, time_provider: TP, ) -> Self { assert!(config.max_webhooks_per_client > 0, "`max_webhooks_per_client` must be > 0"); + let per_peer_state = + RwLock::new(peer_states.into_iter().collect::>()); Self { config, - per_peer_state: RwLock::new(new_hash_map()), + per_peer_state, event_queue, pending_messages, time_provider, channel_manager, node_signer, + kv_store, last_pruning: Mutex::new(None), } } @@ -177,6 +198,66 @@ where } } + async fn persist_peer_state( + &self, counterparty_node_id: PublicKey, + ) -> Result<(), lightning::io::Error> { + let fut = { + let mut outer_state_lock = self.per_peer_state.write().unwrap(); + let encoded = match outer_state_lock.get_mut(&counterparty_node_id) { + None => { + // We dropped the peer state by now. + return Ok(()); + }, + Some(entry) => { + if !entry.needs_persist { + // We already have persisted otherwise by now. + return Ok(()); + } else { + entry.needs_persist = false; + entry.encode() + } + }, + }; + + let key = counterparty_node_id.to_string(); + + self.kv_store.write( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, + &key, + encoded, + ) + }; + + fut.await.map_err(|e| { + self.per_peer_state + .write() + .unwrap() + .get_mut(&counterparty_node_id) + .map(|p| p.needs_persist = true); + e + }) + } + + pub(crate) async fn persist(&self) -> Result<(), lightning::io::Error> { + // TODO: We should eventually persist in parallel, however, when we do, we probably want to + // introduce some batching to upper-bound the number of requests inflight at any given + // time. 
+ let need_persist: Vec = { + let outer_state_lock = self.per_peer_state.read().unwrap(); + outer_state_lock + .iter() + .filter_map(|(k, v)| if v.needs_persist { Some(*k) } else { None }) + .collect() + }; + + for counterparty_node_id in need_persist.into_iter() { + self.persist_peer_state(counterparty_node_id).await?; + } + + Ok(()) + } + fn check_prune_stale_webhooks<'a>( &self, outer_state_lock: &mut RwLockWriteGuard<'a, HashMap>, ) { @@ -189,14 +270,47 @@ where }); if should_prune { + for (_, peer_state) in outer_state_lock.iter_mut() { + // Prune stale webhooks, but leave removal of the peers states to prune_peer_state + // which will also remove it from the store. + peer_state.prune_stale_webhooks(now) + } + *last_pruning = Some(now); + } + } + + pub(crate) async fn prune_peer_state(&self) { + let mut need_remove = Vec::new(); + + { + let mut outer_state_lock = self.per_peer_state.write().unwrap(); + self.check_prune_stale_webhooks(&mut outer_state_lock); + outer_state_lock.retain(|client_id, peer_state| { if self.client_has_open_channel(client_id) { // Don't prune clients with open channels return true; } - !peer_state.prune_stale_webhooks(now) + + let is_prunable = peer_state.is_prunable(); + if is_prunable { + need_remove.push(*client_id); + } + !is_prunable }); - *last_pruning = Some(now); + } + + for counterparty_node_id in need_remove { + let key = counterparty_node_id.to_string(); + let _ = self + .kv_store + .remove( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, + &key, + true, + ) + .await; } } @@ -224,6 +338,7 @@ where webhook.url = params.webhook.clone(); webhook.last_used = now; webhook.last_notification_sent = None; + peer_state.needs_persist |= true; } } else { if num_webhooks >= self.config.max_webhooks_per_client as usize { @@ -540,10 +655,12 @@ where } } -impl LSPSProtocolMessageHandler for LSPS5ServiceHandler +impl LSPSProtocolMessageHandler + for LSPS5ServiceHandler where CM::Target: AChannelManager, NS::Target: NodeSigner, + K::Target: KVStore, TP::Target: TimeProvider, { type ProtocolMessage = LSPS5Message; @@ -582,14 +699,18 @@ where } } -#[derive(Debug, Default)] -struct PeerState { +#[derive(Debug)] +pub(crate) struct PeerState { webhooks: Vec<(LSPS5AppName, Webhook)>, + needs_persist: bool, } impl PeerState { fn webhook_mut(&mut self, name: &LSPS5AppName) -> Option<&mut Webhook> { - self.webhooks.iter_mut().find_map(|(n, h)| if n == name { Some(h) } else { None }) + let res = + self.webhooks.iter_mut().find_map(|(n, h)| if n == name { Some(h) } else { None }); + self.needs_persist |= true; + res } fn webhooks(&self) -> &Vec<(LSPS5AppName, Webhook)> { @@ -597,7 +718,9 @@ impl PeerState { } fn webhooks_mut(&mut self) -> &mut Vec<(LSPS5AppName, Webhook)> { - &mut self.webhooks + let res = &mut self.webhooks; + self.needs_persist |= true; + res } fn webhooks_len(&self) -> usize { @@ -617,6 +740,7 @@ impl PeerState { } self.webhooks.push((name, hook)); + self.needs_persist |= true; } fn remove_webhook(&mut self, name: &LSPS5AppName) -> bool { @@ -629,6 +753,7 @@ impl PeerState { false } }); + self.needs_persist |= true; removed } @@ -636,14 +761,34 @@ impl PeerState { for (_, h) in self.webhooks.iter_mut() { h.last_notification_sent = None; } + self.needs_persist |= true; } // Returns whether the entire state is empty and can be pruned. 
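The `prune_peer_state` above uses the same two-phase shape as its LSPS2 counterpart: decide what to drop while holding the write lock, collect the pruned keys, and only afterwards perform the async store removals. A minimal standalone sketch of that shape (hypothetical types, with `remove_from_store` standing in for `KVStore::remove`):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

struct Pruner {
    state: RwLock<HashMap<u64, Vec<u8>>>,
}

impl Pruner {
    async fn prune(&self) {
        let mut removed_keys = Vec::new();
        {
            let mut state = self.state.write().unwrap();
            state.retain(|key, value| {
                let prunable = value.is_empty();
                if prunable {
                    removed_keys.push(*key);
                }
                !prunable
            });
        } // Write lock dropped before any `.await`.

        for key in removed_keys {
            remove_from_store(key).await;
        }
    }
}

// Stand-in for `KVStore::remove`.
async fn remove_from_store(_key: u64) {}
```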
- fn prune_stale_webhooks(&mut self, now: LSPSDateTime) -> bool { + fn prune_stale_webhooks(&mut self, now: LSPSDateTime) { self.webhooks.retain(|(_, webhook)| { - now.duration_since(&webhook.last_used) < MIN_WEBHOOK_RETENTION_DAYS + let should_prune = now.duration_since(&webhook.last_used) >= MIN_WEBHOOK_RETENTION_DAYS; + if should_prune { + self.needs_persist |= true; + } + !should_prune }); + } + fn is_prunable(&mut self) -> bool { self.webhooks.is_empty() } } + +impl Default for PeerState { + fn default() -> Self { + let webhooks = Vec::new(); + let needs_persist = true; + Self { webhooks, needs_persist } + } +} + +impl_writeable_tlv_based!(PeerState, { + (0, webhooks, required_vec), + (_unused, needs_persist, (static_value, false)), +}); diff --git a/lightning-liquidity/src/lsps5/url_utils.rs b/lightning-liquidity/src/lsps5/url_utils.rs index 139b5e2bbf5..c9d5f9e79c7 100644 --- a/lightning-liquidity/src/lsps5/url_utils.rs +++ b/lightning-liquidity/src/lsps5/url_utils.rs @@ -11,15 +11,15 @@ use super::msgs::LSPS5ProtocolError; +use lightning::ln::msgs::DecodeError; +use lightning::util::ser::{Readable, Writeable}; use lightning_types::string::UntrustedString; use alloc::string::String; /// Represents a parsed URL for LSPS5 webhook notifications. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct LSPSUrl { - url: UntrustedString, -} +pub struct LSPSUrl(UntrustedString); impl LSPSUrl { /// Parses a URL string into a URL instance. @@ -66,17 +66,17 @@ impl LSPSUrl { None => {}, }; - Ok(LSPSUrl { url: UntrustedString(url_str) }) + Ok(LSPSUrl(UntrustedString(url_str))) } /// Returns URL length. pub fn url_length(&self) -> usize { - self.url.0.chars().count() + self.0 .0.chars().count() } /// Returns the full URL string. pub fn url(&self) -> &str { - self.url.0.as_str() + self.0 .0.as_str() } fn is_valid_url_char(c: char) -> bool { @@ -89,6 +89,20 @@ impl LSPSUrl { } } +impl Writeable for LSPSUrl { + fn write( + &self, writer: &mut W, + ) -> Result<(), lightning::io::Error> { + self.0.write(writer) + } +} + +impl Readable for LSPSUrl { + fn read(reader: &mut R) -> Result { + Ok(Self(Readable::read(reader)?)) + } +} + #[cfg(test)] mod tests { use super::*; @@ -104,7 +118,7 @@ mod tests { assert!(result.is_ok()); let url = result.unwrap(); - assert_eq!(url.url.0.chars().count(), url_chars); + assert_eq!(url.0 .0.chars().count(), url_chars); } #[test] diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 3d49679c790..b132b58b30e 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,6 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. 
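The `PeerState` serialization above deliberately skips `needs_persist`: the `(static_value, false)` entry makes deserialization always read it back as `false` (state just read from the store is, by definition, in sync with the store), while `Default` starts at `true` so a freshly created peer entry gets written out on the next `persist()` pass. The same lifecycle, hand-rolled as a hypothetical sketch rather than via LDK's TLV macros:

```rust
// Hypothetical mirror of the `needs_persist` lifecycle: new state starts
// dirty, deserialized state starts clean.
struct PeerState {
    webhooks: Vec<u8>,   // stand-in for the real webhook list
    needs_persist: bool, // never serialized
}

impl Default for PeerState {
    fn default() -> Self {
        // A peer we just created has never hit the store: mark it dirty.
        Self { webhooks: Vec::new(), needs_persist: true }
    }
}

impl PeerState {
    fn from_store(bytes: Vec<u8>) -> Self {
        // State read back from the store matches the store by definition.
        Self { webhooks: bytes, needs_persist: false }
    }
}
```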
+use alloc::boxed::Box; use alloc::string::ToString; use alloc::vec::Vec; @@ -23,6 +24,9 @@ use crate::lsps5::client::{LSPS5ClientConfig, LSPS5ClientHandler}; use crate::lsps5::msgs::LSPS5Message; use crate::lsps5::service::{LSPS5ServiceConfig, LSPS5ServiceHandler}; use crate::message_queue::MessageQueue; +use crate::persist::{ + read_event_queue, read_lsps2_service_peer_states, read_lsps5_service_peer_states, +}; use crate::lsps1::client::{LSPS1ClientConfig, LSPS1ClientHandler}; use crate::lsps1::msgs::LSPS1Message; @@ -31,9 +35,10 @@ use crate::lsps1::service::{LSPS1ServiceConfig, LSPS1ServiceHandler}; use crate::lsps2::client::{LSPS2ClientConfig, LSPS2ClientHandler}; use crate::lsps2::msgs::LSPS2Message; -use crate::lsps2::service::{LSPS2ServiceConfig, LSPS2ServiceHandler}; +use crate::lsps2::service::{LSPS2ServiceConfig, LSPS2ServiceHandler, LSPS2ServiceHandlerSync}; use crate::prelude::{new_hash_map, new_hash_set, HashMap, HashSet}; use crate::sync::{Arc, Mutex, RwLock}; +use crate::utils::async_poll::dummy_waker; #[cfg(feature = "time")] use crate::utils::time::DefaultTimeProvider; use crate::utils::time::TimeProvider; @@ -45,14 +50,18 @@ use lightning::ln::peer_handler::CustomMessageHandler; use lightning::ln::wire::CustomMessageReader; use lightning::sign::{EntropySource, NodeSigner}; use lightning::util::logger::Level; +use lightning::util::persist::{KVStore, KVStoreSync, KVStoreSyncWrapper}; use lightning::util::ser::{LengthLimitedRead, LengthReadable}; -use lightning::util::wakers::Future; +use lightning::util::wakers::{Future, Notifier}; use lightning_types::features::{InitFeatures, NodeFeatures}; use bitcoin::secp256k1::PublicKey; +use core::future::Future as StdFuture; use core::ops::Deref; +use core::task; +use core::time::Duration; const LSPS_FEATURE_BIT: usize = 729; @@ -108,12 +117,17 @@ pub trait ALiquidityManager { type Filter: Filter + ?Sized; /// A type that may be dereferenced to [`Self::Filter`]. type C: Deref + Clone; + /// A type implementing [`KVStore`]. + type KVStore: KVStore + ?Sized; + /// A type that may be dereferenced to [`Self::KVStore`]. + type K: Deref + Clone; /// A type implementing [`TimeProvider`]. type TimeProvider: TimeProvider + ?Sized; /// A type that may be dereferenced to [`Self::TimeProvider`]. type TP: Deref + Clone; /// Returns a reference to the actual [`LiquidityManager`] object. - fn get_lm(&self) -> &LiquidityManager; + fn get_lm(&self) + -> &LiquidityManager; } impl< @@ -121,13 +135,15 @@ impl< NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, + K: Deref + Clone, TP: Deref + Clone, - > ALiquidityManager for LiquidityManager + > ALiquidityManager for LiquidityManager where ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, + K::Target: KVStore, TP::Target: TimeProvider, { type EntropySource = ES::Target; @@ -138,9 +154,109 @@ where type CM = CM; type Filter = C::Target; type C = C; + type KVStore = K::Target; + type K = K; type TimeProvider = TP::Target; type TP = TP; - fn get_lm(&self) -> &LiquidityManager { + fn get_lm(&self) -> &LiquidityManager { + self + } +} + +/// A trivial trait which describes any [`LiquidityManagerSync`]. +/// +/// This is not exported to bindings users as general cover traits aren't useful in other +/// languages. +pub trait ALiquidityManagerSync { + /// A type implementing [`EntropySource`] + type EntropySource: EntropySource + ?Sized; + /// A type that may be dereferenced to [`Self::EntropySource`]. 
+ type ES: Deref + Clone; + /// A type implementing [`NodeSigner`] + type NodeSigner: NodeSigner + ?Sized; + /// A type that may be dereferenced to [`Self::NodeSigner`]. + type NS: Deref + Clone; + /// A type implementing [`AChannelManager`] + type AChannelManager: AChannelManager + ?Sized; + /// A type that may be dereferenced to [`Self::AChannelManager`]. + type CM: Deref + Clone; + /// A type implementing [`Filter`]. + type Filter: Filter + ?Sized; + /// A type that may be dereferenced to [`Self::Filter`]. + type C: Deref + Clone; + /// A type implementing [`KVStoreSync`]. + type KVStoreSync: KVStoreSync + ?Sized; + /// A type that may be dereferenced to [`Self::KVStoreSync`]. + type KS: Deref + Clone; + /// A type implementing [`TimeProvider`]. + type TimeProvider: TimeProvider + ?Sized; + /// A type that may be dereferenced to [`Self::TimeProvider`]. + type TP: Deref + Clone; + /// Returns the inner async [`LiquidityManager`] for testing purposes. + #[cfg(any(test, feature = "_test_utils"))] + fn get_lm_async( + &self, + ) -> Arc< + LiquidityManager< + Self::ES, + Self::NS, + Self::CM, + Self::C, + Arc>, + Self::TP, + >, + >; + /// Returns a reference to the actual [`LiquidityManager`] object. + fn get_lm( + &self, + ) -> &LiquidityManagerSync; +} + +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + KS: Deref + Clone, + TP: Deref + Clone, + > ALiquidityManagerSync for LiquidityManagerSync +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + CM::Target: AChannelManager, + C::Target: Filter, + KS::Target: KVStoreSync, + TP::Target: TimeProvider, +{ + type EntropySource = ES::Target; + type ES = ES; + type NodeSigner = NS::Target; + type NS = NS; + type AChannelManager = CM::Target; + type CM = CM; + type Filter = C::Target; + type C = C; + type KVStoreSync = KS::Target; + type KS = KS; + type TimeProvider = TP::Target; + type TP = TP; + /// Returns the inner async [`LiquidityManager`] for testing purposes. + #[cfg(any(test, feature = "_test_utils"))] + fn get_lm_async( + &self, + ) -> Arc< + LiquidityManager< + Self::ES, + Self::NS, + Self::CM, + Self::C, + Arc>, + Self::TP, + >, + > { + Arc::clone(&self.inner) + } + fn get_lm(&self) -> &LiquidityManagerSync { self } } @@ -169,49 +285,63 @@ pub struct LiquidityManager< NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, + K: Deref + Clone, TP: Deref + Clone, > where ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, + K::Target: KVStore, TP::Target: TimeProvider, { pending_messages: Arc, - pending_events: Arc, + pending_events: Arc>, request_id_to_method_map: Mutex>, // We ignore peers if they send us bogus data. 
ignored_peers: RwLock>, - lsps0_client_handler: LSPS0ClientHandler, + lsps0_client_handler: LSPS0ClientHandler, lsps0_service_handler: Option, #[cfg(lsps1_service)] - lsps1_service_handler: Option>, - lsps1_client_handler: Option>, - lsps2_service_handler: Option>, - lsps2_client_handler: Option>, - lsps5_service_handler: Option>, - lsps5_client_handler: Option>, + lsps1_service_handler: Option>, + lsps1_client_handler: Option>, + lsps2_service_handler: Option>, + lsps2_client_handler: Option>, + lsps5_service_handler: Option>, + lsps5_client_handler: Option>, service_config: Option, _client_config: Option, best_block: RwLock>, + last_peer_state_pruning: Mutex>, _chain_source: Option, + time_provider: TP, + pending_msgs_or_needs_persist_notifier: Arc, } #[cfg(feature = "time")] -impl - LiquidityManager> +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + K: Deref + Clone, + > LiquidityManager> where ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, + K::Target: KVStore, { /// Constructor for the [`LiquidityManager`] using the default system clock - pub fn new( + /// + /// Will read persisted service states from the given [`KVStore`]. + pub async fn new( entropy_source: ES, node_signer: NS, channel_manager: CM, chain_source: Option, - chain_params: Option, service_config: Option, + chain_params: Option, kv_store: K, + service_config: Option, client_config: Option, - ) -> Self { + ) -> Result { let time_provider = Arc::new(DefaultTimeProvider); Self::new_with_custom_time_provider( entropy_source, @@ -219,10 +349,12 @@ where channel_manager, chain_source, chain_params, + kv_store, service_config, client_config, time_provider, ) + .await } } @@ -231,28 +363,40 @@ impl< NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, + K: Deref + Clone, TP: Deref + Clone, - > LiquidityManager + > LiquidityManager where ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, + K::Target: KVStore, TP::Target: TimeProvider, { /// Constructor for the [`LiquidityManager`] with a custom time provider. /// + /// Will read persisted service states from the given [`KVStore`]. + /// /// This should be used on non-std platforms where access to the system time is not /// available. /// Sets up the required protocol message handlers based on the given /// [`LiquidityClientConfig`] and [`LiquidityServiceConfig`]. 
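Because construction now reads persisted service state back from the store, the constructors in this hunk are `async` and return a `Result`. A hedged usage fragment (not compilable on its own; every value stands for a component the node already has, and `fs_store` is a hypothetical `Arc`-wrapped `KVStore` implementation):

```rust
let liquidity_manager = LiquidityManager::new(
    entropy_source,
    node_signer,
    channel_manager,
    None,                  // no chain source
    Some(chain_params),
    Arc::clone(&fs_store), // persisted service state is read back from here
    Some(service_config),
    None,                  // no client-side config
)
.await?;
```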
- pub fn new_with_custom_time_provider( + pub async fn new_with_custom_time_provider( entropy_source: ES, node_signer: NS, channel_manager: CM, chain_source: Option, - chain_params: Option, service_config: Option, + chain_params: Option, kv_store: K, + service_config: Option, client_config: Option, time_provider: TP, - ) -> Self { - let pending_messages = Arc::new(MessageQueue::new()); - let pending_events = Arc::new(EventQueue::new()); + ) -> Result { + let pending_msgs_or_needs_persist_notifier = Arc::new(Notifier::new()); + let pending_messages = + Arc::new(MessageQueue::new(Arc::clone(&pending_msgs_or_needs_persist_notifier))); + let persisted_queue = read_event_queue(kv_store.clone()).await?.unwrap_or_default(); + let pending_events = Arc::new(EventQueue::new( + persisted_queue, + kv_store.clone(), + Arc::clone(&pending_msgs_or_needs_persist_notifier), + )); let ignored_peers = RwLock::new(new_hash_set()); let mut supported_protocols = Vec::new(); @@ -267,21 +411,30 @@ where ) }) }); - let lsps2_service_handler = service_config.as_ref().and_then(|config| { - config.lsps2_service_config.as_ref().map(|config| { + + let lsps2_service_handler = if let Some(service_config) = service_config.as_ref() { + if let Some(lsps2_service_config) = service_config.lsps2_service_config.as_ref() { if let Some(number) = - as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER + as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER { supported_protocols.push(number); } - LSPS2ServiceHandler::new( + + let peer_states = read_lsps2_service_peer_states(kv_store.clone()).await?; + Some(LSPS2ServiceHandler::new( + peer_states, Arc::clone(&pending_messages), Arc::clone(&pending_events), channel_manager.clone(), - config.clone(), - ) - }) - }); + kv_store.clone(), + lsps2_service_config.clone(), + )) + } else { + None + } + } else { + None + }; let lsps5_client_handler = client_config.as_ref().and_then(|config| { config.lsps5_client_config.as_ref().map(|config| { @@ -294,24 +447,31 @@ where }) }); - let lsps5_service_handler = service_config.as_ref().and_then(|config| { - config.lsps5_service_config.as_ref().map(|config| { + let lsps5_service_handler = if let Some(service_config) = service_config.as_ref() { + if let Some(lsps5_service_config) = service_config.lsps5_service_config.as_ref() { if let Some(number) = - as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER + as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER { supported_protocols.push(number); } - LSPS5ServiceHandler::new_with_time_provider( + let peer_states = read_lsps5_service_peer_states(kv_store.clone()).await?; + Some(LSPS5ServiceHandler::new_with_time_provider( + peer_states, Arc::clone(&pending_events), Arc::clone(&pending_messages), channel_manager.clone(), + kv_store.clone(), node_signer, - config.clone(), - time_provider, - ) - }) - }); + lsps5_service_config.clone(), + time_provider.clone(), + )) + } else { + None + } + } else { + None + }; let lsps1_client_handler = client_config.as_ref().and_then(|config| { config.lsps1_client_config.as_ref().map(|config| { @@ -327,7 +487,7 @@ where #[cfg(lsps1_service)] let lsps1_service_handler = service_config.as_ref().and_then(|config| { if let Some(number) = - as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER + as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER { supported_protocols.push(number); } @@ -355,7 +515,9 @@ where None }; - Self { + let last_peer_state_pruning = Mutex::new(None); + + Ok(Self { pending_messages, pending_events, request_id_to_method_map: Mutex::new(new_hash_map()), @@ -372,12 +534,15 @@ where 
 			service_config,
 			_client_config: client_config,
 			best_block: RwLock::new(chain_params.map(|chain_params| chain_params.best_block)),
+			last_peer_state_pruning,
 			_chain_source: chain_source,
-		}
+			time_provider,
+			pending_msgs_or_needs_persist_notifier,
+		})
 	}

 	/// Returns a reference to the LSPS0 client-side handler.
-	pub fn lsps0_client_handler(&self) -> &LSPS0ClientHandler {
+	pub fn lsps0_client_handler(&self) -> &LSPS0ClientHandler {
 		&self.lsps0_client_handler
 	}

@@ -388,68 +553,63 @@ where

 	/// Returns a reference to the LSPS1 client-side handler.
 	///
-	/// The returned hendler allows to initiate the LSPS1 client-side flow, i.e., allows to request
+	/// The returned handler allows to initiate the LSPS1 client-side flow, i.e., allows to request
 	/// channels from the configured LSP.
-	pub fn lsps1_client_handler(&self) -> Option<&LSPS1ClientHandler> {
+	pub fn lsps1_client_handler(&self) -> Option<&LSPS1ClientHandler> {
 		self.lsps1_client_handler.as_ref()
 	}

 	/// Returns a reference to the LSPS1 server-side handler.
 	#[cfg(lsps1_service)]
-	pub fn lsps1_service_handler(&self) -> Option<&LSPS1ServiceHandler> {
+	pub fn lsps1_service_handler(&self) -> Option<&LSPS1ServiceHandler> {
 		self.lsps1_service_handler.as_ref()
 	}

 	/// Returns a reference to the LSPS2 client-side handler.
 	///
-	/// The returned hendler allows to initiate the LSPS2 client-side flow. That is, it allows to
+	/// The returned handler allows to initiate the LSPS2 client-side flow. That is, it allows to
 	/// retrieve all necessary data to create 'just-in-time' invoices that, when paid, will have
 	/// the configured LSP open a 'just-in-time' channel.
-	pub fn lsps2_client_handler(&self) -> Option<&LSPS2ClientHandler> {
+	pub fn lsps2_client_handler(&self) -> Option<&LSPS2ClientHandler> {
 		self.lsps2_client_handler.as_ref()
 	}

 	/// Returns a reference to the LSPS2 server-side handler.
 	///
-	/// The returned hendler allows to initiate the LSPS2 service-side flow.
-	pub fn lsps2_service_handler(&self) -> Option<&LSPS2ServiceHandler> {
+	/// The returned handler allows to initiate the LSPS2 service-side flow.
+	pub fn lsps2_service_handler(&self) -> Option<&LSPS2ServiceHandler> {
 		self.lsps2_service_handler.as_ref()
 	}

 	/// Returns a reference to the LSPS5 client-side handler.
 	///
-	/// The returned hendler allows to initiate the LSPS5 client-side flow. That is, it allows to
-	pub fn lsps5_client_handler(&self) -> Option<&LSPS5ClientHandler> {
+	/// The returned handler allows to initiate the LSPS5 client-side flow. That is, it allows to
+	/// register and manage webhooks with the configured LSP.
+	pub fn lsps5_client_handler(&self) -> Option<&LSPS5ClientHandler> {
 		self.lsps5_client_handler.as_ref()
 	}

 	/// Returns a reference to the LSPS5 server-side handler.
 	///
-	/// The returned hendler allows to initiate the LSPS5 service-side flow.
-	pub fn lsps5_service_handler(&self) -> Option<&LSPS5ServiceHandler> {
+	/// The returned handler allows to initiate the LSPS5 service-side flow.
+	pub fn lsps5_service_handler(&self) -> Option<&LSPS5ServiceHandler> {
 		self.lsps5_service_handler.as_ref()
 	}

 	/// Returns a [`Future`] that will complete when the next batch of pending messages is ready to
-	/// be processed.
+	/// be processed *or* we need to be repersisted.
 	///
 	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
 	/// [`LiquidityManager`] and should instead register actions to be taken later.
- pub fn get_pending_msgs_future(&self) -> Future { - self.pending_messages.get_pending_msgs_future() + pub fn get_pending_msgs_or_needs_persist_future(&self) -> Future { + self.pending_msgs_or_needs_persist_notifier.get_future() } /// Blocks the current thread until next event is ready and returns it. /// - /// Typically you would spawn a thread or task that calls this in a loop. - /// - /// **Note**: Users must handle events as soon as possible to avoid an increased event queue - /// memory footprint. We will start dropping any generated events after - /// [`MAX_EVENT_QUEUE_SIZE`] has been reached. - /// - /// [`MAX_EVENT_QUEUE_SIZE`]: crate::events::MAX_EVENT_QUEUE_SIZE + /// Only available via the [`LiquidityManagerSync`] interface to avoid having users + /// accidentally blocking their async contexts. #[cfg(feature = "std")] - pub fn wait_next_event(&self) -> LiquidityEvent { + pub(crate) fn wait_next_event(&self) -> LiquidityEvent { self.pending_events.wait_next_event() } @@ -492,6 +652,43 @@ where self.pending_events.get_and_clear_pending_events() } + /// Persists the state of the service handlers towards the given [`KVStore`] implementation. + /// + /// This will be regularly called by LDK's background processor if necessary and only needs to + /// be called manually if it's not utilized. + pub async fn persist(&self) -> Result<(), lightning::io::Error> { + let should_prune_state = { + const PRUNE_INTERVAL: Duration = Duration::from_secs(600); + let mut last_peer_state_pruning_lock = self.last_peer_state_pruning.lock().unwrap(); + let now = self.time_provider.duration_since_epoch(); + if last_peer_state_pruning_lock.map_or(true, |l| l + PRUNE_INTERVAL < now) { + *last_peer_state_pruning_lock = Some(now); + true + } else { + false + } + }; + + // TODO: We should eventually persist in parallel. 
+		self.pending_events.persist().await?;
+
+		if let Some(lsps2_service_handler) = self.lsps2_service_handler.as_ref() {
+			if should_prune_state {
+				lsps2_service_handler.prune_peer_state().await;
+			}
+			lsps2_service_handler.persist().await?;
+		}
+
+		if let Some(lsps5_service_handler) = self.lsps5_service_handler.as_ref() {
+			if should_prune_state {
+				lsps5_service_handler.prune_peer_state().await;
+			}
+			lsps5_service_handler.persist().await?;
+		}
+
+		Ok(())
+	}
+
 	fn handle_lsps_message(
 		&self, msg: LSPSMessage, sender_node_id: &PublicKey,
 	) -> Result<(), lightning::ln::msgs::LightningError> {
@@ -608,13 +805,15 @@ impl<
 		NS: Deref + Clone,
 		CM: Deref + Clone,
 		C: Deref + Clone,
+		K: Deref + Clone,
 		TP: Deref + Clone,
-	> CustomMessageReader for LiquidityManager
+	> CustomMessageReader for LiquidityManager
 where
 	ES::Target: EntropySource,
 	NS::Target: NodeSigner,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
+	K::Target: KVStore,
 	TP::Target: TimeProvider,
 {
 	type CustomMessage = RawLSPSMessage;
@@ -636,13 +835,15 @@ impl<
 		NS: Deref + Clone,
 		CM: Deref + Clone,
 		C: Deref + Clone,
+		K: Deref + Clone,
 		TP: Deref + Clone,
-	> CustomMessageHandler for LiquidityManager
+	> CustomMessageHandler for LiquidityManager
 where
 	ES::Target: EntropySource,
 	NS::Target: NodeSigner,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
+	K::Target: KVStore,
 	TP::Target: TimeProvider,
 {
 	fn handle_custom_message(
@@ -766,13 +967,15 @@ impl<
 		NS: Deref + Clone,
 		CM: Deref + Clone,
 		C: Deref + Clone,
+		K: Deref + Clone,
 		TP: Deref + Clone,
-	> Listen for LiquidityManager
+	> Listen for LiquidityManager
 where
 	ES::Target: EntropySource,
 	NS::Target: NodeSigner,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
+	K::Target: KVStore,
 	TP::Target: TimeProvider,
 {
 	fn filtered_block_connected(
@@ -808,13 +1011,15 @@ impl<
 		NS: Deref + Clone,
 		CM: Deref + Clone,
 		C: Deref + Clone,
+		K: Deref + Clone,
 		TP: Deref + Clone,
-	> Confirm for LiquidityManager
+	> Confirm for LiquidityManager
 where
 	ES::Target: EntropySource,
 	NS::Target: NodeSigner,
 	CM::Target: AChannelManager,
 	C::Target: Filter,
+	K::Target: KVStore,
 	TP::Target: TimeProvider,
 {
 	fn transactions_confirmed(
@@ -835,9 +1040,6 @@ where
 		*self.best_block.write().unwrap() = Some(new_best_block);

 		// TODO: Call best_block_updated on all sub-modules that require it, e.g., LSPS1MessageHandler.
-		if let Some(lsps2_service_handler) = self.lsps2_service_handler.as_ref() {
-			lsps2_service_handler.prune_peer_state();
-		}
 	}

 	fn get_relevant_txids(&self) -> Vec<(bitcoin::Txid, u32, Option)> {
@@ -845,3 +1047,378 @@ where
 		Vec::new()
 	}
 }
+
+/// A synchronous wrapper around [`LiquidityManager`] to be used in contexts where async is not
+/// available.
+pub struct LiquidityManagerSync<
+	ES: Deref + Clone,
+	NS: Deref + Clone,
+	CM: Deref + Clone,
+	C: Deref + Clone,
+	KS: Deref + Clone,
+	TP: Deref + Clone,
+> where
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	CM::Target: AChannelManager,
+	C::Target: Filter,
+	KS::Target: KVStoreSync,
+	TP::Target: TimeProvider,
+{
+	inner: Arc>, TP>>,
+}
+
+#[cfg(feature = "time")]
+impl<
+		ES: Deref + Clone,
+		NS: Deref + Clone,
+		CM: Deref + Clone,
+		C: Deref + Clone,
+		KS: Deref + Clone,
+	> LiquidityManagerSync>
+where
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	CM::Target: AChannelManager,
+	KS::Target: KVStoreSync,
+	C::Target: Filter,
+{
+	/// Constructor for the [`LiquidityManagerSync`] using the default system clock
+	///
+	/// Wraps [`LiquidityManager::new`].
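The `persist()` method above gates pruning behind a ten-minute `PRUNE_INTERVAL`, so repeated persistence passes stay cheap. Factored out, that time gate is a small reusable pattern; a standalone sketch (using `Instant` for simplicity, whereas the handler uses a `TimeProvider` and durations since the epoch):

```rust
use std::sync::Mutex;
use std::time::{Duration, Instant};

// Runs the given closure at most once per `interval`.
struct Throttle {
    interval: Duration,
    last_run: Mutex<Option<Instant>>,
}

impl Throttle {
    fn maybe_run(&self, f: impl FnOnce()) {
        let should_run = {
            let mut last = self.last_run.lock().unwrap();
            let now = Instant::now();
            if last.map_or(true, |l| l + self.interval < now) {
                *last = Some(now);
                true
            } else {
                false
            }
        }; // Lock dropped before the (potentially slow) work runs.
        if should_run {
            f();
        }
    }
}
```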
+ pub fn new( + entropy_source: ES, node_signer: NS, channel_manager: CM, chain_source: Option, + chain_params: Option, kv_store_sync: KS, + service_config: Option, + client_config: Option, + ) -> Result { + let kv_store = Arc::new(KVStoreSyncWrapper(kv_store_sync)); + + let mut fut = Box::pin(LiquidityManager::new( + entropy_source, + node_signer, + channel_manager, + chain_source, + chain_params, + kv_store, + service_config, + client_config, + )); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + let inner = match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("LiquidityManager::new should not be pending in a sync context"); + }, + }?; + Ok(Self { inner: Arc::new(inner) }) + } +} + +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + KS: Deref + Clone, + TP: Deref + Clone, + > LiquidityManagerSync +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + CM::Target: AChannelManager, + C::Target: Filter, + KS::Target: KVStoreSync, + TP::Target: TimeProvider, +{ + /// Constructor for the [`LiquidityManagerSync`] with a custom time provider. + /// + /// Wraps [`LiquidityManager::new_with_custom_time_provider`]. + pub fn new_with_custom_time_provider( + entropy_source: ES, node_signer: NS, channel_manager: CM, chain_source: Option, + chain_params: Option, kv_store_sync: KS, + service_config: Option, + client_config: Option, time_provider: TP, + ) -> Result { + let kv_store = Arc::new(KVStoreSyncWrapper(kv_store_sync)); + let mut fut = Box::pin(LiquidityManager::new_with_custom_time_provider( + entropy_source, + node_signer, + channel_manager, + chain_source, + chain_params, + kv_store, + service_config, + client_config, + time_provider, + )); + + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + let inner = match fut.as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete. + unreachable!("LiquidityManager::new should not be pending in a sync context"); + }, + }?; + Ok(Self { inner: Arc::new(inner) }) + } + + /// Returns a reference to the LSPS0 client-side handler. + /// + /// Wraps [`LiquidityManager::lsps0_client_handler`]. + pub fn lsps0_client_handler(&self) -> &LSPS0ClientHandler>> { + self.inner.lsps0_client_handler() + } + + /// Returns a reference to the LSPS0 server-side handler. + /// + /// Wraps [`LiquidityManager::lsps0_service_handler`]. + pub fn lsps0_service_handler(&self) -> Option<&LSPS0ServiceHandler> { + self.inner.lsps0_service_handler() + } + + /// Returns a reference to the LSPS1 client-side handler. + /// + /// Wraps [`LiquidityManager::lsps1_client_handler`]. + pub fn lsps1_client_handler( + &self, + ) -> Option<&LSPS1ClientHandler>>> { + self.inner.lsps1_client_handler() + } + + /// Returns a reference to the LSPS1 server-side handler. + /// + /// Wraps [`LiquidityManager::lsps1_service_handler`]. + #[cfg(lsps1_service)] + pub fn lsps1_service_handler( + &self, + ) -> Option<&LSPS1ServiceHandler>>> { + self.inner.lsps1_service_handler() + } + + /// Returns a reference to the LSPS2 client-side handler. + /// + /// Wraps [`LiquidityManager::lsps2_client_handler`]. 
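Both sync constructors above wrap the caller's `KVStoreSync` in a `KVStoreSyncWrapper` before handing it to the async `LiquidityManager`. The adapter idea, reduced to a hypothetical sketch: do the blocking work eagerly and hand back an already-completed future, so any future composed solely of such writes resolves on its first poll, which is exactly what lets the wrappers poll once and treat `Pending` as unreachable:

```rust
use core::future::Future;

// Hypothetical sync store trait standing in for `KVStoreSync`.
trait StoreSync {
    fn write(&self, key: &str, value: Vec<u8>) -> std::io::Result<()>;
}

struct SyncWrapper<S: StoreSync>(S);

impl<S: StoreSync> SyncWrapper<S> {
    fn write(&self, key: &str, value: Vec<u8>) -> impl Future<Output = std::io::Result<()>> {
        // The blocking call happens here, eagerly; the returned future merely
        // hands the result back and is ready on its first poll.
        let res = self.0.write(key, value);
        async move { res }
    }
}
```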
+ pub fn lsps2_client_handler( + &self, + ) -> Option<&LSPS2ClientHandler>>> { + self.inner.lsps2_client_handler() + } + + /// Returns a reference to the LSPS2 server-side handler. + /// + /// Wraps [`LiquidityManager::lsps2_service_handler`]. + pub fn lsps2_service_handler<'a>( + &'a self, + ) -> Option>>> { + self.inner.lsps2_service_handler.as_ref().map(|r| LSPS2ServiceHandlerSync::from_inner(r)) + } + + /// Returns a reference to the LSPS5 client-side handler. + /// + /// Wraps [`LiquidityManager::lsps5_client_handler`]. + pub fn lsps5_client_handler( + &self, + ) -> Option<&LSPS5ClientHandler>>> { + self.inner.lsps5_client_handler() + } + + /// Returns a reference to the LSPS5 server-side handler. + /// + /// Wraps [`LiquidityManager::lsps5_service_handler`]. + pub fn lsps5_service_handler( + &self, + ) -> Option<&LSPS5ServiceHandler>, TP>> { + self.inner.lsps5_service_handler() + } + + /// Returns a [`Future`] that will complete when the next batch of pending messages is ready to + /// be processed *or* our state needs to be repersisted. + /// + /// Wraps [`LiquidityManager::get_pending_msgs_or_needs_persist_future`]. + pub fn get_pending_msgs_or_needs_persist_future(&self) -> Future { + self.inner.get_pending_msgs_or_needs_persist_future() + } + + /// Blocks the current thread until the next event is ready and returns it. + /// + /// Typically you would spawn a thread or task that calls this in a loop. + /// + /// **Note**: Users must handle events as soon as possible to avoid an increased event queue + /// memory footprint. We will start dropping any generated events after + /// [`MAX_EVENT_QUEUE_SIZE`] has been reached. + /// + /// [`MAX_EVENT_QUEUE_SIZE`]: crate::events::MAX_EVENT_QUEUE_SIZE + #[cfg(feature = "std")] + pub fn wait_next_event(&self) -> LiquidityEvent { + self.inner.wait_next_event() + } + + /// Returns `Some` if an event is ready. + /// + /// Wraps [`LiquidityManager::next_event`]. + pub fn next_event(&self) -> Option<LiquidityEvent> { + self.inner.next_event() + } + + /// Returns and clears all events without blocking. + /// + /// Wraps [`LiquidityManager::get_and_clear_pending_events`]. + pub fn get_and_clear_pending_events(&self) -> Vec<LiquidityEvent> { + self.inner.get_and_clear_pending_events() + } + + /// Persists the state of the service handlers to the given [`KVStoreSync`] implementation. + /// + /// Wraps [`LiquidityManager::persist`]. + pub fn persist(&self) -> Result<(), lightning::io::Error> { + let mut waker = dummy_waker(); + let mut ctx = task::Context::from_waker(&mut waker); + match Box::pin(self.inner.persist()).as_mut().poll(&mut ctx) { + task::Poll::Ready(result) => result, + task::Poll::Pending => { + // In a sync context, we can't wait for the future to complete.
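+ // This is expected to be infallible: the future returned by the async `persist` only + // awaits `KVStoreSyncWrapper`-backed operations, which complete on their first poll.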
+ unreachable!("LiquidityManager::persist should not be pending in a sync context"); + }, + } + } +} + +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + KS: Deref + Clone, + TP: Deref + Clone, + > CustomMessageReader for LiquidityManagerSync<ES, NS, CM, C, KS, TP> +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + CM::Target: AChannelManager, + C::Target: Filter, + KS::Target: KVStoreSync, + TP::Target: TimeProvider, +{ + type CustomMessage = RawLSPSMessage; + + fn read<RD: lightning::io::Read>( + &self, message_type: u16, buffer: &mut RD, + ) -> Result<Option<Self::CustomMessage>, lightning::ln::msgs::DecodeError> { + self.inner.read(message_type, buffer) + } +} + +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + KS: Deref + Clone, + TP: Deref + Clone, + > CustomMessageHandler for LiquidityManagerSync<ES, NS, CM, C, KS, TP> +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + CM::Target: AChannelManager, + C::Target: Filter, + KS::Target: KVStoreSync, + TP::Target: TimeProvider, +{ + fn handle_custom_message( + &self, msg: Self::CustomMessage, sender_node_id: PublicKey, + ) -> Result<(), lightning::ln::msgs::LightningError> { + self.inner.handle_custom_message(msg, sender_node_id) + } + + fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { + self.inner.get_and_clear_pending_msg() + } + + fn provided_node_features(&self) -> NodeFeatures { + self.inner.provided_node_features() + } + + fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures { + self.inner.provided_init_features(their_node_id) + } + + fn peer_disconnected(&self, counterparty_node_id: bitcoin::secp256k1::PublicKey) { + self.inner.peer_disconnected(counterparty_node_id) + } + fn peer_connected( + &self, counterparty_node_id: bitcoin::secp256k1::PublicKey, + init_msg: &lightning::ln::msgs::Init, inbound: bool, + ) -> Result<(), ()> { + self.inner.peer_connected(counterparty_node_id, init_msg, inbound) + } +} + +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + KS: Deref + Clone, + TP: Deref + Clone, + > Listen for LiquidityManagerSync<ES, NS, CM, C, KS, TP> +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + CM::Target: AChannelManager, + C::Target: Filter, + KS::Target: KVStoreSync, + TP::Target: TimeProvider, +{ + fn filtered_block_connected( + &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, + height: u32, + ) { + self.inner.filtered_block_connected(header, txdata, height) + } + + fn blocks_disconnected(&self, fork_point: BestBlock) { + self.inner.blocks_disconnected(fork_point); + } +} + +impl< + ES: Deref + Clone, + NS: Deref + Clone, + CM: Deref + Clone, + C: Deref + Clone, + KS: Deref + Clone, + TP: Deref + Clone, + > Confirm for LiquidityManagerSync<ES, NS, CM, C, KS, TP> +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + CM::Target: AChannelManager, + C::Target: Filter, + KS::Target: KVStoreSync, + TP::Target: TimeProvider, +{ + fn transactions_confirmed( + &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, + height: u32, + ) { + self.inner.transactions_confirmed(header, txdata, height) + } + + fn transaction_unconfirmed(&self, txid: &bitcoin::Txid) { + self.inner.transaction_unconfirmed(txid) + } + + fn best_block_updated(&self, header: &bitcoin::block::Header, height: u32) { + self.inner.best_block_updated(header, height) + } + + fn get_relevant_txids(&self) -> Vec<(bitcoin::Txid, u32, Option<BlockHash>)> { + self.inner.get_relevant_txids() + } +}
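The sync facade above leans on a single trick throughout: pin the async call, poll it exactly once with a no-op waker, and treat `Pending` as unreachable, which is sound only because every `.await` inside resolves against the blocking `KVStoreSync` and thus completes on the first poll. A minimal, self-contained sketch of that pattern (the `noop_waker` and `expect_ready` helpers below are illustrative stand-ins for the patch's `dummy_waker()` usage, not part of the patch):

use std::future::Future;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A waker that does nothing; we never expect the polled future to suspend.
fn noop_waker() -> Waker {
    fn clone(_: *const ()) -> RawWaker {
        RawWaker::new(std::ptr::null(), &VTABLE)
    }
    fn noop(_: *const ()) {}
    static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, noop, noop, noop);
    unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) }
}

// Polls `fut` exactly once and panics if it is not immediately ready.
fn expect_ready<T>(fut: impl Future<Output = T>) -> T {
    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);
    match Box::pin(fut).as_mut().poll(&mut cx) {
        Poll::Ready(value) => value,
        Poll::Pending => unreachable!("future awaited something that actually suspends"),
    }
}

fn main() {
    // An async block that never suspends is ready on its first poll.
    assert_eq!(expect_ready(async { 40 + 2 }), 42);
}

The same reasoning explains why `LiquidityManagerSync::persist` can expose a plain `Result`: the wrapped async `persist` only awaits `KVStoreSyncWrapper`-backed operations.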
diff --git a/lightning-liquidity/src/message_queue.rs b/lightning-liquidity/src/message_queue.rs index d097573cf04..8b248d8c6c6 100644 --- a/lightning-liquidity/src/message_queue.rs +++ b/lightning-liquidity/src/message_queue.rs @@ -13,9 +13,9 @@ use alloc::collections::VecDeque; use alloc::vec::Vec; use crate::lsps0::ser::LSPSMessage; -use crate::sync::Mutex; +use crate::sync::{Arc, Mutex}; -use lightning::util::wakers::{Future, Notifier}; +use lightning::util::wakers::Notifier; use bitcoin::secp256k1::PublicKey; @@ -24,13 +24,12 @@ use bitcoin::secp256k1::PublicKey; /// [`LiquidityManager`]: crate::LiquidityManager pub struct MessageQueue { queue: Mutex<VecDeque<(PublicKey, LSPSMessage)>>, - pending_msgs_notifier: Notifier, + pending_msgs_notifier: Arc<Notifier>, } impl MessageQueue { - pub(crate) fn new() -> Self { + pub(crate) fn new(pending_msgs_notifier: Arc<Notifier>) -> Self { let queue = Mutex::new(VecDeque::new()); - let pending_msgs_notifier = Notifier::new(); Self { queue, pending_msgs_notifier } } @@ -38,10 +37,6 @@ impl MessageQueue { self.queue.lock().unwrap().drain(..).collect() } - pub(crate) fn get_pending_msgs_future(&self) -> Future { - self.pending_msgs_notifier.get_future() - } - pub(crate) fn notifier(&self) -> MessageQueueNotifierGuard<'_> { MessageQueueNotifierGuard { msg_queue: self, buffer: VecDeque::new() } } diff --git a/lightning-liquidity/src/persist.rs b/lightning-liquidity/src/persist.rs new file mode 100644 index 00000000000..e6077a88d51 --- /dev/null +++ b/lightning-liquidity/src/persist.rs @@ -0,0 +1,173 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE +// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Types and utils for persistence. + +use crate::events::{EventQueueDeserWrapper, LiquidityEvent}; +use crate::lsps2::service::PeerState as LSPS2ServicePeerState; +use crate::lsps5::service::PeerState as LSPS5ServicePeerState; + +use lightning::io::Cursor; +use lightning::util::persist::KVStore; +use lightning::util::ser::Readable; + +use bitcoin::secp256k1::PublicKey; + +use alloc::collections::VecDeque; +use alloc::vec::Vec; + +use core::ops::Deref; +use core::str::FromStr; + +/// The primary namespace under which the [`LiquidityManager`] will be persisted. +/// +/// [`LiquidityManager`]: crate::LiquidityManager +pub const LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "liquidity"; + +/// The secondary namespace under which the [`LiquidityManager`] event queue will be persisted. +/// +/// [`LiquidityManager`]: crate::LiquidityManager +pub const LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; + +/// The key under which the [`LiquidityManager`] event queue will be persisted. +/// +/// [`LiquidityManager`]: crate::LiquidityManager +pub const LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_KEY: &str = "event_queue"; + +/// The secondary namespace under which the [`LSPS2ServiceHandler`] data will be persisted. +/// +/// [`LSPS2ServiceHandler`]: crate::lsps2::service::LSPS2ServiceHandler +pub const LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE: &str = "lsps2_service"; + +/// The secondary namespace under which the [`LSPS5ServiceHandler`] data will be persisted. +/// +/// [`LSPS5ServiceHandler`]: crate::lsps5::service::LSPS5ServiceHandler +pub const LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE: &str = "lsps5_service"; +
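+// Per-peer state is persisted as one entry per counterparty: entries live under the shared +// primary namespace and a handler-specific secondary namespace, keyed by the counterparty's +// serialized `PublicKey` (see the readers below).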
+pub(crate) async fn read_event_queue<K: Deref>( + kv_store: K, +) -> Result<Option<VecDeque<LiquidityEvent>>, lightning::io::Error> +where + K::Target: KVStore, +{ + let read_fut = kv_store.read( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_KEY, + ); + + let mut reader = match read_fut.await { + Ok(r) => Cursor::new(r), + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + // The key wasn't found, which is expected on a first run before anything was persisted. + return Ok(None); + } else { + return Err(e); + } + }, + }; + + let queue: EventQueueDeserWrapper = Readable::read(&mut reader).map_err(|_| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + "Failed to deserialize liquidity event queue", + ) + })?; + + Ok(Some(queue.0)) +} + +pub(crate) async fn read_lsps2_service_peer_states<K: Deref>( + kv_store: K, +) -> Result<Vec<(PublicKey, LSPS2ServicePeerState)>, lightning::io::Error> +where + K::Target: KVStore, +{ + let mut res = Vec::new(); + + for stored_key in kv_store + .list( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, + ) + .await? + { + let mut reader = Cursor::new( + kv_store + .read( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, + &stored_key, + ) + .await?, + ); + + let peer_state = LSPS2ServicePeerState::read(&mut reader).map_err(|_| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + "Failed to deserialize LSPS2 peer state", + ) + })?; + + let key = PublicKey::from_str(&stored_key).map_err(|_| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + "Failed to deserialize stored key entry", + ) + })?; + + res.push((key, peer_state)); + } + Ok(res) +} + +pub(crate) async fn read_lsps5_service_peer_states<K: Deref>( + kv_store: K, +) -> Result<Vec<(PublicKey, LSPS5ServicePeerState)>, lightning::io::Error> +where + K::Target: KVStore, +{ + let mut res = Vec::new(); + + for stored_key in kv_store + .list( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, + ) + .await?
+ { + let mut reader = Cursor::new( + kv_store + .read( + LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE, + &stored_key, + ) + .await?, + ); + + let peer_state = LSPS5ServicePeerState::read(&mut reader).map_err(|_| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + "Failed to deserialize LSPS5 peer state", + ) + })?; + + let key = PublicKey::from_str(&stored_key).map_err(|_| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + "Failed to deserialize stored key entry", + ) + })?; + + res.push((key, peer_state)); + } + Ok(res) +} diff --git a/lightning-liquidity/src/utils/async_poll.rs b/lightning-liquidity/src/utils/async_poll.rs new file mode 120000 index 00000000000..9cbb5eb5170 --- /dev/null +++ b/lightning-liquidity/src/utils/async_poll.rs @@ -0,0 +1 @@ +../../../lightning/src/util/async_poll.rs \ No newline at end of file diff --git a/lightning-liquidity/src/utils/mod.rs b/lightning-liquidity/src/utils/mod.rs index 637c62c0d81..b66d3eb7ead 100644 --- a/lightning-liquidity/src/utils/mod.rs +++ b/lightning-liquidity/src/utils/mod.rs @@ -7,6 +7,9 @@ use lightning::sign::EntropySource; use crate::lsps0::ser::LSPSRequestId; +#[allow(dead_code)] +#[allow(unused_imports)] +pub(crate) mod async_poll; pub mod time; /// Converts a human-readable string representation of a short channel ID (SCID) diff --git a/lightning-liquidity/tests/common/mod.rs b/lightning-liquidity/tests/common/mod.rs index 013378f2cb0..08d705c285d 100644 --- a/lightning-liquidity/tests/common/mod.rs +++ b/lightning-liquidity/tests/common/mod.rs @@ -1,12 +1,12 @@ #![cfg(test)] use lightning_liquidity::utils::time::TimeProvider; -use lightning_liquidity::{LiquidityClientConfig, LiquidityManager, LiquidityServiceConfig}; +use lightning_liquidity::{LiquidityClientConfig, LiquidityManagerSync, LiquidityServiceConfig}; use lightning::chain::{BestBlock, Filter}; use lightning::ln::channelmanager::ChainParameters; use lightning::ln::functional_test_utils::{Node, TestChannelManager}; -use lightning::util::test_utils::TestKeysInterface; +use lightning::util::test_utils::{TestKeysInterface, TestStore}; use bitcoin::Network; @@ -19,35 +19,40 @@ pub(crate) struct LSPSNodes<'a, 'b, 'c> { pub client_node: LiquidityNode<'a, 'b, 'c>, } -pub(crate) fn create_service_and_client_nodes<'a, 'b, 'c>( +pub(crate) fn create_service_and_client_nodes_with_kv_stores<'a, 'b, 'c>( nodes: Vec>, service_config: LiquidityServiceConfig, client_config: LiquidityClientConfig, time_provider: Arc, + service_kv_store: Arc, client_kv_store: Arc, ) -> LSPSNodes<'a, 'b, 'c> { let chain_params = ChainParameters { network: Network::Testnet, best_block: BestBlock::from_network(Network::Testnet), }; - let service_lm = LiquidityManager::new_with_custom_time_provider( + let service_lm = LiquidityManagerSync::new_with_custom_time_provider( nodes[0].keys_manager, nodes[0].keys_manager, nodes[0].node, None::>, Some(chain_params.clone()), + service_kv_store, Some(service_config), None, Arc::clone(&time_provider), - ); + ) + .unwrap(); - let client_lm = LiquidityManager::new_with_custom_time_provider( + let client_lm = LiquidityManagerSync::new_with_custom_time_provider( nodes[1].keys_manager, nodes[1].keys_manager, nodes[1].node, None::>, Some(chain_params), + client_kv_store, None, Some(client_config), time_provider, - ); + ) + .unwrap(); let mut iter = nodes.into_iter(); let service_node = LiquidityNode::new(iter.next().unwrap(), service_lm); @@ -56,13 +61,31 @@ pub(crate) fn 
create_service_and_client_nodes<'a, 'b, 'c>( LSPSNodes { service_node, client_node } } +#[allow(unused)] +pub(crate) fn create_service_and_client_nodes<'a, 'b, 'c>( + nodes: Vec>, service_config: LiquidityServiceConfig, + client_config: LiquidityClientConfig, time_provider: Arc, +) -> LSPSNodes<'a, 'b, 'c> { + let service_kv_store = Arc::new(TestStore::new(false)); + let client_kv_store = Arc::new(TestStore::new(false)); + create_service_and_client_nodes_with_kv_stores( + nodes, + service_config, + client_config, + time_provider, + service_kv_store, + client_kv_store, + ) +} + pub(crate) struct LiquidityNode<'a, 'b, 'c> { pub inner: Node<'a, 'b, 'c>, - pub liquidity_manager: LiquidityManager< + pub liquidity_manager: LiquidityManagerSync< &'c TestKeysInterface, &'c TestKeysInterface, &'a TestChannelManager<'b, 'c>, Arc, + Arc, Arc, >, } @@ -70,11 +93,12 @@ pub(crate) struct LiquidityNode<'a, 'b, 'c> { impl<'a, 'b, 'c> LiquidityNode<'a, 'b, 'c> { pub fn new( node: Node<'a, 'b, 'c>, - liquidity_manager: LiquidityManager< + liquidity_manager: LiquidityManagerSync< &'c TestKeysInterface, &'c TestKeysInterface, &'a TestChannelManager<'b, 'c>, Arc, + Arc, Arc, >, ) -> Self { diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 854d6e22136..da884c73dc4 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -2,7 +2,9 @@ mod common; -use common::{create_service_and_client_nodes, get_lsps_message, LSPSNodes, LiquidityNode}; +use common::{ + create_service_and_client_nodes_with_kv_stores, get_lsps_message, LSPSNodes, LiquidityNode, +}; use lightning_liquidity::events::LiquidityEvent; use lightning_liquidity::lsps0::ser::LSPSDateTime; @@ -12,10 +14,11 @@ use lightning_liquidity::lsps2::event::LSPS2ServiceEvent; use lightning_liquidity::lsps2::msgs::LSPS2RawOpeningFeeParams; use lightning_liquidity::lsps2::service::LSPS2ServiceConfig; use lightning_liquidity::lsps2::utils::is_valid_opening_fee_params; -use lightning_liquidity::utils::time::DefaultTimeProvider; -use lightning_liquidity::{LiquidityClientConfig, LiquidityServiceConfig}; +use lightning_liquidity::utils::time::{DefaultTimeProvider, TimeProvider}; +use lightning_liquidity::{LiquidityClientConfig, LiquidityManagerSync, LiquidityServiceConfig}; -use lightning::ln::channelmanager::{InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; +use lightning::chain::{BestBlock, Filter}; +use lightning::ln::channelmanager::{ChainParameters, InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; use lightning::ln::functional_test_utils::{ create_chanmon_cfgs, create_node_cfgs, create_node_chanmgrs, }; @@ -26,6 +29,7 @@ use lightning::routing::router::{RouteHint, RouteHintHop}; use lightning::sign::NodeSigner; use lightning::util::errors::APIError; use lightning::util::logger::Logger; +use lightning::util::test_utils::TestStore; use lightning_invoice::{Bolt11Invoice, InvoiceBuilder, RoutingFees}; @@ -42,8 +46,8 @@ use std::time::Duration; const MAX_PENDING_REQUESTS_PER_PEER: usize = 10; const MAX_TOTAL_PENDING_REQUESTS: usize = 1000; -fn setup_test_lsps2_nodes<'a, 'b, 'c>( - nodes: Vec>, +fn setup_test_lsps2_nodes_with_kv_stores<'a, 'b, 'c>( + nodes: Vec>, service_kv_store: Arc, client_kv_store: Arc, ) -> (LSPSNodes<'a, 'b, 'c>, [u8; 32]) { let promise_secret = [42; 32]; let lsps2_service_config = LSPS2ServiceConfig { promise_secret }; @@ -61,16 +65,26 @@ fn setup_test_lsps2_nodes<'a, 'b, 'c>( lsps2_client_config: 
Some(lsps2_client_config), lsps5_client_config: None, }; - let lsps_nodes = create_service_and_client_nodes( + let lsps_nodes = create_service_and_client_nodes_with_kv_stores( nodes, service_config, client_config, Arc::new(DefaultTimeProvider), + service_kv_store, + client_kv_store, ); (lsps_nodes, promise_secret) } +fn setup_test_lsps2_nodes<'a, 'b, 'c>( + nodes: Vec>, +) -> (LSPSNodes<'a, 'b, 'c>, [u8; 32]) { + let service_kv_store = Arc::new(TestStore::new(false)); + let client_kv_store = Arc::new(TestStore::new(false)); + setup_test_lsps2_nodes_with_kv_stores(nodes, service_kv_store, client_kv_store) +} + fn create_jit_invoice( node: &LiquidityNode<'_, '_, '_>, service_node_id: PublicKey, intercept_scid: u64, cltv_expiry_delta: u32, payment_size_msat: Option, description: &str, expiry_secs: u32, @@ -887,3 +901,199 @@ fn opening_fee_params_menu_is_sorted_by_spec() { panic!("Unexpected event"); } } + +#[test] +fn lsps2_service_handler_persistence_across_restarts() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Create shared KV store for service node that will persist across restarts + let service_kv_store = Arc::new(TestStore::new(false)); + let client_kv_store = Arc::new(TestStore::new(false)); + + let promise_secret = [42; 32]; + let service_config = LiquidityServiceConfig { + #[cfg(lsps1_service)] + lsps1_service_config: None, + lsps2_service_config: Some(LSPS2ServiceConfig { promise_secret }), + lsps5_service_config: None, + advertise_service: true, + }; + let time_provider: Arc = Arc::new(DefaultTimeProvider); + + // Variables to carry state between scopes + let user_channel_id = 42; + let cltv_expiry_delta = 144; + let intercept_scid; + let client_node_id; + + // First scope: Setup, persistence, and dropping of all node objects + { + // Use the helper function with custom KV stores + let (lsps_nodes, _) = setup_test_lsps2_nodes_with_kv_stores( + nodes, + Arc::clone(&service_kv_store), + client_kv_store, + ); + let LSPSNodes { service_node, client_node } = lsps_nodes; + + let service_node_id = service_node.inner.node.get_our_node_id(); + client_node_id = client_node.inner.node.get_our_node_id(); + + let client_handler = client_node.liquidity_manager.lsps2_client_handler().unwrap(); + let service_handler = service_node.liquidity_manager.lsps2_service_handler().unwrap(); + + // Set up a JIT channel request to create state that needs persistence + let _get_info_request_id = client_handler.request_opening_params(service_node_id, None); + let get_info_request = get_lsps_message!(client_node, service_node_id); + service_node + .liquidity_manager + .handle_custom_message(get_info_request, client_node_id) + .unwrap(); + + let get_info_event = service_node.liquidity_manager.next_event().unwrap(); + let request_id = match get_info_event { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::GetInfo { request_id, .. 
}) => { + request_id + }, + _ => panic!("Unexpected event"), + }; + + let raw_opening_params = LSPS2RawOpeningFeeParams { + min_fee_msat: 100, + proportional: 21, + valid_until: LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(), + min_lifetime: 144, + max_client_to_self_delay: 128, + min_payment_size_msat: 1, + max_payment_size_msat: 100_000_000, + }; + + service_handler + .opening_fee_params_generated( + &client_node_id, + request_id.clone(), + vec![raw_opening_params], + ) + .unwrap(); + + let get_info_response = get_lsps_message!(service_node, client_node_id); + client_node + .liquidity_manager + .handle_custom_message(get_info_response, service_node_id) + .unwrap(); + + let opening_fee_params = match client_node.liquidity_manager.next_event().unwrap() { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { + opening_fee_params_menu, + .. + }) => opening_fee_params_menu.first().unwrap().clone(), + _ => panic!("Unexpected event"), + }; + + // Client makes a buy request + let payment_size_msat = Some(1_000_000); + let buy_request_id = client_handler + .select_opening_params(service_node_id, payment_size_msat, opening_fee_params.clone()) + .unwrap(); + + let buy_request = get_lsps_message!(client_node, service_node_id); + service_node.liquidity_manager.handle_custom_message(buy_request, client_node_id).unwrap(); + + let buy_event = service_node.liquidity_manager.next_event().unwrap(); + if let LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::BuyRequest { request_id, .. }) = + buy_event + { + assert_eq!(request_id, buy_request_id); + } else { + panic!("Unexpected event"); + } + + // Service responds with invoice parameters, creating persistent channel state + intercept_scid = service_node.node.get_intercept_scid(); + let client_trusts_lsp = true; + + service_handler + .invoice_parameters_generated( + &client_node_id, + buy_request_id.clone(), + intercept_scid, + cltv_expiry_delta, + client_trusts_lsp, + user_channel_id, + ) + .unwrap(); + + let buy_response = get_lsps_message!(service_node, client_node_id); + client_node.liquidity_manager.handle_custom_message(buy_response, service_node_id).unwrap(); + + let _invoice_params_event = client_node.liquidity_manager.next_event().unwrap(); + + // Trigger persistence by calling persist + service_node.liquidity_manager.persist().unwrap(); + + // All node objects are dropped at the end of this scope + } + + // Second scope: Recovery from persisted store and verification + { + // Create fresh node configurations for restart to avoid connection conflicts + let node_chanmgrs_restart = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_restart = create_network(2, &node_cfgs, &node_chanmgrs_restart); + + // Create a new LiquidityManager with the same configuration and KV store to simulate restart + let chain_params = ChainParameters { + network: Network::Testnet, + best_block: BestBlock::from_network(Network::Testnet), + }; + + let restarted_service_lm = LiquidityManagerSync::new_with_custom_time_provider( + nodes_restart[0].keys_manager, + nodes_restart[0].keys_manager, + nodes_restart[0].node, + None::>, + Some(chain_params), + service_kv_store, + Some(service_config), + None, + time_provider, + ) + .unwrap(); + + let restarted_service_handler = restarted_service_lm.lsps2_service_handler().unwrap(); + + // Verify the state was properly restored by checking if the channel exists + // We can do this by trying to call htlc_intercepted which should succeed if state was restored + let htlc_amount_msat = 1_000_000; + let 
intercept_id = InterceptId([0; 32]); + let payment_hash = PaymentHash([1; 32]); + + let result = restarted_service_handler.htlc_intercepted( + intercept_scid, + intercept_id, + htlc_amount_msat, + payment_hash, + ); + + // This should succeed if the channel state was properly restored + assert!(result.is_ok(), "HTLC interception should succeed with restored state"); + + // Check that we get an OpenChannel event, confirming the state was restored correctly + let event = restarted_service_lm.next_event(); + assert!(event.is_some(), "Should have an event after HTLC interception"); + + if let Some(LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::OpenChannel { + user_channel_id: restored_channel_id, + intercept_scid: restored_scid, + .. + })) = event + { + assert_eq!(restored_channel_id, user_channel_id); + assert_eq!(restored_scid, intercept_scid); + } else { + panic!("Expected OpenChannel event after restart"); + } + } +} diff --git a/lightning-liquidity/tests/lsps5_integration_tests.rs b/lightning-liquidity/tests/lsps5_integration_tests.rs index b05526f4d8b..a3d2ecf00ef 100644 --- a/lightning-liquidity/tests/lsps5_integration_tests.rs +++ b/lightning-liquidity/tests/lsps5_integration_tests.rs @@ -2,11 +2,15 @@ mod common; -use common::{create_service_and_client_nodes, get_lsps_message, LSPSNodes, LiquidityNode}; +use common::{ + create_service_and_client_nodes, create_service_and_client_nodes_with_kv_stores, + get_lsps_message, LSPSNodes, LiquidityNode, +}; +use lightning::chain::{BestBlock, Filter}; use lightning::check_closed_event; use lightning::events::ClosureReason; -use lightning::ln::channelmanager::InterceptId; +use lightning::ln::channelmanager::{ChainParameters, InterceptId}; use lightning::ln::functional_test_utils::{ close_channel, create_chan_between_nodes, create_chanmon_cfgs, create_network, create_node_cfgs, create_node_chanmgrs, Node, @@ -14,6 +18,7 @@ use lightning::ln::functional_test_utils::{ use lightning::ln::msgs::Init; use lightning::ln::peer_handler::CustomMessageHandler; use lightning::util::hash_tables::{HashMap, HashSet}; +use lightning::util::test_utils::TestStore; use lightning_liquidity::events::LiquidityEvent; use lightning_liquidity::lsps0::ser::LSPSDateTime; use lightning_liquidity::lsps2::client::LSPS2ClientConfig; @@ -34,16 +39,20 @@ use lightning_liquidity::lsps5::service::{ }; use lightning_liquidity::lsps5::validator::{LSPS5Validator, MAX_RECENT_SIGNATURES}; use lightning_liquidity::utils::time::{DefaultTimeProvider, TimeProvider}; +use lightning_liquidity::LiquidityManagerSync; use lightning_liquidity::{LiquidityClientConfig, LiquidityServiceConfig}; use lightning_types::payment::PaymentHash; +use bitcoin::Network; + use std::str::FromStr; use std::sync::{Arc, RwLock}; use std::time::Duration; -pub(crate) fn lsps5_test_setup<'a, 'b, 'c>( +pub(crate) fn lsps5_test_setup_with_kv_stores<'a, 'b, 'c>( nodes: Vec>, time_provider: Arc, + service_kv_store: Arc, client_kv_store: Arc, ) -> (LSPSNodes<'a, 'b, 'c>, LSPS5Validator) { let lsps5_service_config = LSPS5ServiceConfig::default(); let service_config = LiquidityServiceConfig { @@ -62,11 +71,13 @@ pub(crate) fn lsps5_test_setup<'a, 'b, 'c>( lsps5_client_config: Some(lsps5_client_config), }; - let lsps_nodes = create_service_and_client_nodes( + let lsps_nodes = create_service_and_client_nodes_with_kv_stores( nodes, service_config, client_config, Arc::clone(&time_provider), + service_kv_store, + client_kv_store, ); let validator = LSPS5Validator::new(); @@ -74,6 +85,14 @@ pub(crate) fn lsps5_test_setup<'a, 
'b, 'c>( (lsps_nodes, validator) } +pub(crate) fn lsps5_test_setup<'a, 'b, 'c>( + nodes: Vec>, time_provider: Arc, +) -> (LSPSNodes<'a, 'b, 'c>, LSPS5Validator) { + let service_kv_store = Arc::new(TestStore::new(false)); + let client_kv_store = Arc::new(TestStore::new(false)); + lsps5_test_setup_with_kv_stores(nodes, time_provider, service_kv_store, client_kv_store) +} + fn assert_lsps5_reject( service_node: &LiquidityNode<'_, '_, '_>, client_node: &LiquidityNode<'_, '_, '_>, ) { @@ -1479,3 +1498,147 @@ fn lsps2_state_allows_lsps5_request() { assert_lsps5_accept(&lsps_nodes.service_node, &lsps_nodes.client_node); } + +#[test] +fn lsps5_service_handler_persistence_across_restarts() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Create shared KV store for service node that will persist across restarts + let service_kv_store = Arc::new(TestStore::new(false)); + let client_kv_store = Arc::new(TestStore::new(false)); + + let service_config = LiquidityServiceConfig { + #[cfg(lsps1_service)] + lsps1_service_config: None, + lsps2_service_config: None, + lsps5_service_config: Some(LSPS5ServiceConfig::default()), + advertise_service: true, + }; + let time_provider: Arc = Arc::new(DefaultTimeProvider); + + // Variables to carry state between scopes + let client_node_id; + let app_name = "PersistenceTestApp"; + let webhook_url = "https://example.org/persistence-test"; + + // First scope: Setup, webhook registration, persistence, and dropping of all node objects + { + // Use the helper function with custom KV stores + let (lsps_nodes, _validator) = lsps5_test_setup_with_kv_stores( + nodes, + Arc::clone(&time_provider), + Arc::clone(&service_kv_store), + client_kv_store, + ); + let LSPSNodes { service_node, client_node } = lsps_nodes; + + // Establish a channel to meet LSPS5 requirements + create_chan_between_nodes(&service_node.inner, &client_node.inner); + + let service_node_id = service_node.inner.node.get_our_node_id(); + client_node_id = client_node.inner.node.get_our_node_id(); + + let client_handler = client_node.liquidity_manager.lsps5_client_handler().unwrap(); + + // Register a webhook to create state that needs persistence + let _request_id = client_handler + .set_webhook(service_node_id, app_name.to_string(), webhook_url.to_string()) + .expect("Register webhook request should succeed"); + let set_webhook_request = get_lsps_message!(client_node, service_node_id); + + service_node + .liquidity_manager + .handle_custom_message(set_webhook_request, client_node_id) + .unwrap(); + + // Consume SendWebhookNotification event for webhook_registered + let webhook_notification_event = service_node.liquidity_manager.next_event().unwrap(); + match webhook_notification_event { + LiquidityEvent::LSPS5Service(LSPS5ServiceEvent::SendWebhookNotification { + counterparty_node_id, + notification, + .. 
+ }) => { + assert_eq!(counterparty_node_id, client_node_id); + assert_eq!(notification.method, WebhookNotificationMethod::LSPS5WebhookRegistered); + }, + _ => panic!("Expected SendWebhookNotification event"), + } + + let set_webhook_response = get_lsps_message!(service_node, client_node_id); + client_node + .liquidity_manager + .handle_custom_message(set_webhook_response, service_node_id) + .unwrap(); + + let webhook_registered_event = client_node.liquidity_manager.next_event().unwrap(); + match webhook_registered_event { + LiquidityEvent::LSPS5Client(LSPS5ClientEvent::WebhookRegistered { + num_webhooks, + .. + }) => { + assert_eq!(num_webhooks, 1); + }, + _ => panic!("Expected WebhookRegistered event"), + } + + // Trigger persistence by calling persist + service_node.liquidity_manager.persist().unwrap(); + + // All node objects are dropped at the end of this scope + } + + // Second scope: Recovery from persisted store and verification + { + // Create fresh node configurations for restart to avoid connection conflicts + let node_chanmgrs_restart = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_restart = create_network(2, &node_cfgs, &node_chanmgrs_restart); + + // Create a new LiquidityManager with the same configuration and KV store to simulate restart + let chain_params = ChainParameters { + network: Network::Testnet, + best_block: BestBlock::from_network(Network::Testnet), + }; + + let restarted_service_lm = LiquidityManagerSync::new_with_custom_time_provider( + nodes_restart[0].keys_manager, + nodes_restart[0].keys_manager, + nodes_restart[0].node, + None::>, + Some(chain_params), + service_kv_store, + Some(service_config), + None, + Arc::clone(&time_provider), + ) + .unwrap(); + + let restarted_service_handler = restarted_service_lm.lsps5_service_handler().unwrap(); + + // Verify the state was properly restored by attempting to send a notification + // This should succeed if the webhook state was properly restored + let result = restarted_service_handler.notify_payment_incoming(client_node_id); + assert!(result.is_ok(), "Notification should succeed with restored webhook state"); + + // Check that we get a SendWebhookNotification event, confirming the state was restored correctly + let event = restarted_service_lm.next_event(); + assert!(event.is_some(), "Should have an event after sending notification"); + + if let Some(LiquidityEvent::LSPS5Service(LSPS5ServiceEvent::SendWebhookNotification { + counterparty_node_id, + url, + notification, + .. + })) = event + { + assert_eq!(counterparty_node_id, client_node_id); + assert_eq!(url.as_str(), webhook_url); + assert_eq!(notification.method, WebhookNotificationMethod::LSPS5PaymentIncoming); + } else { + panic!("Expected SendWebhookNotification event after restart"); + } + } +} diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 024d433cf41..7c070f33e1c 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -9,7 +9,8 @@ //! 
Some utilities to make working with the standard library's [`Future`]s easier -use crate::prelude::*; +use alloc::boxed::Box; +use alloc::vec::Vec; use core::future::Future; use core::marker::Unpin; use core::pin::Pin; diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index aa2d1058805..2578af06cd2 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -553,7 +553,7 @@ impl Readable for BigSize { /// To ensure we only have one valid encoding per value, we add 0xffff to values written as eight /// bytes. Thus, 0xfffe is serialized as 0xfffe, whereas 0xffff is serialized as /// 0xffff0000000000000000 (i.e. read-eight-bytes then zero). -struct CollectionLength(pub u64); +pub struct CollectionLength(pub u64); impl Writeable for CollectionLength { #[inline] fn write(&self, writer: &mut W) -> Result<(), io::Error> {