Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
4ed8022
Split up some tests that have many variants
valentinewallace Mar 17, 2026
f492b10
Remove unnecessary pending_monitor_events clone
valentinewallace Mar 19, 2026
623c3f5
Add persistent_monitor_events flag to monitors/manager
valentinewallace Mar 17, 2026
4b12ecd
Add helper to push monitor events
valentinewallace Mar 18, 2026
be4d80c
Rename pending_monitor_events to _legacy
valentinewallace Mar 18, 2026
e17c387
Add chain::Watch ack_monitor_event API
valentinewallace Mar 16, 2026
6269b59
Add monitor event ids
valentinewallace Mar 18, 2026
a50d521
Ack monitor events immediately
valentinewallace Mar 16, 2026
0a2db12
Support persistent monitor events
valentinewallace Mar 17, 2026
b914b8b
Track recent monitor updates in TestChainMonitor
valentinewallace Mar 30, 2026
31ab44c
Persist user channel id in monitors
valentinewallace Mar 24, 2026
c7be336
Include user channel id in monitor event
valentinewallace Mar 24, 2026
283a67c
Pass best block height to outbound_payments::claim_htlc
valentinewallace Apr 3, 2026
2942cfd
Pass monitor event id to claim_funds_internal
valentinewallace Apr 3, 2026
42646ed
Stop hardcoding from_onchain in monitor ev claim_funds
valentinewallace Apr 3, 2026
a54a4d0
Add EventCompletionAction::AckMonitorEvent
valentinewallace Apr 3, 2026
0151a03
Persistent mon events for off-chain outbound claims
valentinewallace Apr 3, 2026
959c88a
Filter claims from get_onchain_failed_htlcs return value
valentinewallace Apr 3, 2026
727f424
Persistent monitor events for onchain outbound claims
valentinewallace Apr 3, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion fuzz/src/chanmon_consistency.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ use lightning::chain;
use lightning::chain::chaininterface::{
BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType,
};
use lightning::chain::chainmonitor::MonitorEventSource;
use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::chain::{
Expand Down Expand Up @@ -427,9 +428,13 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {

fn release_pending_monitor_events(
&self,
) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
) -> Vec<(OutPoint, ChannelId, Vec<(u64, MonitorEvent)>, PublicKey)> {
return self.chain_monitor.release_pending_monitor_events();
}

/// Acknowledges a previously-released [`MonitorEvent`], identified by `source`,
/// so the wrapped `ChainMonitor` can drop it from its pending set. Pure delegation.
fn ack_monitor_event(&self, source: MonitorEventSource) {
self.chain_monitor.ack_monitor_event(source);
}
}

struct KeyProvider {
Expand Down
4 changes: 2 additions & 2 deletions lightning-liquidity/tests/lsps2_integration_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ use common::{
};

use lightning::events::{ClosureReason, Event};
use lightning::get_event_msg;
use lightning::ln::channelmanager::{
OptionalBolt11PaymentParams, PaymentId, TrustedChannelFeatures,
};
Expand All @@ -17,6 +16,7 @@ use lightning::ln::msgs::BaseMessageHandler;
use lightning::ln::msgs::ChannelMessageHandler;
use lightning::ln::msgs::MessageSendEvent;
use lightning::ln::types::ChannelId;
use lightning::{expect_payment_sent, get_event_msg};

use lightning_liquidity::events::LiquidityEvent;
use lightning_liquidity::lsps0::ser::LSPSDateTime;
Expand Down Expand Up @@ -1340,7 +1340,7 @@ fn client_trusts_lsp_end_to_end_test() {
let broadcasted = service_node.inner.tx_broadcaster.txn_broadcasted.lock().unwrap();
assert!(broadcasted.iter().any(|b| b.compute_txid() == funding_tx.compute_txid()));

expect_payment_sent(&payer_node, preimage.unwrap(), Some(total_fee_msat), true, true);
expect_payment_sent!(&payer_node, preimage.unwrap(), total_fee_msat);
}

fn execute_lsps2_dance(
Expand Down
79 changes: 39 additions & 40 deletions lightning/src/chain/chainmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,21 @@ use core::iter::Cycle;
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};

/// Identifies the source of a [`MonitorEvent`] for acknowledgment via
/// [`chain::Watch::ack_monitor_event`] once the event has been processed.
///
/// The pair (`channel_id`, `event_id`) uniquely identifies one pending event:
/// the channel's monitor is looked up by `channel_id` and the event within it
/// by `event_id`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MonitorEventSource {
/// The event ID assigned by the [`ChannelMonitor`].
pub event_id: u64,
/// The channel from which the [`MonitorEvent`] originated.
pub channel_id: ChannelId,
}

// Serialized as TLVs so additional fields can be added compatibly later.
// NOTE(review): both fields use odd TLV type numbers yet are `required` —
// confirm this matches the crate's even-required/odd-optional convention.
impl_writeable_tlv_based!(MonitorEventSource, {
(1, event_id, required),
(3, channel_id, required),
});

/// A pending operation queued for later execution when `ChainMonitor` is in deferred mode.
enum PendingMonitorOp<ChannelSigner: EcdsaChannelSigner> {
/// A new monitor to insert and persist.
Expand Down Expand Up @@ -366,9 +381,6 @@ pub struct ChainMonitor<
fee_estimator: F,
persister: P,
_entropy_source: ES,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,

Expand Down Expand Up @@ -436,7 +448,6 @@ where
logger,
fee_estimator: feeest,
_entropy_source,
pending_monitor_events: Mutex::new(Vec::new()),
highest_chain_height: AtomicUsize::new(0),
event_notifier: Arc::clone(&event_notifier),
persister: AsyncPersister { persister, event_notifier },
Expand Down Expand Up @@ -657,7 +668,6 @@ where
fee_estimator: feeest,
persister,
_entropy_source,
pending_monitor_events: Mutex::new(Vec::new()),
highest_chain_height: AtomicUsize::new(0),
event_notifier: Arc::new(Notifier::new()),
pending_send_only_events: Mutex::new(Vec::new()),
Expand Down Expand Up @@ -802,16 +812,11 @@ where
return Ok(());
}
let funding_txo = monitor_data.monitor.get_funding_txo();
self.pending_monitor_events.lock().unwrap().push((
monitor_data.monitor.push_monitor_event(MonitorEvent::Completed {
funding_txo,
channel_id,
vec![MonitorEvent::Completed {
funding_txo,
channel_id,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
}],
monitor_data.monitor.get_counterparty_node_id(),
));
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
});

self.event_notifier.notify();
Ok(())
Expand All @@ -824,14 +829,11 @@ where
pub fn force_channel_monitor_updated(&self, channel_id: ChannelId, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
let monitor = &monitors.get(&channel_id).unwrap().monitor;
let counterparty_node_id = monitor.get_counterparty_node_id();
let funding_txo = monitor.get_funding_txo();
self.pending_monitor_events.lock().unwrap().push((
funding_txo,
monitor.push_monitor_event(MonitorEvent::Completed {
funding_txo: monitor.get_funding_txo(),
channel_id,
vec![MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id }],
counterparty_node_id,
));
monitor_update_id,
});
self.event_notifier.notify();
}

Expand Down Expand Up @@ -1266,21 +1268,13 @@ where
// The channel is post-close (funding spend seen, lockdown, or
// holder tx signed). Return InProgress so ChannelManager freezes
// the channel until the force-close MonitorEvents are processed.
// Push a Completed event into pending_monitor_events so it gets
// picked up after the per-monitor events in the next
// release_pending_monitor_events call.
let funding_txo = monitor.get_funding_txo();
let channel_id = monitor.channel_id();
self.pending_monitor_events.lock().unwrap().push((
funding_txo,
channel_id,
vec![MonitorEvent::Completed {
funding_txo,
channel_id,
monitor_update_id: monitor.get_latest_update_id(),
}],
monitor.get_counterparty_node_id(),
));
// Push a Completed event into the monitor so it gets picked up
// in the next release_pending_monitor_events call.
monitor.push_monitor_event(MonitorEvent::Completed {
funding_txo: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
monitor_update_id: monitor.get_latest_update_id(),
});
log_debug!(
logger,
"Deferring completion of ChannelMonitorUpdate id {:?} (channel is post-close)",
Expand Down Expand Up @@ -1645,7 +1639,7 @@ where

fn release_pending_monitor_events(
&self,
) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
) -> Vec<(OutPoint, ChannelId, Vec<(u64, MonitorEvent)>, PublicKey)> {
for (channel_id, update_id) in self.persister.get_and_clear_completed_updates() {
let _ = self.channel_monitor_updated(channel_id, update_id);
}
Expand All @@ -1665,12 +1659,17 @@ where
));
}
}
// Drain pending_monitor_events (which includes deferred post-close
// completions) after per-monitor events so that force-close
// MonitorEvents are processed by ChannelManager first.
pending_monitor_events.extend(self.pending_monitor_events.lock().unwrap().split_off(0));
pending_monitor_events
}

fn ack_monitor_event(&self, source: MonitorEventSource) {
let monitors = self.monitors.read().unwrap();
if let Some(monitor_state) = monitors.get(&source.channel_id) {
monitor_state.monitor.ack_monitor_event(source.event_id);
} else {
debug_assert!(false, "Ack'd monitor events should always have a corresponding monitor");
}
}
}

impl<
Expand Down
Loading
Loading