From d39e34c099466bb400f84cf34f93ad362278f4d1 Mon Sep 17 00:00:00 2001 From: Guy Nir Date: Wed, 10 Dec 2025 14:53:03 +0200 Subject: [PATCH 1/3] apollo_l1_provider: minor updates to some bootstrapping tests of the L1 provider --- .../src/l1_provider_tests.rs | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/apollo_l1_provider/src/l1_provider_tests.rs b/crates/apollo_l1_provider/src/l1_provider_tests.rs index fc20c389676..249ecb18d3d 100644 --- a/crates/apollo_l1_provider/src/l1_provider_tests.rs +++ b/crates/apollo_l1_provider/src/l1_provider_tests.rs @@ -49,7 +49,7 @@ fn commit_block_no_rejected( l1_provider.commit_block(txs.iter().copied().collect(), [].into(), block_number).unwrap(); } -fn commit_block_expect_error_just_to_start_bootstrapping( +fn call_commit_block_to_start_bootstrapping_and_expect_error( l1_provider: &mut L1Provider, block_number: BlockNumber, ) { @@ -437,7 +437,7 @@ async fn bootstrap_commit_block_received_twice_no_error() { .build_into_l1_provider(); l1_provider.initialize(BlockNumber(0), vec![]).await.expect("l1 provider initialize failed"); - commit_block_expect_error_just_to_start_bootstrapping(&mut l1_provider, BlockNumber(2)); + call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, BlockNumber(2)); commit_block_no_rejected(&mut l1_provider, &[], BlockNumber(2)); // l1_provider.start_bootstrapping(BlockNumber(2)); @@ -459,7 +459,7 @@ async fn bootstrap_commit_block_received_twice_error_if_new_uncommitted_txs() { .build_into_l1_provider(); l1_provider.initialize(BlockNumber(0), vec![]).await.expect("l1 provider initialize failed"); - commit_block_expect_error_just_to_start_bootstrapping(&mut l1_provider, BlockNumber(2)); + call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, BlockNumber(2)); // Test. 
commit_block_no_rejected(&mut l1_provider, &[tx_hash!(1)], BlockNumber(0)); @@ -1069,7 +1069,7 @@ async fn commit_block_historical_height_short_circuits_bootstrap() { let l1_provider_builder_clone = l1_provider_builder.clone(); let mut l1_provider = l1_provider_builder.clone().build_into_l1_provider(); l1_provider.initialize(STARTUP_HEIGHT, vec![]).await.expect("l1 provider initialize failed"); - commit_block_expect_error_just_to_start_bootstrapping(&mut l1_provider, TARGET_HEIGHT); + call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, TARGET_HEIGHT); let expected_unchanged = l1_provider_builder_clone .with_height(STARTUP_HEIGHT) @@ -1483,6 +1483,7 @@ async fn bootstrap_e2e() { let mut l1_provider = L1Provider::new(config, l1_provider_client.clone(), Arc::new(sync_client), None); + // Test. // Trigger the bootstrapper: this will trigger the sync task to start trying to fetch blocks @@ -1492,10 +1493,7 @@ async fn bootstrap_e2e() { // understand though. let scraped_l1_handler_txs = vec![]; // No txs to scrape in this test. l1_provider.initialize(STARTUP_HEIGHT, scraped_l1_handler_txs).await.unwrap(); - // TODO(guyn): this test assumes we start in bootstrapping state. The test should be updated to - // include the part where the batcher's first commit_block command is what determines the - // catchup height and causes the bootstrapping to begin. - l1_provider.start_bootstrapping(CATCH_UP_HEIGHT); + call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT); // Load first **Sync** response: the initializer task will pick it up within the specified // interval. 
@@ -1578,6 +1576,7 @@ async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() { let l1_provider_client = Arc::new(FakeL1ProviderClient::default()); const STARTUP_HEIGHT: BlockNumber = BlockNumber(3); + const CATCH_UP_HEIGHT: BlockNumber = BlockNumber(5); let sync_client = MockStateSyncClient::default(); let config = L1ProviderConfig { @@ -1587,12 +1586,13 @@ async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() { let mut l1_provider = L1Provider::new(config, l1_provider_client.clone(), Arc::new(sync_client), None); + // Test. // Start the sync sequence, should busy-wait until the batcher height is sent. let scraped_l1_handler_txs = []; // No txs to scrape in this test. l1_provider.initialize(STARTUP_HEIGHT, scraped_l1_handler_txs.into()).await.unwrap(); - + call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT); // **Commit** a few blocks. The height starts from the provider's current height, since this // is a trivial catchup scenario (nothing to catch up). // This checks that the trivial catch_up_height doesn't mess up this flow. @@ -1606,11 +1606,11 @@ async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() { // Commit blocks should have been applied. let start_height_plus_2 = height_add(STARTUP_HEIGHT, 2); assert_eq!(l1_provider.current_height, start_height_plus_2); - // TODO(guyn): it is possible that the rest of this test is trivial. + // Should still be bootstrapping, since catchup height isn't determined yet. // Technically we could end bootstrapping at this point, but its simpler to let it // terminate gracefully once the batcher and sync are ready. - assert_eq!(l1_provider.state, ProviderState::Pending); + assert!(l1_provider.state.is_bootstrapping()); // Let the sync task continue, it should short circuit. 
tokio::time::sleep(config.startup_sync_sleep_retry_interval_seconds).await; From a6c95541b7406ad9a64112a88d9416356ca4cdc9 Mon Sep 17 00:00:00 2001 From: Guy Nir Date: Thu, 11 Dec 2025 11:54:08 +0200 Subject: [PATCH 2/3] apollo_l1_provider: scraper historic height is one more --- crates/apollo_l1_provider/src/l1_scraper.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/apollo_l1_provider/src/l1_scraper.rs b/crates/apollo_l1_provider/src/l1_scraper.rs index 3909fcac7cf..3f3f4661e67 100644 --- a/crates/apollo_l1_provider/src/l1_scraper.rs +++ b/crates/apollo_l1_provider/src/l1_scraper.rs @@ -193,7 +193,7 @@ impl L1Scraper Date: Thu, 11 Dec 2025 11:53:17 +0200 Subject: [PATCH 3/3] apollo_l1_provider: rename catch_up_height and historic height --- crates/apollo_l1_provider/src/bootstrapper.rs | 26 +++++++++---------- crates/apollo_l1_provider/src/l1_provider.rs | 20 +++++++++++--- crates/apollo_l1_provider/src/test_utils.rs | 4 +-- 3 files changed, 31 insertions(+), 19 deletions(-) diff --git a/crates/apollo_l1_provider/src/bootstrapper.rs b/crates/apollo_l1_provider/src/bootstrapper.rs index 0b4f9c5d457..6036d45049b 100644 --- a/crates/apollo_l1_provider/src/bootstrapper.rs +++ b/crates/apollo_l1_provider/src/bootstrapper.rs @@ -20,7 +20,7 @@ use tracing::debug; /// Caches commits to be applied later. This flow is only relevant while the node is starting up. #[derive(Clone)] pub struct Bootstrapper { - pub catch_up_height: BlockNumber, + pub target_height: BlockNumber, pub sync_retry_interval: Duration, pub commit_block_backlog: Vec, pub l1_provider_client: SharedL1ProviderClient, @@ -49,13 +49,13 @@ impl Bootstrapper { n_sync_health_check_failures: Default::default(), // This is overriden when starting the sync task (e.g., when provider starts // bootstrapping). - catch_up_height: BlockNumber(0), + target_height: BlockNumber(0), } } /// Check if the caller has caught up with the bootstrapper. 
pub fn is_caught_up(&self, current_provider_height: BlockNumber) -> bool { - let is_caught_up = current_provider_height > self.catch_up_height; + let is_caught_up = current_provider_height > self.target_height; self.sync_task_health_check(is_caught_up); @@ -84,9 +84,9 @@ impl Bootstrapper { pub fn start_l2_sync( &mut self, current_provider_height: BlockNumber, - catch_up_height: BlockNumber, + target_height: BlockNumber, ) { - self.catch_up_height = catch_up_height; + self.target_height = target_height; // FIXME: spawning a task like this is evil. // However, we aren't using the task executor, so no choice :( // Once we start using a centralized threadpool, spawn through it instead of the @@ -95,15 +95,15 @@ impl Bootstrapper { self.l1_provider_client.clone(), self.sync_client.clone(), current_provider_height, - catch_up_height, + target_height, self.sync_retry_interval, )); self.sync_task_handle = SyncTaskHandle::Started(sync_task_handle.into()); } - pub fn catch_up_height(&self) -> BlockNumber { - self.catch_up_height + pub fn target_height(&self) -> BlockNumber { + self.target_height } fn sync_task_health_check(&self, is_caught_up: bool) { @@ -129,7 +129,7 @@ impl Bootstrapper { impl PartialEq for Bootstrapper { fn eq(&self, other: &Self) -> bool { - self.catch_up_height == other.catch_up_height + self.target_height == other.target_height && self.commit_block_backlog == other.commit_block_backlog } } @@ -139,7 +139,7 @@ impl Eq for Bootstrapper {} impl std::fmt::Debug for Bootstrapper { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Bootstrapper") - .field("catch_up_height", &self.catch_up_height) + .field("target_height", &self.target_height) .field("commit_block_backlog", &self.commit_block_backlog) .field("sync_task_handle", &self.sync_task_handle) .finish_non_exhaustive() @@ -150,14 +150,14 @@ async fn l2_sync_task( l1_provider_client: SharedL1ProviderClient, sync_client: SharedStateSyncClient, mut current_height: 
BlockNumber, - catch_up_height: BlockNumber, + target_height: BlockNumber, retry_interval: Duration, ) { - while current_height <= catch_up_height { + while current_height <= target_height { // TODO(Gilad): add tracing instrument. debug!( "Syncing L1Provider with L2 height: {} to target height: {}", - current_height, catch_up_height + current_height, target_height ); let block = sync_client.get_block(current_height).await.inspect_err(|err| debug!("{err}")); diff --git a/crates/apollo_l1_provider/src/l1_provider.rs b/crates/apollo_l1_provider/src/l1_provider.rs index bfcfa95fd66..9a7c986b2b7 100644 --- a/crates/apollo_l1_provider/src/l1_provider.rs +++ b/crates/apollo_l1_provider/src/l1_provider.rs @@ -29,6 +29,18 @@ use crate::L1ProviderConfig; #[path = "l1_provider_tests.rs"] pub mod l1_provider_tests; +/// Note on height specification: +/// - start_height: height of the next block after "historic" blocks, meaning blocks that were +/// already proved and sent to L1, and will not be scraped. Transactions from blocks lower than +/// start_height are already committed. The Provider is not interested in any block lower than +/// start_height. +/// - current_height: height of the next block that the Provider expects to see. It means the +/// provider has seen commits for all the previous blocks up to (not including) current_height. +/// - target_height: when bootstrapping, the height to which the bootstrapper will sync. After +/// bootstrapping is done, we expect the current_height to be one above the target_height. If any +/// more blocks are committed while bootstrapping, they are applied after the target_height, and +/// the current_height will be set to one above the last block in the backlog. + // TODO(Gilad): optimistic proposer support, will add later to keep things simple, but the design // here is compatible with it. #[derive(Debug, Clone)] @@ -84,7 +96,7 @@ impl L1Provider { // Start the provider, get first-scrape events, start L2 sync.
pub async fn initialize( &mut self, - historic_l2_height: BlockNumber, + start_height: BlockNumber, events: Vec, ) -> L1ProviderResult<()> { info!("Initializing l1 provider"); @@ -103,8 +115,8 @@ impl L1Provider { // The current_height is set to a very old height, that doesn't include any of the events // sent now, or to be scraped in the future. The provider will begin bootstrapping when the // batcher calls commit_block with a height above the current height. - self.start_height = Some(historic_l2_height); - self.current_height = historic_l2_height; + self.start_height = Some(start_height); + self.current_height = start_height; self.state = ProviderState::Pending; self.add_events(events)?; @@ -310,7 +322,7 @@ impl L1Provider { if self.state.is_uninitialized() { warn!( "Provider received a block height ({height}) while it is uninitialized. \ - Cannot start bootstrapping until getting the historic_height from the \ + Cannot start bootstrapping until getting the start_height from the \ scraper during the initialize call." ); } else { diff --git a/crates/apollo_l1_provider/src/test_utils.rs b/crates/apollo_l1_provider/src/test_utils.rs index 274930d4629..9a731a13e45 100644 --- a/crates/apollo_l1_provider/src/test_utils.rs +++ b/crates/apollo_l1_provider/src/test_utils.rs @@ -43,7 +43,7 @@ macro_rules! make_bootstrapper { committed_txs: [$(tx_hash!($tx)),*].into() }),* ].into_iter().collect(), - catch_up_height: BlockNumber(0), + target_height: BlockNumber(0), l1_provider_client: Arc::new(FakeL1ProviderClient::default()), sync_client: Arc::new(MockStateSyncClient::default()), sync_task_handle: SyncTaskHandle::default(), @@ -636,7 +636,7 @@ impl L1ProviderClient for FakeL1ProviderClient { async fn initialize( &self, - _last_historic_l2_height: BlockNumber, + _start_height: BlockNumber, _events: Vec, ) -> L1ProviderClientResult<()> { todo!()