diff --git a/crates/apollo_l1_provider/src/bootstrapper.rs b/crates/apollo_l1_provider/src/catchupper.rs similarity index 95% rename from crates/apollo_l1_provider/src/bootstrapper.rs rename to crates/apollo_l1_provider/src/catchupper.rs index 6036d45049b..bba2b9b1a82 100644 --- a/crates/apollo_l1_provider/src/bootstrapper.rs +++ b/crates/apollo_l1_provider/src/catchupper.rs @@ -9,7 +9,7 @@ use starknet_api::block::BlockNumber; use starknet_api::transaction::TransactionHash; use tracing::debug; -// When the Provider gets a commit_block that is too high, it starts bootstrapping. +// When the Provider gets a commit_block that is too high, it starts catching up. // The commit is rejected by the provider, so it must use sync to catch up to the height of the // commit, including that height. The sync task continues until reaching the target height, // inclusive, and only after the commit_block (from sync) causes the Provider's current height to be @@ -19,7 +19,7 @@ use tracing::debug; /// Caches commits to be applied later. This flow is only relevant while the node is starting up. #[derive(Clone)] -pub struct Bootstrapper { +pub struct Catchupper { pub target_height: BlockNumber, pub sync_retry_interval: Duration, pub commit_block_backlog: Vec<CommitBlockBacklog>, @@ -30,7 +30,7 @@ pub struct Bootstrapper { pub n_sync_health_check_failures: Arc<AtomicU8>, } -impl Bootstrapper { +impl Catchupper { // FIXME: this isn't added to configs, since this test shouldn't be made here, it should be // handled through a task management layer. pub const MAX_HEALTH_CHECK_FAILURES: u8 = 5; @@ -48,12 +48,12 @@ impl Bootstrapper { sync_task_handle: SyncTaskHandle::NotStartedYet, n_sync_health_check_failures: Default::default(), // This is overriden when starting the sync task (e.g., when provider starts - // bootstrapping). + // catching up). target_height: BlockNumber(0), } } - /// Check if the caller has caught up with the bootstrapper. + /// Check if the caller has caught up with the catchupper. 
pub fn is_caught_up(&self, current_provider_height: BlockNumber) -> bool { let is_caught_up = current_provider_height > self.target_height; @@ -127,18 +127,18 @@ impl Bootstrapper { } } -impl PartialEq for Bootstrapper { +impl PartialEq for Catchupper { fn eq(&self, other: &Self) -> bool { self.target_height == other.target_height && self.commit_block_backlog == other.commit_block_backlog } } -impl Eq for Bootstrapper {} +impl Eq for Catchupper {} -impl std::fmt::Debug for Bootstrapper { +impl std::fmt::Debug for Catchupper { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Bootstrapper") + f.debug_struct("Catchupper") .field("target_height", &self.target_height) .field("commit_block_backlog", &self.commit_block_backlog) .field("sync_task_handle", &self.sync_task_handle) diff --git a/crates/apollo_l1_provider/src/l1_provider.rs b/crates/apollo_l1_provider/src/l1_provider.rs index 9a7c986b2b7..dbbde7ece17 100644 --- a/crates/apollo_l1_provider/src/l1_provider.rs +++ b/crates/apollo_l1_provider/src/l1_provider.rs @@ -21,7 +21,7 @@ use starknet_api::executable_transaction::L1HandlerTransaction; use starknet_api::transaction::TransactionHash; use tracing::{debug, error, info, instrument, trace, warn}; -use crate::bootstrapper::Bootstrapper; +use crate::catchupper::Catchupper; use crate::transaction_manager::TransactionManager; use crate::L1ProviderConfig; @@ -35,11 +35,11 @@ pub mod l1_provider_tests; /// start_height are already committed. The Provider is not interested in any block lower than /// start_height. /// - current_height: height of the next block that the Provider expects to see. It means the -/// provider has seen commits forall the previous blocks up to (not including) current_height. -/// - target_height: when bootstrapping, the height to which the bootstrapper will sync. After -/// bootstrapping is done, we expect the current_height to be one above the target_height. 
If any -/// more blocks are committed while bootstrapping, they are applied after the target_height, and -/// the current_height will be set to one above the last block in the backlog. +/// provider has seen commits for all the previous blocks up to (not including) current_height. +/// - target_height: when catching up, the height to which the catchupper will sync. After catching +/// up is done, we expect the current_height to be one above the target_height. If any more blocks +/// are committed while catching up, they are applied after the target_height, and the +/// current_height will be set to one above the last block in the backlog. // TODO(Gilad): optimistic proposer support, will add later to keep things simple, but the design // here is compatible with it. @@ -47,7 +47,7 @@ pub mod l1_provider_tests; pub struct L1Provider { pub config: L1ProviderConfig, /// Used for catching up at startup or after a crash. - pub bootstrapper: Bootstrapper, + pub catchupper: Catchupper, /// Represents the L2 block height being built. 
pub current_height: BlockNumber, pub tx_manager: TransactionManager, @@ -65,14 +65,14 @@ impl L1Provider { state_sync_client: SharedStateSyncClient, clock: Option<Arc<dyn Clock>>, ) -> Self { - let bootstrapper = Bootstrapper::new( + let catchupper = Catchupper::new( l1_provider_client, state_sync_client, config.startup_sync_sleep_retry_interval_seconds, ); Self { config, - bootstrapper, + catchupper, current_height: BlockNumber(0), tx_manager: TransactionManager::new( config.l1_handler_proposal_cooldown_seconds, @@ -84,10 +84,10 @@ impl L1Provider { start_height: None, } } - pub fn reset_bootstrapper(&mut self) { - self.bootstrapper = Bootstrapper::new( - self.bootstrapper.l1_provider_client.clone(), - self.bootstrapper.sync_client.clone(), + pub fn reset_catchupper(&mut self) { + self.catchupper = Catchupper::new( + self.catchupper.l1_provider_client.clone(), + self.catchupper.sync_client.clone(), self.config.startup_sync_sleep_retry_interval_seconds, ); } @@ -113,7 +113,7 @@ impl L1Provider { // The provider now goes into Pending state. // The current_height is set to a very old height, that doesn't include any of the events - // sent now, or to be scraped in the future. The provider will begin bootstrapping when the + // sent now, or to be scraped in the future. The provider will begin catching up when the // batcher calls commit_block with a height above the current height. self.start_height = Some(start_height); self.current_height = start_height; @@ -300,14 +300,14 @@ impl L1Provider { return Ok(()); } - // Reroute this block to bootstrapper, either adding it to the backlog, or applying it and - // ending the bootstrap. - if self.state.is_bootstrapping() { - // Once bootstrap completes it will transition to Pending state by itself. - return self.accept_commit_while_bootstrapping(committed_txs, height); + // Reroute this block to catchupper, either adding it to the backlog, or applying it and + // ending the catchup. 
+ if self.state.is_catching_up() { + // Once catchup completes it will transition to Pending state by itself. + return self.accept_commit_while_catching_up(committed_txs, height); } - // If not historical height and not bootstrapping, must go into bootstrap state upon getting + // If not historical height and not catching up, must go into catchup state upon getting // wrong height. match self.check_height_with_error(height) { Ok(_) => { @@ -322,16 +322,16 @@ impl L1Provider { if self.state.is_uninitialized() { warn!( "Provider received a block height ({height}) while it is uninitialized. \ - Cannot start bootstrapping until getting the start_height from the \ - scraper during the initialize call." + Cannot start catching up until getting the start_height from the scraper \ + during the initialize call." ); } else { info!( "Provider received a block_height ({height}) that is higher than the \ - current height ({}), starting bootstrapping.", + current height ({}), starting catch-up process.", self.current_height ); - self.start_bootstrapping(height); + self.start_catching_up(height); } Err(err) } @@ -340,11 +340,11 @@ impl L1Provider { // Functions called internally. - /// Go from current state to Bootstrap state and start the L2 sync. - pub fn start_bootstrapping(&mut self, target_height: BlockNumber) { - self.reset_bootstrapper(); - self.state = ProviderState::Bootstrap; - self.bootstrapper.start_l2_sync(self.current_height, target_height); + /// Go from current state to CatchingUp state and start the L2 sync. + pub fn start_catching_up(&mut self, target_height: BlockNumber) { + self.reset_catchupper(); + self.state = ProviderState::CatchingUp; + self.catchupper.start_l2_sync(self.current_height, target_height); } /// Commit the given transactions, and increment the current height. @@ -361,19 +361,19 @@ impl L1Provider { self.current_height = self.current_height.unchecked_next(); } - /// Any commit_block call gets rerouted to this function when in bootstrap state. 
+ /// Any commit_block call gets rerouted to this function when in CatchingUp state. /// - If block number is higher than current height, block is backlogged. /// - If provider gets a block consistent with current_height, apply it and then the rest of the /// backlog, then transition to Pending state. /// - Blocks lower than current height are checked for consistency with existing transactions. - fn accept_commit_while_bootstrapping( + fn accept_commit_while_catching_up( &mut self, committed_txs: IndexSet<TransactionHash>, new_height: BlockNumber, ) -> L1ProviderResult<()> { let current_height = self.current_height; debug!( - "Bootstrapper processing commit-block at height: {new_height}, current height is \ + "Catchupper processing commit-block at height: {new_height}, current height is \ {current_height}" ); match new_height.cmp(&current_height) { @@ -397,7 +397,7 @@ impl L1Provider { return Ok(()); } else { // This is either a configuration error or a bug in the - // batcher/sync/bootstrapper. + // batcher/sync/catching up code. error!( "Duplicate commit block: commit block for {new_height:?} already \ received, with DIFFERENT transaction_hashes: \ @@ -413,8 +413,8 @@ impl L1Provider { Equal => self.apply_commit_block(committed_txs, Default::default()), // We're still syncing, backlog it, it'll get applied later. Greater => { - self.bootstrapper.add_commit_block_to_backlog(committed_txs, new_height); - // No need to check the backlog or bootstrap completion, since those are only + self.catchupper.add_commit_block_to_backlog(committed_txs, new_height); + // No need to check the backlog or catchup completion, since those are only // applicable if we just increased the provider's height, like in the `Equal` case. return Ok(()); } @@ -423,12 +423,12 @@ impl L1Provider { // If caught up, apply the backlog and transition to Pending. // Note that at this point self.current_height is already incremented to the next height, it // is one more than the latest block that was committed. 
- if self.bootstrapper.is_caught_up(self.current_height) { + if self.catchupper.is_caught_up(self.current_height) { info!( - "Bootstrapper sync completed, provider height is now {}, processing backlog...", + "Catch up sync completed, provider height is now {}, processing backlog...", self.current_height ); - let backlog = std::mem::take(&mut self.bootstrapper.commit_block_backlog); + let backlog = std::mem::take(&mut self.catchupper.commit_block_backlog); assert!( backlog.is_empty() || self.current_height == backlog.first().unwrap().height @@ -451,8 +451,8 @@ impl L1Provider { } info!( - "Bootstrapping done: commit-block backlog was processed, now transitioning to \ - Pending state at new height: {}.", + "Catch up done: commit-block backlog was processed, now transitioning to Pending \ + state at new height: {}.", self.current_height ); diff --git a/crates/apollo_l1_provider/src/l1_provider_tests.rs b/crates/apollo_l1_provider/src/l1_provider_tests.rs index 249ecb18d3d..eccaa5fa038 100644 --- a/crates/apollo_l1_provider/src/l1_provider_tests.rs +++ b/crates/apollo_l1_provider/src/l1_provider_tests.rs @@ -30,11 +30,11 @@ use starknet_api::test_utils::l1_handler::{executable_l1_handler_tx, L1HandlerTx use starknet_api::transaction::TransactionHash; use starknet_api::tx_hash; -use crate::bootstrapper::{Bootstrapper, CommitBlockBacklog, SyncTaskHandle}; +use crate::catchupper::{Catchupper, CommitBlockBacklog, SyncTaskHandle}; use crate::l1_provider::L1Provider; use crate::test_utils::{ l1_handler, - make_bootstrapper, + make_catchupper, ConsumedTransaction, FakeL1ProviderClient, L1ProviderContentBuilder, @@ -49,7 +49,7 @@ fn commit_block_no_rejected( l1_provider.commit_block(txs.iter().copied().collect(), [].into(), block_number).unwrap(); } -fn call_commit_block_to_start_bootstrapping_and_expect_error( +fn call_commit_block_to_start_catching_up_and_expect_error( l1_provider: &mut L1Provider, block_number: BlockNumber, ) { @@ -367,33 +367,33 @@ async fn 
commit_block_backlog() { const STARTUP_HEIGHT: BlockNumber = BlockNumber(8); const TARGET_HEIGHT: BlockNumber = BlockNumber(9); const BACKLOG_HEIGHT: BlockNumber = BlockNumber(11); - let bootstrapper = make_bootstrapper!(backlog: [10 => [2], 11 => [4]]); + let catchupper = make_catchupper!(backlog: [10 => [2], 11 => [4]]); let mut l1_provider = L1ProviderContentBuilder::new() - .with_bootstrapper(bootstrapper.clone()) + .with_catchupper(catchupper.clone()) .with_txs([l1_handler(1), l1_handler(2), l1_handler(4)]) .with_state(ProviderState::Uninitialized) .build_into_l1_provider(); l1_provider.initialize(STARTUP_HEIGHT, vec![]).await.expect("l1 provider initialize failed"); - l1_provider.state = ProviderState::Bootstrap; - l1_provider.bootstrapper.start_l2_sync(STARTUP_HEIGHT, TARGET_HEIGHT); + l1_provider.state = ProviderState::CatchingUp; + l1_provider.catchupper.start_l2_sync(STARTUP_HEIGHT, TARGET_HEIGHT); // Test. // Commit height is below target height. Doesn't trigger backlog. commit_block_no_rejected(&mut l1_provider, &[tx_hash!(1)], STARTUP_HEIGHT); let expected_l1_provider = L1ProviderContentBuilder::new() - .with_bootstrapper(bootstrapper.clone()) + .with_catchupper(catchupper.clone()) .with_txs([l1_handler(2), l1_handler(4)]) .with_height(STARTUP_HEIGHT.unchecked_next()) - .with_state(ProviderState::Bootstrap) + .with_state(ProviderState::CatchingUp) .build(); expected_l1_provider.assert_eq(&l1_provider); - // This height triggers finishing the bootstrapping and applying the backlog. + // This height triggers finishing the catching up and applying the backlog. 
commit_block_no_rejected(&mut l1_provider, &[], TARGET_HEIGHT); let expected_l1_provider = L1ProviderContentBuilder::new() - .with_bootstrapper(bootstrapper) + .with_catchupper(catchupper) .with_txs([]) .with_height(BACKLOG_HEIGHT.unchecked_next()) .with_state(ProviderState::Pending) @@ -427,20 +427,19 @@ fn commit_block_before_add_tx_stores_tx_in_committed() { } #[tokio::test] -async fn bootstrap_commit_block_received_twice_no_error() { +async fn catching_up_commit_block_received_twice_no_error() { // Setup. - let bootstrapper = make_bootstrapper!(backlog: []); + let catchupper = make_catchupper!(backlog: []); let mut l1_provider = L1ProviderContentBuilder::new() - .with_bootstrapper(bootstrapper) + .with_catchupper(catchupper) .with_txs([l1_handler(1), l1_handler(2)]) .with_state(ProviderState::Uninitialized) .build_into_l1_provider(); l1_provider.initialize(BlockNumber(0), vec![]).await.expect("l1 provider initialize failed"); - call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, BlockNumber(2)); + call_commit_block_to_start_catching_up_and_expect_error(&mut l1_provider, BlockNumber(2)); commit_block_no_rejected(&mut l1_provider, &[], BlockNumber(2)); - // l1_provider.start_bootstrapping(BlockNumber(2)); // Test. commit_block_no_rejected(&mut l1_provider, &[tx_hash!(1)], BlockNumber(0)); @@ -449,17 +448,17 @@ async fn bootstrap_commit_block_received_twice_no_error() { } #[tokio::test] -async fn bootstrap_commit_block_received_twice_error_if_new_uncommitted_txs() { +async fn catching_up_commit_block_received_twice_error_if_new_uncommitted_txs() { // Setup. 
- let bootstrapper = make_bootstrapper!(backlog: []); + let catchupper = make_catchupper!(backlog: []); let mut l1_provider = L1ProviderContentBuilder::new() - .with_bootstrapper(bootstrapper) + .with_catchupper(catchupper) .with_txs([l1_handler(1), l1_handler(2)]) .with_state(ProviderState::Uninitialized) .build_into_l1_provider(); l1_provider.initialize(BlockNumber(0), vec![]).await.expect("l1 provider initialize failed"); - call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, BlockNumber(2)); + call_commit_block_to_start_catching_up_and_expect_error(&mut l1_provider, BlockNumber(2)); // Test. commit_block_no_rejected(&mut l1_provider, &[tx_hash!(1)], BlockNumber(0)); @@ -1036,7 +1035,7 @@ fn validate_tx_unknown_returns_invalid_not_found() { } #[test] -fn commit_block_historical_height_short_circuits_non_bootstrap() { +fn commit_block_historical_height_short_circuits_non_catching_up() { // Setup. let l1_provider_builder = L1ProviderContentBuilder::new() .with_height(BlockNumber(5)) @@ -1055,25 +1054,25 @@ fn commit_block_historical_height_short_circuits_non_bootstrap() { } #[tokio::test] -async fn commit_block_historical_height_short_circuits_bootstrap() { +async fn commit_block_historical_height_short_circuits_catching_up() { // Setup. 
const STARTUP_HEIGHT: BlockNumber = BlockNumber(5); const TARGET_HEIGHT: BlockNumber = BlockNumber(6); let batcher_height_old = 4; - let bootstrapper = make_bootstrapper!(backlog: []); + let catchupper = make_catchupper!(backlog: []); let l1_provider_builder = L1ProviderContentBuilder::new() - .with_bootstrapper(bootstrapper) + .with_catchupper(catchupper) .with_state(ProviderState::Uninitialized) .with_txs([l1_handler(1)]); let l1_provider_builder_clone = l1_provider_builder.clone(); let mut l1_provider = l1_provider_builder.clone().build_into_l1_provider(); l1_provider.initialize(STARTUP_HEIGHT, vec![]).await.expect("l1 provider initialize failed"); - call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, TARGET_HEIGHT); + call_commit_block_to_start_catching_up_and_expect_error(&mut l1_provider, TARGET_HEIGHT); let expected_unchanged = l1_provider_builder_clone .with_height(STARTUP_HEIGHT) - .with_state(ProviderState::Bootstrap) + .with_state(ProviderState::CatchingUp) .build(); // Check that the content is the same as expected. @@ -1404,7 +1403,7 @@ fn consuming_multiple_txs_selective_deletion_after_timelock() { } #[test] -fn bootstrap_commit_block_received_while_uninitialized() { +fn catching_up_commit_block_received_while_uninitialized() { // Setup. let mut l1_provider = L1ProviderContentBuilder::new() .with_state(ProviderState::Uninitialized) @@ -1447,12 +1446,12 @@ fn receive_commit_block( // TODO(Gilad): figure out how To setup anvil on a specific L1 block (through genesis.json?) and // with a specified L2 block logged to L1 (hopefully without having to use real backup). -/// This test simulates a bootstrapping flow, in which 3 blocks are synced from L2, during which two +/// This test simulates a catching up flow, in which 3 blocks are synced from L2, during which two /// new blocks from past the catch-up height arrive. 
The expected behavior is that the synced /// commit_blocks are processed as they come, and the two new blocks are backlogged until the synced /// blocks are processed, after which they are processed in order. #[tokio::test] -async fn bootstrap_e2e() { +async fn catching_up_e2e() { if !in_ci() { return; } @@ -1486,14 +1485,14 @@ async fn bootstrap_e2e() { // Test. - // Trigger the bootstrapper: this will trigger the sync task to start trying to fetch blocks + // Trigger the catching up: this will trigger the sync task to start trying to fetch blocks // from the sync client, which will always return nothing since the hash map above is still // empty. The sync task will busy-wait on the height until we feed the hashmap. // TODO(Gilad): Consider adding txs here and in the commit blocks, might make the test harder to // understand though. let scraped_l1_handler_txs = vec![]; // No txs to scrape in this test. l1_provider.initialize(STARTUP_HEIGHT, scraped_l1_handler_txs).await.unwrap(); - call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT); + call_commit_block_to_start_catching_up_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT); // Load first **Sync** response: the initializer task will pick it up within the specified // interval. @@ -1561,12 +1560,12 @@ async fn bootstrap_e2e() { receive_commit_block(&mut l1_provider, &next_block.committed_txs, next_block.height); assert_eq!(l1_provider.current_height, BlockNumber(7)); - // Assert that the bootstrapper has been dropped. - assert!(!l1_provider.state.is_bootstrapping()); + // Assert that the catching up has been completed. 
+ assert!(!l1_provider.state.is_catching_up()); } #[tokio::test] -async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() { +async fn catching_up_delayed_batcher_and_sync_state_with_trivial_catch_up() { if !in_ci() { return; } @@ -1592,7 +1591,7 @@ async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() { // Start the sync sequence, should busy-wait until the batcher height is sent. let scraped_l1_handler_txs = []; // No txs to scrape in this test. l1_provider.initialize(STARTUP_HEIGHT, scraped_l1_handler_txs.into()).await.unwrap(); - call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT); + call_commit_block_to_start_catching_up_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT); // **Commit** a few blocks. The height starts from the provider's current height, since this // is a trivial catchup scenario (nothing to catch up). // This checks that the trivial catch_up_height doesn't mess up this flow. @@ -1607,25 +1606,25 @@ async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() { let start_height_plus_2 = height_add(STARTUP_HEIGHT, 2); assert_eq!(l1_provider.current_height, start_height_plus_2); - // Should still be bootstrapping, since catchup height isn't determined yet. - // Technically we could end bootstrapping at this point, but its simpler to let it + // Should still be catching up, since catching up height isn't determined yet. + // Technically we could end catching up at this point, but its simpler to let it // terminate gracefully once the batcher and sync are ready. - assert!(l1_provider.state.is_bootstrapping()); + assert!(l1_provider.state.is_catching_up()); // Let the sync task continue, it should short circuit. tokio::time::sleep(config.startup_sync_sleep_retry_interval_seconds).await; // Assert height is unchanged from last time, no commit block was called from the sync task. 
assert_eq!(l1_provider.current_height, start_height_plus_2); - // Finally, commit a new block to trigger the bootstrapping check, should switch to steady + // Finally, commit a new block to trigger the catching up check, should switch to steady // state. receive_commit_block(&mut l1_provider, &no_txs_committed.into(), start_height_plus_2); assert_eq!(l1_provider.current_height, height_add(start_height_plus_2, 1)); - // The new commit block triggered the catch-up check, which ended the bootstrapping phase. - assert!(!l1_provider.state.is_bootstrapping()); + // The new commit block triggered the catch-up check, which ended the catching up phase. + assert!(!l1_provider.state.is_catching_up()); } #[tokio::test] -async fn bootstrap_delayed_sync_state_with_sync_behind_batcher() { +async fn catching_up_delayed_sync_state_with_sync_behind_batcher() { if !in_ci() { return; } @@ -1679,8 +1678,8 @@ async fn bootstrap_delayed_sync_state_with_sync_behind_batcher() { // Assert commit blocks are backlogged (didn't affect start height). assert_eq!(l1_provider.current_height, startup_height); - // Should still be bootstrapping, since sync hasn't caught up to the batcher height yet. - assert!(l1_provider.state.is_bootstrapping()); + // Should still be catching up, since sync hasn't caught up to the batcher height yet. + assert!(l1_provider.state.is_catching_up()); // Simulate the state sync service finally being ready, and give the async task enough time to // pick this up and sync up the provider. @@ -1698,11 +1697,11 @@ async fn bootstrap_delayed_sync_state_with_sync_behind_batcher() { l1_provider_client.flush_messages(&mut l1_provider).await; // Two things happened here: the async task sent 2 commit blocks it got from the sync_client, - // which bumped the provider height to batcher_height, then the backlog was applied which + // which bumped the provider height to catching up height, then the backlog was applied which // bumped it twice again. 
assert_eq!(l1_provider.current_height, batcher_height.unchecked_next().unchecked_next()); - // Batcher height was reached, bootstrapping was completed. - assert!(!l1_provider.state.is_bootstrapping()); + // Batcher height was reached, catching up was completed. + assert!(!l1_provider.state.is_catching_up()); } #[tokio::test] @@ -1727,9 +1726,9 @@ async fn test_stuck_sync() { // Start sync. l1_provider.initialize(STARTUP_HEIGHT, Default::default()).await.unwrap(); - l1_provider.start_bootstrapping(TARGET_HEIGHT); + l1_provider.start_catching_up(TARGET_HEIGHT); - for i in 0..=(Bootstrapper::MAX_HEALTH_CHECK_FAILURES + 1) { + for i in 0..=(Catchupper::MAX_HEALTH_CHECK_FAILURES + 1) { receive_commit_block(&mut l1_provider, &[].into(), height_add(STARTUP_HEIGHT, i.into())); tokio::time::sleep(config.startup_sync_sleep_retry_interval_seconds).await; } diff --git a/crates/apollo_l1_provider/src/l1_scraper_tests.rs b/crates/apollo_l1_provider/src/l1_scraper_tests.rs index 56591aaffe6..9105015f546 100644 --- a/crates/apollo_l1_provider/src/l1_scraper_tests.rs +++ b/crates/apollo_l1_provider/src/l1_scraper_tests.rs @@ -199,9 +199,9 @@ async fn base_layer_returns_block_number_below_finality_causes_error() { #[test] #[ignore = "similar to backlog_happy_flow, only shorter, and sprinkle some start_block/get_txs \ - attempts while its bootstrapping (and assert failure on height), then assert that they \ - succeed after bootstrapping ends."] -fn bootstrap_completion() { + attempts while its catching up (and assert failure on height), then assert that they \ + succeed after catching up ends."] +fn catching_up_completion() { todo!() } diff --git a/crates/apollo_l1_provider/src/lib.rs b/crates/apollo_l1_provider/src/lib.rs index 858aa54bf0d..889447f3ccf 100644 --- a/crates/apollo_l1_provider/src/lib.rs +++ b/crates/apollo_l1_provider/src/lib.rs @@ -1,4 +1,4 @@ -pub mod bootstrapper; +pub mod catchupper; pub mod communication; pub mod l1_provider; diff --git 
a/crates/apollo_l1_provider/src/test_utils.rs b/crates/apollo_l1_provider/src/test_utils.rs index 9a731a13e45..708f2920598 100644 --- a/crates/apollo_l1_provider/src/test_utils.rs +++ b/crates/apollo_l1_provider/src/test_utils.rs @@ -28,15 +28,15 @@ use starknet_api::hash::StarkHash; use starknet_api::test_utils::l1_handler::{executable_l1_handler_tx, L1HandlerTxArgs}; use starknet_api::transaction::TransactionHash; -use crate::bootstrapper::{Bootstrapper, CommitBlockBacklog, SyncTaskHandle}; +use crate::catchupper::{Catchupper, CommitBlockBacklog, SyncTaskHandle}; use crate::l1_provider::L1Provider; use crate::transaction_manager::{StagingEpoch, TransactionManager, TransactionManagerConfig}; use crate::transaction_record::{TransactionPayload, TransactionRecord}; use crate::L1ProviderConfig; -macro_rules! make_bootstrapper { +macro_rules! make_catchupper { (backlog: [$($height:literal => [$($tx:literal),* $(,)*]),* $(,)*]) => {{ - Bootstrapper { + Catchupper { commit_block_backlog: vec![ $(CommitBlockBacklog { height: BlockNumber($height), @@ -53,7 +53,7 @@ macro_rules! 
make_bootstrapper { }}; } -pub(crate) use make_bootstrapper; +pub(crate) use make_catchupper; pub fn l1_handler(tx_hash: usize) -> L1HandlerTransaction { let tx_hash = TransactionHash(StarkHash::from(tx_hash)); @@ -67,7 +67,7 @@ pub struct L1ProviderContent { config: Option<L1ProviderConfig>, tx_manager_content: Option<TransactionManagerContent>, state: Option<ProviderState>, - bootstrapper: Option<Bootstrapper>, + catchupper: Option<Catchupper>, current_height: Option<BlockNumber>, clock: Option<Arc<dyn Clock>>, } @@ -93,9 +93,9 @@ impl L1ProviderContent { impl From<L1ProviderContent> for L1Provider { fn from(content: L1ProviderContent) -> L1Provider { - let bootstrapper = match content.bootstrapper { - Some(bootstrapper) => bootstrapper, - None => make_bootstrapper!(backlog: []), + let catchupper = match content.catchupper { + Some(catchupper) => catchupper, + None => make_catchupper!(backlog: []), }; L1Provider { config: content.config.unwrap_or_default(), @@ -103,7 +103,7 @@ impl From<L1ProviderContent> for L1Provider { // The real Provider starts as Uninitialized by default, but for testing purposes we // start it as Pending. state: content.state.unwrap_or(ProviderState::Pending), - bootstrapper, + catchupper, current_height: content.current_height.unwrap_or_default(), start_height: content.current_height, clock: content.clock.unwrap_or_else(|| Arc::new(DefaultClock)), } } } pub struct L1ProviderContentBuilder { config: Option<L1ProviderConfig>, tx_manager_content_builder: TransactionManagerContentBuilder, state: Option<ProviderState>, - bootstrapper: Option<Bootstrapper>, + catchupper: Option<Catchupper>, current_height: Option<BlockNumber>, clock: Option<Arc<dyn Clock>>, } @@ -136,8 +136,8 @@ impl L1ProviderContentBuilder { self } - pub fn with_bootstrapper(mut self, bootstrapper: Bootstrapper) -> Self { - self.bootstrapper = Some(bootstrapper); + pub fn with_catchupper(mut self, catchupper: Catchupper) -> Self { + self.catchupper = Some(catchupper); self } @@ -275,7 +275,7 @@ impl L1ProviderContentBuilder { config: self.config, tx_manager_content: self.tx_manager_content_builder.build(), state: self.state, - bootstrapper: self.bootstrapper, + catchupper: self.catchupper, 
current_height: self.current_height, clock: self.clock, } diff --git a/crates/apollo_l1_provider/src/transaction_manager.rs b/crates/apollo_l1_provider/src/transaction_manager.rs index 8f0736ef497..6b6320a66b7 100644 --- a/crates/apollo_l1_provider/src/transaction_manager.rs +++ b/crates/apollo_l1_provider/src/transaction_manager.rs @@ -177,7 +177,7 @@ impl TransactionManager { // If exists, return false and do nothing. If not, create the record as a HashOnly payload. let is_new_record = self.create_record_if_not_exist(tx_hash); // Replace a HashOnly payload with a Full payload. Do not update a Full payload. - // A hash only payload can come from bootstrapping from state sync, and then updated by + // A hash only payload can come from catching up from state sync, and then updated by // add_events from the scraper. However, if we get the same full tx twice (from the scraper) // it could indicate a double-scrape, and may cause the tx to be re-added to the proposable // index. diff --git a/crates/apollo_l1_provider/tests/utils/mod.rs b/crates/apollo_l1_provider/tests/utils/mod.rs index a6c552f8ab0..0bab7038585 100644 --- a/crates/apollo_l1_provider/tests/utils/mod.rs +++ b/crates/apollo_l1_provider/tests/utils/mod.rs @@ -159,7 +159,7 @@ pub(crate) async fn setup_scraper_and_provider< } }); - // Start the L1 provider's bootstrapping process up to the target L2 height + // Start the L1 provider's catching up process up to the target L2 height let expect_error = l1_provider_client.commit_block([].into(), [].into(), TARGET_L2_HEIGHT).await; assert!(expect_error.is_err()); diff --git a/crates/apollo_l1_provider_types/src/lib.rs b/crates/apollo_l1_provider_types/src/lib.rs index 282d17dab3e..25e8042f7e1 100644 --- a/crates/apollo_l1_provider_types/src/lib.rs +++ b/crates/apollo_l1_provider_types/src/lib.rs @@ -376,11 +376,11 @@ impl Display for Event { /// Current state of the provider, where pending means: idle, between proposal/validation cycles. 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub enum ProviderState { - /// Provider has not been initialized yet, needs to do bootstrapping at least once. + /// Provider has not been initialized yet, needs to get start_height and probably also catch + /// up. Uninitialized, - // TODO(guyn): in a upcoming PR, bootstrap will be available not only on startup. - /// Provider is catching up using sync. Only happens on startup. - Bootstrap, + /// Provider is catching up using sync. + CatchingUp, /// Provider is not ready for proposing or validating. Use start_block to transition to Propose /// or Validate. Pending, @@ -397,14 +397,14 @@ impl ProviderState { *self == ProviderState::Uninitialized } - pub fn is_bootstrapping(&self) -> bool { - *self == ProviderState::Bootstrap + pub fn is_catching_up(&self) -> bool { + *self == ProviderState::CatchingUp } pub fn transition_to_pending(&self) -> ProviderState { assert!( - !self.is_bootstrapping(), - "Transitioning from bootstrapping should be done manually by the L1Provider." + !self.is_catching_up(), + "Transitioning from catching up should be done manually by the L1Provider." ); ProviderState::Pending }