26 changes: 13 additions & 13 deletions crates/apollo_l1_provider/src/bootstrapper.rs
@@ -20,7 +20,7 @@ use tracing::debug;
/// Caches commits to be applied later. This flow is only relevant while the node is starting up.
#[derive(Clone)]
pub struct Bootstrapper {
-    pub catch_up_height: BlockNumber,
+    pub target_height: BlockNumber,
pub sync_retry_interval: Duration,
pub commit_block_backlog: Vec<CommitBlockBacklog>,
pub l1_provider_client: SharedL1ProviderClient,
@@ -49,13 +49,13 @@ impl Bootstrapper {
n_sync_health_check_failures: Default::default(),
            // This is overridden when starting the sync task (e.g., when provider starts
// bootstrapping).
-            catch_up_height: BlockNumber(0),
+            target_height: BlockNumber(0),
}
}

/// Check if the caller has caught up with the bootstrapper.
pub fn is_caught_up(&self, current_provider_height: BlockNumber) -> bool {
-        let is_caught_up = current_provider_height > self.catch_up_height;
+        let is_caught_up = current_provider_height > self.target_height;

self.sync_task_health_check(is_caught_up);

@@ -84,9 +84,9 @@ impl Bootstrapper {
pub fn start_l2_sync(
&mut self,
current_provider_height: BlockNumber,
-        catch_up_height: BlockNumber,
+        target_height: BlockNumber,
) {
-        self.catch_up_height = catch_up_height;
+        self.target_height = target_height;
// FIXME: spawning a task like this is evil.
// However, we aren't using the task executor, so no choice :(
// Once we start using a centralized threadpool, spawn through it instead of the
@@ -95,15 +95,15 @@
self.l1_provider_client.clone(),
self.sync_client.clone(),
current_provider_height,
-            catch_up_height,
+            target_height,
self.sync_retry_interval,
));

self.sync_task_handle = SyncTaskHandle::Started(sync_task_handle.into());
}

-    pub fn catch_up_height(&self) -> BlockNumber {
-        self.catch_up_height
+    pub fn target_height(&self) -> BlockNumber {
+        self.target_height
}

fn sync_task_health_check(&self, is_caught_up: bool) {
@@ -129,7 +129,7 @@ impl Bootstrapper {

impl PartialEq for Bootstrapper {
fn eq(&self, other: &Self) -> bool {
-        self.catch_up_height == other.catch_up_height
+        self.target_height == other.target_height
&& self.commit_block_backlog == other.commit_block_backlog
}
}
@@ -139,7 +139,7 @@ impl Eq for Bootstrapper {}
impl std::fmt::Debug for Bootstrapper {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Bootstrapper")
.field("catch_up_height", &self.catch_up_height)
.field("target_height", &self.target_height)
.field("commit_block_backlog", &self.commit_block_backlog)
.field("sync_task_handle", &self.sync_task_handle)
.finish_non_exhaustive()
@@ -150,14 +150,14 @@ async fn l2_sync_task(
l1_provider_client: SharedL1ProviderClient,
sync_client: SharedStateSyncClient,
mut current_height: BlockNumber,
-    catch_up_height: BlockNumber,
+    target_height: BlockNumber,
retry_interval: Duration,
) {
-    while current_height <= catch_up_height {
+    while current_height <= target_height {
// TODO(Gilad): add tracing instrument.
debug!(
"Syncing L1Provider with L2 height: {} to target height: {}",
-            current_height, catch_up_height
+            current_height, target_height
);
let block = sync_client.get_block(current_height).await.inspect_err(|err| debug!("{err}"));

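Reviewer aside: the rename makes the off-by-one in this file easier to state: the sync task loops while `current_height <= target_height`, and `is_caught_up` only returns true once the provider has moved strictly past `target_height`. A minimal self-contained sketch of that invariant (the `BlockNumber` and `MiniBootstrapper` types below are stand-ins invented for illustration, not the crate's real types):

```rust
// Illustrative model only: mirrors the loop bound in `l2_sync_task` and the
// comparison in `is_caught_up`, using toy types.
#[derive(Clone, Copy, PartialEq, PartialOrd, Debug)]
struct BlockNumber(u64);

struct MiniBootstrapper {
    target_height: BlockNumber,
}

impl MiniBootstrapper {
    // Caught up means strictly past the target, exactly as in the diff above.
    fn is_caught_up(&self, current_provider_height: BlockNumber) -> bool {
        current_provider_height > self.target_height
    }
}

fn main() {
    let bootstrapper = MiniBootstrapper { target_height: BlockNumber(5) };
    // The sync loop runs while current_height <= target_height, so blocks
    // 0..=5 get committed and the provider then expects block 6.
    let mut current_height = BlockNumber(0);
    while current_height <= bootstrapper.target_height {
        current_height = BlockNumber(current_height.0 + 1); // commit block, advance
    }
    assert_eq!(current_height, BlockNumber(6));
    assert!(bootstrapper.is_caught_up(current_height));
}
```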
20 changes: 16 additions & 4 deletions crates/apollo_l1_provider/src/l1_provider.rs
@@ -29,6 +29,18 @@ use crate::L1ProviderConfig;
#[path = "l1_provider_tests.rs"]
pub mod l1_provider_tests;

+/// Note on height specification:
+/// - start_height: height of the next block after "historic" blocks, meaning blocks that were
+/// already proved and sent to L1, and will not be scraped. Transactions from blocks lower than
+/// start_height are already committed. The Provider is not interested in any block lower than
+/// start_height.
+/// - current_height: height of the next block that the Provider expects to see. It means the
+/// provider has seen commits for all the previous blocks up to (not including) current_height.
+/// - target_height: when bootstrapping, the height to which the bootstrapper will sync. After
+/// bootstrapping is done, we expect the current_height to be one above the target_height. If any
+/// more blocks are committed while bootstrapping, they are applied after the target_height, and
+/// the current_height will be set to one above the last block in the backlog.
+
// TODO(Gilad): optimistic proposer support, will add later to keep things simple, but the design
// here is compatible with it.
#[derive(Debug, Clone)]
@@ -84,7 +96,7 @@ impl L1Provider {
// Start the provider, get first-scrape events, start L2 sync.
pub async fn initialize(
&mut self,
-        historic_l2_height: BlockNumber,
+        start_height: BlockNumber,
events: Vec<Event>,
) -> L1ProviderResult<()> {
info!("Initializing l1 provider");
@@ -103,8 +115,8 @@
// The current_height is set to a very old height, that doesn't include any of the events
// sent now, or to be scraped in the future. The provider will begin bootstrapping when the
// batcher calls commit_block with a height above the current height.
-        self.start_height = Some(historic_l2_height);
-        self.current_height = historic_l2_height;
+        self.start_height = Some(start_height);
+        self.current_height = start_height;
self.state = ProviderState::Pending;
self.add_events(events)?;

@@ -310,7 +322,7 @@ impl L1Provider {
if self.state.is_uninitialized() {
warn!(
"Provider received a block height ({height}) while it is uninitialized. \
-                 Cannot start bootstrapping until getting the historic_height from the \
+                 Cannot start bootstrapping until getting the start_height from the \
scraper during the initialize call."
);
} else {
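Reviewer aside: the new doc comment in this file pins down how the three heights relate once bootstrapping finishes. A small sketch of just the backlog rule (the `height_after_bootstrap` helper and the concrete numbers are invented for illustration; only the invariant comes from the comment):

```rust
// Illustrative only: commits that arrive above target_height while bootstrapping
// are buffered in a backlog and applied afterwards, so the provider ends up one
// block above whichever is higher: the target or the last backlogged block.
#[derive(Clone, Copy, PartialEq, PartialOrd, Debug)]
struct BlockNumber(u64);

fn height_after_bootstrap(target_height: BlockNumber, backlog: &[BlockNumber]) -> BlockNumber {
    let last = backlog
        .iter()
        .copied()
        .fold(target_height, |acc, b| if b > acc { b } else { acc });
    BlockNumber(last.0 + 1)
}

fn main() {
    // Nothing committed during bootstrap: current_height = target_height + 1.
    assert_eq!(height_after_bootstrap(BlockNumber(5), &[]), BlockNumber(6));
    // Blocks 6 and 7 were committed while bootstrapping: current_height ends up
    // one above the last block in the backlog.
    assert_eq!(
        height_after_bootstrap(BlockNumber(5), &[BlockNumber(6), BlockNumber(7)]),
        BlockNumber(8)
    );
}
```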
22 changes: 11 additions & 11 deletions crates/apollo_l1_provider/src/l1_provider_tests.rs
@@ -49,7 +49,7 @@ fn commit_block_no_rejected(
l1_provider.commit_block(txs.iter().copied().collect(), [].into(), block_number).unwrap();
}

-fn commit_block_expect_error_just_to_start_bootstrapping(
+fn call_commit_block_to_start_bootstrapping_and_expect_error(
l1_provider: &mut L1Provider,
block_number: BlockNumber,
) {
@@ -437,7 +437,7 @@ async fn bootstrap_commit_block_received_twice_no_error() {
.build_into_l1_provider();

l1_provider.initialize(BlockNumber(0), vec![]).await.expect("l1 provider initialize failed");
-    commit_block_expect_error_just_to_start_bootstrapping(&mut l1_provider, BlockNumber(2));
+    call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, BlockNumber(2));

commit_block_no_rejected(&mut l1_provider, &[], BlockNumber(2));
// l1_provider.start_bootstrapping(BlockNumber(2));
@@ -459,7 +459,7 @@ async fn bootstrap_commit_block_received_twice_error_if_new_uncommitted_txs() {
.build_into_l1_provider();

l1_provider.initialize(BlockNumber(0), vec![]).await.expect("l1 provider initialize failed");
-    commit_block_expect_error_just_to_start_bootstrapping(&mut l1_provider, BlockNumber(2));
+    call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, BlockNumber(2));

// Test.
commit_block_no_rejected(&mut l1_provider, &[tx_hash!(1)], BlockNumber(0));
@@ -1069,7 +1069,7 @@ async fn commit_block_historical_height_short_circuits_bootstrap() {
let l1_provider_builder_clone = l1_provider_builder.clone();
let mut l1_provider = l1_provider_builder.clone().build_into_l1_provider();
l1_provider.initialize(STARTUP_HEIGHT, vec![]).await.expect("l1 provider initialize failed");
-    commit_block_expect_error_just_to_start_bootstrapping(&mut l1_provider, TARGET_HEIGHT);
+    call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, TARGET_HEIGHT);

let expected_unchanged = l1_provider_builder_clone
.with_height(STARTUP_HEIGHT)
@@ -1483,6 +1483,7 @@ async fn bootstrap_e2e() {

let mut l1_provider =
L1Provider::new(config, l1_provider_client.clone(), Arc::new(sync_client), None);

// Test.

// Trigger the bootstrapper: this will trigger the sync task to start trying to fetch blocks
@@ -1492,10 +1493,7 @@
// understand though.
let scraped_l1_handler_txs = vec![]; // No txs to scrape in this test.
l1_provider.initialize(STARTUP_HEIGHT, scraped_l1_handler_txs).await.unwrap();
-    // TODO(guyn): this test assumes we start in bootstrapping state. The test should be updated to
-    // include the part where the batcher's first commit_block command is what determines the
-    // catchup height and causes the bootstrapping to begin.
-    l1_provider.start_bootstrapping(CATCH_UP_HEIGHT);
+    call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT);

// Load first **Sync** response: the initializer task will pick it up within the specified
// interval.
@@ -1578,6 +1576,7 @@ async fn bootstrap_delayed_batcher_and_sync_state_with_trivial_catch_up() {

let l1_provider_client = Arc::new(FakeL1ProviderClient::default());
const STARTUP_HEIGHT: BlockNumber = BlockNumber(3);
+    const CATCH_UP_HEIGHT: BlockNumber = BlockNumber(5);

let sync_client = MockStateSyncClient::default();
let config = L1ProviderConfig {
@@ -1587,12 +1586,13 @@

let mut l1_provider =
L1Provider::new(config, l1_provider_client.clone(), Arc::new(sync_client), None);

// Test.

// Start the sync sequence, should busy-wait until the batcher height is sent.
let scraped_l1_handler_txs = []; // No txs to scrape in this test.
l1_provider.initialize(STARTUP_HEIGHT, scraped_l1_handler_txs.into()).await.unwrap();

+    call_commit_block_to_start_bootstrapping_and_expect_error(&mut l1_provider, CATCH_UP_HEIGHT);
// **Commit** a few blocks. The height starts from the provider's current height, since this
// is a trivial catchup scenario (nothing to catch up).
// This checks that the trivial catch_up_height doesn't mess up this flow.
@@ -1606,11 +1606,11 @@
// Commit blocks should have been applied.
let start_height_plus_2 = height_add(STARTUP_HEIGHT, 2);
assert_eq!(l1_provider.current_height, start_height_plus_2);
// TODO(guyn): it is possible that the rest of this test is trivial.

// Should still be bootstrapping, since catchup height isn't determined yet.
    // Technically we could end bootstrapping at this point, but it's simpler to let it
// terminate gracefully once the batcher and sync are ready.
-    assert_eq!(l1_provider.state, ProviderState::Pending);
+    assert!(l1_provider.state.is_bootstrapping());

// Let the sync task continue, it should short circuit.
tokio::time::sleep(config.startup_sync_sleep_retry_interval_seconds).await;
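Reviewer aside: the renamed test helper relies on the behavior noted near `initialize` above: bootstrapping begins when `commit_block` arrives with a height above `current_height`, and that first call is expected to fail. The helper's body is not shown in this diff, so the following is only a toy model of that transition with invented types, not the crate's real `ProviderState` or error handling:

```rust
// Toy model only: a commit above the current height flips the provider into a
// bootstrapping state and the call itself is reported as an error.
#[derive(Debug, PartialEq)]
enum State {
    Pending,
    Bootstrapping,
}

struct ToyProvider {
    current_height: u64,
    state: State,
}

impl ToyProvider {
    fn commit_block(&mut self, height: u64) -> Result<(), String> {
        if self.state == State::Pending && height > self.current_height {
            self.state = State::Bootstrapping;
            return Err(format!("block {height} is ahead of current height {}", self.current_height));
        }
        Ok(())
    }
}

fn main() {
    let mut provider = ToyProvider { current_height: 0, state: State::Pending };
    // The pattern the tests use: the call errors, but bootstrapping has started.
    assert!(provider.commit_block(2).is_err());
    assert_eq!(provider.state, State::Bootstrapping);
}
```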
2 changes: 1 addition & 1 deletion crates/apollo_l1_provider/src/l1_scraper.rs
@@ -193,7 +193,7 @@ impl<BaseLayerType: BaseLayerContract + Send + Sync + Debug> L1Scraper<BaseLayer
.await
.map_err(L1ScraperError::BaseLayerError)?
.number;
-        Ok(last_historic_l2_height)
+        Ok(last_historic_l2_height.unchecked_next())
}

/// Send an initialize message to the L1 provider, including the last L2 height that was proved
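Reviewer aside: with `unchecked_next()`, the scraper now hands `initialize` the first non-historic height rather than the last proved one, matching the `start_height` rename in l1_provider.rs. A toy illustration of the off-by-one (this `BlockNumber` and its `unchecked_next` are a stand-in reimplementation for the example, not the crate's type):

```rust
// Illustrative only: start_height is one above the last block already proved on L1.
#[derive(Clone, Copy, PartialEq, Debug)]
struct BlockNumber(u64);

impl BlockNumber {
    fn unchecked_next(self) -> BlockNumber {
        BlockNumber(self.0 + 1)
    }
}

fn main() {
    let last_historic_l2_height = BlockNumber(7); // last block already proved and sent to L1
    let start_height = last_historic_l2_height.unchecked_next();
    assert_eq!(start_height, BlockNumber(8)); // the provider starts caring from here
}
```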
4 changes: 2 additions & 2 deletions crates/apollo_l1_provider/src/test_utils.rs
@@ -43,7 +43,7 @@ macro_rules! make_bootstrapper {
committed_txs: [$(tx_hash!($tx)),*].into()
}),*
].into_iter().collect(),
-            catch_up_height: BlockNumber(0),
+            target_height: BlockNumber(0),
l1_provider_client: Arc::new(FakeL1ProviderClient::default()),
sync_client: Arc::new(MockStateSyncClient::default()),
sync_task_handle: SyncTaskHandle::default(),
@@ -636,7 +636,7 @@ impl L1ProviderClient for FakeL1ProviderClient {

async fn initialize(
&self,
-        _last_historic_l2_height: BlockNumber,
+        _start_height: BlockNumber,
_events: Vec<Event>,
) -> L1ProviderClientResult<()> {
todo!()