@@ -5,5 +5,6 @@
 "l1_gas_price_scraper_config.starting_block": 0,
 "l1_gas_price_scraper_config.starting_block.#is_none": true,
 "l1_gas_price_scraper_config.startup_num_blocks_multiplier": 2,
-"l1_scraper_config.set_provider_historic_height_to_l2_genesis": false
+"l1_scraper_config.set_provider_historic_height_to_l2_genesis": false,
+"l1_scraper_config.l1_block_time_seconds": 12.0
 }
@@ -5,5 +5,6 @@
 "l1_gas_price_scraper_config.starting_block": 0,
 "l1_gas_price_scraper_config.starting_block.#is_none": true,
 "l1_gas_price_scraper_config.startup_num_blocks_multiplier": 2,
+"l1_scraper_config.l1_block_time_seconds": 12.0,
 "l1_scraper_config.set_provider_historic_height_to_l2_genesis": false
 }
10 changes: 1 addition & 9 deletions crates/apollo_l1_provider/src/l1_provider.rs
@@ -122,15 +122,7 @@ impl L1Provider {
                 block_timestamp,
                 scrape_timestamp,
             } => {
-                let tx_hash = l1_handler_tx.tx_hash;
-                let successfully_inserted =
-                    self.tx_manager.add_tx(l1_handler_tx, block_timestamp, scrape_timestamp);
-                if !successfully_inserted {
-                    debug!(
-                        "Unexpected L1 Handler transaction with hash: {tx_hash}, already \
-                         known or committed."
-                    );
-                }
+                self.tx_manager.add_tx(l1_handler_tx, block_timestamp, scrape_timestamp);
             }
             Event::TransactionCancellationStarted {
                 tx_hash,
15 changes: 15 additions & 0 deletions crates/apollo_l1_provider/src/l1_provider_tests.rs
@@ -208,6 +208,21 @@ fn process_events_committed_txs() {
     assert_eq!(l1_provider, expected_l1_provider);
 }
 
+#[test]
+fn add_tx_double_scraped_doesnt_update_scrape_timestamp() {
+    // Setup.
+    let mut l1_provider = L1ProviderContentBuilder::new()
+        .with_timed_txs([(l1_handler(1), 1)])
+        .with_state(ProviderState::Pending)
+        .build_into_l1_provider();
+
+    let expected_l1_provider = l1_provider.clone();
+
+    // Test: double scrape doesn't update the scrape timestamp.
+    l1_provider.add_events(vec![timed_l1_handler_event(tx_hash!(1), 2.into())]).unwrap();
+    assert_eq!(l1_provider, expected_l1_provider);
+}
+
 #[test]
 fn pending_state_returns_error() {
     // Setup.
7 changes: 2 additions & 5 deletions crates/apollo_l1_provider/src/l1_scraper.rs
@@ -34,10 +34,6 @@ pub mod l1_scraper_tests;
 
 type L1ScraperResult<T, B> = Result<T, L1ScraperError<B>>;
 
-// TODO(guyn): make this a config parameter
-// Sensible lower bound.
-const L1_BLOCK_TIME: u64 = 12;
-
 pub struct L1Scraper<BaseLayerType: BaseLayerContract + Send + Sync + Debug> {
     pub config: L1ScraperConfig,
     pub base_layer: BaseLayerType,
@@ -140,7 +136,8 @@ impl<BaseLayerType: BaseLayerContract + Send + Sync + Debug> L1Scraper<BaseLayerType>
         debug!("Latest L1 block number: {latest_l1_block_number:?}");
 
         // Estimate the number of blocks in the interval, to rewind from the latest block.
-        let blocks_in_interval = self.config.startup_rewind_time_seconds.as_secs() / L1_BLOCK_TIME;
+        let blocks_in_interval = self.config.startup_rewind_time_seconds.as_secs()
+            / self.config.l1_block_time_seconds.as_secs();
         debug!("Blocks in interval: {blocks_in_interval}");
 
         // Add 50% safety margin.
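The rewind estimate above is plain integer arithmetic: the configured rewind window divided by the configured L1 block time, both in whole seconds, then padded by 50%. A minimal sketch of that computation, assuming the margin is applied as the comment describes (the helper name and the margin expression are illustrative, not the crate's actual code):

use std::time::Duration;

// Hypothetical helper mirroring the scraper's startup logic: estimate how many
// L1 blocks to rewind from the latest L1 block.
fn startup_rewind_blocks(rewind_window: Duration, l1_block_time: Duration) -> u64 {
    // Whole-second integer division, as in the diff above.
    let blocks_in_interval = rewind_window.as_secs() / l1_block_time.as_secs();
    // Add the 50% safety margin mentioned in the scraper's comment.
    blocks_in_interval + blocks_in_interval / 2
}

fn main() {
    // A one-hour window at the default 12-second block time: 300 blocks, 450 with margin.
    assert_eq!(startup_rewind_blocks(Duration::from_secs(3600), Duration::from_secs(12)), 450);
}

One caveat of moving the constant into config: as_secs() truncates, so an l1_block_time_seconds below one second would floor to zero and make the division panic; the 12-second default keeps the computation safe.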
32 changes: 26 additions & 6 deletions crates/apollo_l1_provider/src/transaction_manager.rs
@@ -7,7 +7,7 @@ use apollo_l1_provider_types::{InvalidValidationStatus, ValidationStatus};
 use starknet_api::block::{BlockTimestamp, UnixTimestamp};
 use starknet_api::executable_transaction::L1HandlerTransaction;
 use starknet_api::transaction::TransactionHash;
-use tracing::debug;
+use tracing::{debug, info, warn};
 
 use crate::transaction_record::{
     Records,
@@ -161,14 +161,34 @@ impl TransactionManager
         tx: L1HandlerTransaction,
         block_timestamp: BlockTimestamp,
         scrape_timestamp: UnixTimestamp,
-    ) -> bool {
+    ) {
         let tx_hash = tx.tx_hash;
-        // If exists, return false and do nothing. If not, create the record as a HashOnly payload.
         let is_new_record = self.create_record_if_not_exist(tx_hash);
-        self.with_record(tx_hash, move |record| {
-            record.tx.set(tx, block_timestamp, scrape_timestamp);
+        // Replace a HashOnly payload with a Full payload. Do not update a Full payload.
+        // A hash-only payload can come from bootstrapping via state sync, and is then updated by
+        // add_events from the scraper. However, if we get the same full tx twice (from the
+        // scraper) it could indicate a double scrape, and may cause the tx to be re-added to the
+        // proposable index.
+        self.with_record(tx_hash, move |record| match &record.tx {
+            TransactionPayload::HashOnly(_) => {
+                if !is_new_record {
+                    info!(
+                        "Transaction {tx_hash} already exists as a HashOnly payload. It was \
+                         probably received via the state sync component, and is now updated with \
+                         a Full payload."
+                    );
+                }
+                record.tx.set(tx, block_timestamp, scrape_timestamp);
+            }
+            TransactionPayload::Full { tx: _, created_at_block_timestamp: _, scrape_timestamp } => {
+                warn!(
+                    "Transaction {tx_hash} already exists as a Full payload, scraped at \
+                     {scrape_timestamp}. This could indicate a double scrape. Ignoring the new \
+                     transaction."
+                );
+            }
         });
-
-        is_new_record
     }
 
     pub fn request_cancellation(
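The match above encodes a one-way upgrade rule for a record's payload: HashOnly may become Full, but Full is never overwritten, so a double scrape cannot refresh the original scrape timestamp. A self-contained sketch of that rule with simplified stand-in types (the real TransactionPayload variants carry Starknet types rather than the strings and integers used here):

// Simplified stand-ins for the real payload types.
enum TransactionPayload {
    // Hash only, e.g. bootstrapped from state sync.
    HashOnly(u64),
    // Full transaction delivered by the L1 scraper.
    Full { tx: String, scrape_timestamp: u64 },
}

// One-way upgrade: HashOnly -> Full is allowed; a second Full is ignored.
fn upgrade(record: &mut TransactionPayload, tx: String, scrape_timestamp: u64) {
    match record {
        TransactionPayload::HashOnly(_) => {
            *record = TransactionPayload::Full { tx, scrape_timestamp };
        }
        TransactionPayload::Full { .. } => {
            // Keep the existing payload and its scrape timestamp untouched.
        }
    }
}

fn main() {
    let mut record = TransactionPayload::HashOnly(1);
    upgrade(&mut record, "tx-1".into(), 100);
    // A duplicate scrape does not overwrite the first scrape timestamp.
    upgrade(&mut record, "tx-1".into(), 200);
    match record {
        TransactionPayload::Full { scrape_timestamp, .. } => assert_eq!(scrape_timestamp, 100),
        TransactionPayload::HashOnly(_) => unreachable!(),
    }
}

This is also why add_tx no longer returns a bool: the duplicate classification and its logging now live inside the manager, which lets the call site in l1_provider.rs shrink to a plain delegation.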
9 changes: 9 additions & 0 deletions crates/apollo_l1_scraper_config/src/config.rs
@@ -19,6 +19,8 @@ pub struct L1ScraperConfig {
     #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
     pub polling_interval_seconds: Duration,
     pub set_provider_historic_height_to_l2_genesis: bool,
+    #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
+    pub l1_block_time_seconds: Duration,
 }
 
 impl Default for L1ScraperConfig {
@@ -29,6 +31,7 @@ impl Default for L1ScraperConfig {
             finality: 0,
             polling_interval_seconds: Duration::from_secs(30),
             set_provider_historic_height_to_l2_genesis: false,
+            l1_block_time_seconds: Duration::from_secs(12),
         }
     }
 }
@@ -67,6 +70,12 @@ impl SerializeConfig for L1ScraperConfig {
                 This is useful on new chains (or in tests) where there have not been any state updates to the Starknet contract.",
                 ParamPrivacyInput::Public,
             ),
+            ser_param(
+                "l1_block_time_seconds",
+                &self.l1_block_time_seconds.as_secs(),
+                "The time it takes for a new L1 block to be created.",
+                ParamPrivacyInput::Public,
+            ),
         ])
     }
 }
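The new field reuses the crate's deserialize_float_seconds_to_duration helper, so the 12.0 written in the deployment configs above parses into a Duration. A minimal sketch of what such a serde helper plausibly looks like (an assumed shape for illustration, not the crate's actual implementation):

use std::time::Duration;

use serde::{Deserialize, Deserializer};

// Assumed shape: parse a float number of seconds into a Duration.
fn deserialize_float_seconds_to_duration<'de, D>(deserializer: D) -> Result<Duration, D::Error>
where
    D: Deserializer<'de>,
{
    let seconds = f64::deserialize(deserializer)?;
    Ok(Duration::from_secs_f64(seconds))
}

#[derive(Deserialize)]
struct Example {
    #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
    l1_block_time_seconds: Duration,
}

fn main() {
    let example: Example = serde_json::from_str(r#"{"l1_block_time_seconds": 12.0}"#).unwrap();
    assert_eq!(example.l1_block_time_seconds, Duration::from_secs(12));
}

Note the round-trip asymmetry: serialization goes through as_secs(), which is why the schema entry below records the integer 12 while the deployment configs write 12.0.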
5 changes: 5 additions & 0 deletions crates/apollo_node/resources/config_schema.json
@@ -2604,6 +2604,11 @@
     "privacy": "Public",
     "value": 0
   },
+  "l1_scraper_config.l1_block_time_seconds": {
+    "description": "The time it takes for a new L1 block to be created.",
+    "privacy": "Public",
+    "value": 12
+  },
   "l1_scraper_config.polling_interval_seconds": {
     "description": "Interval in Seconds between each scraping attempt of L1.",
     "privacy": "Public",