|
| 1 | +use std::str::FromStr; |
| 2 | + |
| 3 | +use alloy::consensus::Header as HeaderInner; |
| 4 | +use alloy::primitives::{ |
| 5 | + keccak256, |
| 6 | + BlockHash, |
| 7 | + Bytes, |
| 8 | + Log as LogInner, |
| 9 | + LogData, |
| 10 | + TxHash, |
| 11 | + B256, |
| 12 | + U256, |
| 13 | +}; |
| 14 | +use alloy::providers::{Provider, ProviderBuilder}; |
| 15 | +use alloy::rpc::types::{Block, BlockTransactions, Header, Log}; |
| 16 | +use alloy::transports::mock::Asserter; |
| 17 | +use apollo_l1_provider::event_identifiers_to_track; |
| 18 | +use papyrus_base_layer::ethereum_base_layer_contract::{ |
| 19 | + EthereumBaseLayerConfig, |
| 20 | + EthereumBaseLayerContract, |
| 21 | +}; |
| 22 | +use papyrus_base_layer::test_utils::{ |
| 23 | + DEFAULT_ANVIL_L1_ACCOUNT_ADDRESS, |
| 24 | + DEFAULT_ANVIL_L1_DEPLOYED_ADDRESS, |
| 25 | +}; |
| 26 | +mod utils; |
| 27 | +use papyrus_base_layer::BaseLayerContract; |
| 28 | +use utils::{L1_CONTRACT_ADDRESS, L2_ENTRY_POINT}; |
| 29 | + |
| 30 | +// This test requires that we do some manual work to produce the logs we expect to get from the |
| 31 | +// Starknet L1 contract. The reason we don't just post events to L1 and have them scraped is that |
| 32 | +// some of the log types don't correspond to actions we can just do to the base layer, like marking |
| 33 | +// a tx as consumed on L2 (which requires a state update). We also don't know which additional logs |
| 34 | +// may be added to the list of filtered logs, which is the point of this test (to protect against |
| 35 | +// future additions). So we leave it to anyone that adds that new message from L1 to L2 to also make |
| 36 | +// an example log and post it as part of the test, to make sure it is properly parsed all the way up |
| 37 | +// to the provider. |
| 38 | + |
/// Placeholder 32-byte hash (hex, "0x"-prefixed) used wherever the mock needs a block or tx hash;
/// the test never inspects its value.
const FAKE_HASH: &str = "0x1234567890123456789012345678901234567890123456789012345678901234";
| 40 | + |
| 41 | +#[tokio::test] |
| 42 | +async fn all_event_types_must_be_filtered_and_parsed() { |
| 43 | + // Setup. |
| 44 | + // Make a mock L1 |
| 45 | + let asserter = Asserter::new(); |
| 46 | + let provider = ProviderBuilder::new().connect_mocked_client(asserter.clone()); |
| 47 | + |
| 48 | + let mut base_layer = EthereumBaseLayerContract::new_with_provider( |
| 49 | + EthereumBaseLayerConfig::default(), |
| 50 | + provider.root().clone(), |
| 51 | + ); |
| 52 | + |
| 53 | + // We can just return the same block all the time, it will only affect the timestamps. |
| 54 | + let dummy_block: Block<B256, Header> = dummy_block(); |
| 55 | + |
| 56 | + // Put together the log that corresponds to each type of event in event_identifiers_to_track(). |
| 57 | + // Then filter them one at a time. If any iteration doesn't return an event, it means we fail to |
| 58 | + // filter for it. If any iteration returns an error, we know something is wrong. |
| 59 | + // TODO(guyn): add the scraper and provider parsing. |
| 60 | + let mut block_number = 1; |
| 61 | + let filters = event_identifiers_to_track(); |
| 62 | + |
| 63 | + let mut expected_logs = Vec::with_capacity(filters.len()); |
| 64 | + |
| 65 | + // This log is for LOG_MESSAGE_TO_L2_EVENT_IDENTIFIER (must check that this is the first log in |
| 66 | + // filters) |
| 67 | + let expected_message_to_l2_log = encode_message_into_log( |
| 68 | + filters[0], |
| 69 | + block_number, |
| 70 | + &[U256::from(15), U256::from(202)], |
| 71 | + U256::from(127), |
| 72 | + Some(U256::from(420)), |
| 73 | + ); |
| 74 | + block_number += 1; |
| 75 | + asserter.push_success(&vec![expected_message_to_l2_log.clone()]); |
| 76 | + expected_logs.push(expected_message_to_l2_log); |
| 77 | + asserter.push_success(&dummy_block); |
| 78 | + |
| 79 | + // This log is for MESSAGE_TO_L2_CANCELLATION_STARTED_EVENT_IDENTIFIER (must check that this is |
| 80 | + // the second log in filters) |
| 81 | + let expected_message_to_l2_cancellation_started_log = encode_message_into_log( |
| 82 | + filters[1], |
| 83 | + block_number, |
| 84 | + &[U256::from(1), U256::from(2)], |
| 85 | + U256::from(0), |
| 86 | + None, |
| 87 | + ); |
| 88 | + block_number += 1; |
| 89 | + asserter.push_success(&vec![expected_message_to_l2_cancellation_started_log.clone()]); |
| 90 | + expected_logs.push(expected_message_to_l2_cancellation_started_log); |
| 91 | + asserter.push_success(&dummy_block); |
| 92 | + |
| 93 | + // This log is for MESSAGE_TO_L2_CANCELED_EVENT_IDENTIFIER (must check that this is the third |
| 94 | + // log in filters) |
| 95 | + let expected_message_to_l2_canceled_log = encode_message_into_log( |
| 96 | + filters[2], |
| 97 | + block_number, |
| 98 | + &[U256::from(1), U256::from(2)], |
| 99 | + U256::from(0), |
| 100 | + None, |
| 101 | + ); |
| 102 | + block_number += 1; |
| 103 | + asserter.push_success(&vec![expected_message_to_l2_canceled_log.clone()]); |
| 104 | + expected_logs.push(expected_message_to_l2_canceled_log); |
| 105 | + asserter.push_success(&dummy_block); |
| 106 | + |
| 107 | + // This log is for CONSUMED_MESSAGE_TO_L2_EVENT_IDENTIFIER (must check that this is the fourth |
| 108 | + // log in filters) |
| 109 | + let expected_consumed_message_to_l2_log = encode_message_into_log( |
| 110 | + filters[3], |
| 111 | + block_number, |
| 112 | + &[U256::from(1), U256::from(2)], |
| 113 | + U256::from(0), |
| 114 | + Some(U256::from(1)), |
| 115 | + ); |
| 116 | + block_number += 1; |
| 117 | + asserter.push_success(&vec![expected_consumed_message_to_l2_log.clone()]); |
| 118 | + expected_logs.push(expected_consumed_message_to_l2_log); |
| 119 | + asserter.push_success(&dummy_block); |
| 120 | + |
| 121 | + // If new log types are needed, they must be added here. |
| 122 | + |
| 123 | + // Check that each event type has a corresponding log. |
| 124 | + for filter in filters { |
| 125 | + // Only filter for one event at a time, to make sure we trigger on all events. |
| 126 | + let events = base_layer.events(0..=block_number, &[filter]).await.unwrap_or_else(|_| { |
| 127 | + panic!("should succeed in getting events for filter: {:?}", filter) |
| 128 | + }); |
| 129 | + assert!(events.len() == 1, "Expected 1 event for filter: {:?}", filter); |
| 130 | + } |
| 131 | +} |
| 132 | + |
| 133 | +fn dummy_block<T>() -> Block<T, Header> { |
| 134 | + Block { |
| 135 | + header: Header { |
| 136 | + hash: BlockHash::from_str(FAKE_HASH).unwrap(), |
| 137 | + inner: HeaderInner { number: 3, base_fee_per_gas: Some(5), ..Default::default() }, |
| 138 | + total_difficulty: None, |
| 139 | + size: None, |
| 140 | + }, |
| 141 | + transactions: BlockTransactions::<T>::default(), |
| 142 | + uncles: vec![], |
| 143 | + withdrawals: None, |
| 144 | + } |
| 145 | +} |
| 146 | + |
| 147 | +// Each function signature is hashed using keccak256 to get the selector. |
| 148 | +// For example, LogMessageToL2(address,uint256,uint256,uint256[],uint256,uint256) |
| 149 | +// becomes "db80dd488acf86d17c747445b0eabb5d57c541d3bd7b6b87af987858e5066b2b". |
| 150 | +fn filter_to_hash(filter: &str) -> String { |
| 151 | + format!("{:x}", keccak256(filter.as_bytes())) |
| 152 | +} |
| 153 | + |
| 154 | +/// Encodes the non-indexed parameters of LogMessageToL2 event data. |
| 155 | +/// Parameters: (payload: uint256[], nonce: uint256, fee: uint256) |
| 156 | +/// |
| 157 | +/// ABI encoding for tuple (uint256[], uint256, uint256): |
| 158 | +/// - Offset to array (32 bytes) |
| 159 | +/// - nonce value (32 bytes) |
| 160 | +/// - fee value (32 bytes) |
| 161 | +/// - Array length (32 bytes) |
| 162 | +/// - Array elements (32 bytes each) |
| 163 | +fn encode_log_message_to_l2_data(payload: &[U256], nonce: U256, fee: U256) -> Bytes { |
| 164 | + // Instead of the payload array data, we only put in the head section the offset to where the |
| 165 | + // data will be stored. This would be 3 words from the start of the head (offset, nonce, fee = |
| 166 | + // 96 bytes = 0x60). |
| 167 | + let offset = U256::from(96u64); |
| 168 | + |
| 169 | + let mut encoded = Vec::new(); |
| 170 | + // Offset to the array data (96 bytes). |
| 171 | + encoded.extend_from_slice(&offset.to_be_bytes::<32>()); |
| 172 | + // nonce. |
| 173 | + encoded.extend_from_slice(&nonce.to_be_bytes::<32>()); |
| 174 | + // fee. |
| 175 | + encoded.extend_from_slice(&fee.to_be_bytes::<32>()); |
| 176 | + // Tail section has the payload array data only. It starts with the length of the array. |
| 177 | + let array_len = U256::from(payload.len()); |
| 178 | + encoded.extend_from_slice(&array_len.to_be_bytes::<32>()); |
| 179 | + // Finally, write the array elements. |
| 180 | + for item in payload { |
| 181 | + encoded.extend_from_slice(&item.to_be_bytes::<32>()); |
| 182 | + } |
| 183 | + |
| 184 | + Bytes::from(encoded) |
| 185 | +} |
| 186 | + |
| 187 | +// Same as above, but for the other event types (that don't include a fee). |
| 188 | +fn encode_other_event_data(payload: &[U256], nonce: U256) -> Bytes { |
| 189 | + // Instead of the payload array data, we only put in the head section the offset to where the |
| 190 | + // data will be stored. This would be 2 words from the start of the head (offset, nonce = 64 |
| 191 | + // bytes = 0x40). |
| 192 | + let offset = U256::from(64u64); |
| 193 | + |
| 194 | + let mut encoded = Vec::new(); |
| 195 | + // Offset to the array data (96 bytes). |
| 196 | + encoded.extend_from_slice(&offset.to_be_bytes::<32>()); |
| 197 | + // nonce. |
| 198 | + encoded.extend_from_slice(&nonce.to_be_bytes::<32>()); |
| 199 | + // Tail section has the payload array data only. It starts with the length of the array. |
| 200 | + let array_len = U256::from(payload.len()); |
| 201 | + encoded.extend_from_slice(&array_len.to_be_bytes::<32>()); |
| 202 | + // Finally, write the array elements. |
| 203 | + for item in payload { |
| 204 | + encoded.extend_from_slice(&item.to_be_bytes::<32>()); |
| 205 | + } |
| 206 | + |
| 207 | + Bytes::from(encoded) |
| 208 | +} |
| 209 | + |
| 210 | +fn encode_message_into_log( |
| 211 | + selector: &str, |
| 212 | + block_number: u64, |
| 213 | + payload: &[U256], |
| 214 | + nonce: U256, |
| 215 | + fee: Option<U256>, |
| 216 | +) -> Log { |
| 217 | + // Add zero padding to the address to make it 32 bytes |
| 218 | + let starknet_address = DEFAULT_ANVIL_L1_ACCOUNT_ADDRESS.to_bigint().to_str_radix(16); |
| 219 | + let starknet_address = format!("{:0>64}", starknet_address); |
| 220 | + |
| 221 | + let encoded_data = match fee { |
| 222 | + Some(fee) => encode_log_message_to_l2_data(payload, nonce, fee), |
| 223 | + None => encode_other_event_data(payload, nonce), |
| 224 | + }; |
| 225 | + Log { |
| 226 | + inner: LogInner { |
| 227 | + address: DEFAULT_ANVIL_L1_DEPLOYED_ADDRESS.parse().unwrap(), |
| 228 | + data: LogData::new_unchecked( |
| 229 | + vec![ |
| 230 | + filter_to_hash(selector).parse().unwrap(), |
| 231 | + starknet_address.parse().unwrap(), |
| 232 | + U256::from(L1_CONTRACT_ADDRESS).into(), |
| 233 | + U256::from(L2_ENTRY_POINT).into(), |
| 234 | + ], |
| 235 | + encoded_data, |
| 236 | + ), |
| 237 | + }, |
| 238 | + block_hash: Some(BlockHash::from_str(FAKE_HASH).unwrap()), |
| 239 | + block_number: Some(block_number), |
| 240 | + block_timestamp: None, |
| 241 | + transaction_hash: Some(TxHash::from_str(FAKE_HASH).unwrap()), |
| 242 | + transaction_index: Some(block_number + 1), |
| 243 | + log_index: Some(block_number + 2), |
| 244 | + removed: false, |
| 245 | + } |
| 246 | +} |
0 commit comments