diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 93aa1bb1658..36564600d83 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -67,6 +67,10 @@ jobs: loadtest: runs-on: [matterlabs-ci-runner] + strategy: + fail-fast: false + matrix: + vm_mode: ["old", "new"] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -82,7 +86,8 @@ jobs: - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT="16000" >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 24000 || 18000 }} >> .env + echo ACCOUNTS_AMOUNT="150" >> .env echo FAIL_FAST=true >> .env echo IN_DOCKER=1 >> .env echo DATABASE_MERKLE_TREE_MODE=lightweight >> .env @@ -105,7 +110,9 @@ jobs: # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=${{ matrix.vm_mode }} \ + PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE" \ + ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - name: Deploy legacy era contracts @@ -135,7 +142,7 @@ jobs: base_token: ["Eth", "Custom"] deployment_mode: ["Rollup", "Validium"] env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" + SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" runs-on: [matterlabs-ci-runner] steps: diff --git a/Cargo.lock b/Cargo.lock index c352625b943..ce20580b385 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1933,6 +1933,18 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "env_filter" version = "0.1.0" @@ -7276,6 +7288,17 @@ dependencies = [ "zksync_vm_benchmark_harness", ] +[[package]] +name = "vm2" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.0", + "zkevm_opcode_defs 0.150.0", +] + [[package]] name = "walkdir" version = "2.4.0" @@ -8903,11 +8926,13 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", + "pretty_assertions", "serde", "thiserror", "tokio", "tracing", "vise", + "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", @@ -9742,6 +9767,7 @@ dependencies = [ "futures 0.3.28", "once_cell", "rand 0.8.5", + "serde", "tempfile", "test-casing", "tokio", @@ -9749,6 +9775,7 @@ dependencies = [ "vise", "zksync_contracts", "zksync_dal", + "zksync_health_check", "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", diff --git a/Cargo.toml b/Cargo.toml index 
20e24bff044..06bd6669b67 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -215,6 +215,9 @@ zk_evm_1_4_0 = { package = "zk_evm", version = "0.140.0" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.0" } +# New VM; pinned to a specific commit because of instability +vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } + # Consensus dependencies. zksync_concurrency = "=0.1.0-rc.9" zksync_consensus_bft = "=0.1.0-rc.9" diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 2a219222d1b..d24757829fa 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -94,8 +94,8 @@ async fn build_state_keeper( stop_receiver_clone.changed().await?; result })); - let batch_executor_base: Box = - Box::new(MainBatchExecutor::new(save_call_traces, true)); + let batch_executor = MainBatchExecutor::new(save_call_traces, true); + let batch_executor: Box = Box::new(batch_executor); let io = ExternalIO::new( connection_pool, @@ -108,7 +108,7 @@ async fn build_state_keeper( Ok(ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - batch_executor_base, + batch_executor, output_handler, Arc::new(NoopSealer), Arc::new(storage_factory), diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index a56c85a7d5b..f2e73028e6e 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -8,7 +8,7 @@ use zksync_contracts::{ use zksync_multivm::{ interface::{ dyn_tracers::vm_1_5_0::DynTracer, tracer::VmExecutionStopReason, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, VmInterface, }, vm_latest::{ constants::{BATCH_COMPUTATIONAL_GAS_LIMIT, BOOTLOADER_HEAP_PAGE}, @@ -260,8 +260,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); - let mut vm: Vm<_, HistoryEnabled> = - Vm::new(l1_batch, system_env, Rc::new(RefCell::new(storage_view))); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); let result = vm.inspect(tracer.into(), VmExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index b1d522171fe..1c22ce5c41a 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -11,7 +11,7 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, + BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, @@ -209,5 +209,6 @@ fn load_env_config() -> anyhow::Result { snapshot_recovery: None, external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), + experimental_vm_config: ExperimentalVmConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs 
b/core/bin/zksync_server/src/node_builder.rs index 9fafacb7055..1998d2dae91 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -55,7 +55,8 @@ use zksync_node_framework::{ }, tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ - bwip::BasicWitnessInputProducerLayer, protective_reads::ProtectiveReadsWriterLayer, + bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, + protective_reads::ProtectiveReadsWriterLayer, }, web3_api::{ caches::MempoolCacheLayer, @@ -248,8 +249,10 @@ impl MainNodeBuilder { try_load_config!(wallets.state_keeper), ); let db_config = try_load_config!(self.configs.db_config); + let experimental_vm_config = try_load_config!(self.configs.experimental_vm_config); let main_node_batch_executor_builder_layer = - MainBatchExecutorLayer::new(sk_config.save_call_traces, OPTIONAL_BYTECODE_COMPRESSION); + MainBatchExecutorLayer::new(sk_config.save_call_traces, OPTIONAL_BYTECODE_COMPRESSION) + .with_fast_vm_mode(experimental_vm_config.state_keeper_fast_vm_mode); let rocksdb_options = RocksdbStorageOptions { block_cache_capacity: db_config @@ -573,6 +576,16 @@ impl MainNodeBuilder { Ok(self) } + fn add_vm_playground_layer(mut self) -> anyhow::Result { + let vm_config = try_load_config!(self.configs.experimental_vm_config); + self.node.add_layer(VmPlaygroundLayer::new( + vm_config.playground, + self.genesis_config.l2_chain_id, + )); + + Ok(self) + } + fn add_base_token_ratio_persister_layer(mut self) -> anyhow::Result { let config = try_load_config!(self.configs.base_token_adjuster); let contracts_config = self.contracts_config.clone(); @@ -736,6 +749,9 @@ impl MainNodeBuilder { Component::VmRunnerBwip => { self = self.add_vm_runner_bwip_layer()?; } + Component::VmPlayground => { + self = self.add_vm_playground_layer()?; + } Component::ExternalProofIntegrationApi => { self = self.add_external_proof_integration_api_layer()?; } diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index a25f740ed44..5633fa3e10d 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -29,7 +29,7 @@ pub mod prover_dal; pub mod settlement; pub mod tee_types; pub mod url; -pub mod vm_version; +pub mod vm; pub mod web3; /// Account place in the global state tree is uniquely identified by its address. diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index d4300fba3f8..265c06987af 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -12,7 +12,7 @@ use serde_with::{DeserializeFromStr, SerializeDisplay}; use crate::{ ethabi::Token, - vm_version::VmVersion, + vm::VmVersion, web3::contract::{Detokenize, Error}, H256, U256, }; diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs new file mode 100644 index 00000000000..c178c853b2d --- /dev/null +++ b/core/lib/basic_types/src/vm.rs @@ -0,0 +1,39 @@ +//! Basic VM types that shared widely enough to not put them in the `multivm` crate. + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy)] +pub enum VmVersion { + M5WithoutRefunds, + M5WithRefunds, + M6Initial, + M6BugWithCompressionFixed, + Vm1_3_2, + VmVirtualBlocks, + VmVirtualBlocksRefundsEnhancement, + VmBoojumIntegration, + Vm1_4_1, + Vm1_4_2, + Vm1_5_0SmallBootloaderMemory, + Vm1_5_0IncreasedBootloaderMemory, +} + +impl VmVersion { + /// Returns the latest supported VM version. 
+ pub const fn latest() -> VmVersion { + Self::Vm1_5_0IncreasedBootloaderMemory + } +} + +/// Mode in which to run the new fast VM implementation. +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum FastVmMode { + /// Run only the old VM. + #[default] + Old, + /// Run only the new Vm. + New, + /// Run both the new and old VM and compare their outputs for each transaction execution. + Shadow, +} diff --git a/core/lib/basic_types/src/vm_version.rs b/core/lib/basic_types/src/vm_version.rs deleted file mode 100644 index 49fec39fc9c..00000000000 --- a/core/lib/basic_types/src/vm_version.rs +++ /dev/null @@ -1,22 +0,0 @@ -#[derive(Debug, Clone, Copy)] -pub enum VmVersion { - M5WithoutRefunds, - M5WithRefunds, - M6Initial, - M6BugWithCompressionFixed, - Vm1_3_2, - VmVirtualBlocks, - VmVirtualBlocksRefundsEnhancement, - VmBoojumIntegration, - Vm1_4_1, - Vm1_4_2, - Vm1_5_0SmallBootloaderMemory, - Vm1_5_0IncreasedBootloaderMemory, -} - -impl VmVersion { - /// Returns the latest supported VM version. - pub const fn latest() -> VmVersion { - Self::Vm1_5_0IncreasedBootloaderMemory - } -} diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index e362715d3d4..bb00554ead1 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -3,6 +3,7 @@ use std::num::NonZeroU32; use serde::Deserialize; +use zksync_basic_types::{vm::FastVmMode, L1BatchNumber}; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ExperimentalDBConfig { @@ -60,3 +61,50 @@ impl ExperimentalDBConfig { 100 } } + +/// Configuration for the VM playground (an experimental component that's unlikely to ever be stabilized). +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ExperimentalVmPlaygroundConfig { + /// Mode in which to run the fast VM implementation. Note that for it to actually be used, L1 batches should have a recent version. + #[serde(default)] + pub fast_vm_mode: FastVmMode, + /// Path to the RocksDB cache directory. + #[serde(default = "ExperimentalVmPlaygroundConfig::default_db_path")] + pub db_path: String, + /// First L1 batch to consider processed. Will not be used if the processing cursor is persisted, unless the `reset` flag is set. + #[serde(default)] + pub first_processed_batch: L1BatchNumber, + /// If set to true, processing cursor will reset `first_processed_batch` regardless of the current progress. Beware that this will likely + /// require to drop the RocksDB cache. + #[serde(default)] + pub reset: bool, +} + +impl Default for ExperimentalVmPlaygroundConfig { + fn default() -> Self { + Self { + fast_vm_mode: FastVmMode::default(), + db_path: Self::default_db_path(), + first_processed_batch: L1BatchNumber(0), + reset: false, + } + } +} + +impl ExperimentalVmPlaygroundConfig { + pub fn default_db_path() -> String { + "./db/vm_playground".to_owned() + } +} + +/// Experimental VM configuration options. +#[derive(Debug, Clone, Default, PartialEq, Deserialize)] +pub struct ExperimentalVmConfig { + #[serde(skip)] // Isn't properly deserialized by `envy` + pub playground: ExperimentalVmPlaygroundConfig, + + /// Mode in which to run the fast VM implementation in the state keeper. Should not be set in production; + /// the new VM doesn't produce call traces and can diverge from the old VM! 
+ #[serde(default)] + pub state_keeper_fast_vm_mode: FastVmMode, +} diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 91106a7ca1d..3e6b05d8003 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -9,10 +9,10 @@ use crate::{ pruning::PruningConfig, snapshot_recovery::SnapshotRecoveryConfig, vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}, - CommitmentGeneratorConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, - FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, - FriWitnessVectorGeneratorConfig, ObservabilityConfig, PrometheusConfig, - ProofDataHandlerConfig, + CommitmentGeneratorConfig, ExperimentalVmConfig, ExternalPriceApiClientConfig, + FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, + FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, + PrometheusConfig, ProofDataHandlerConfig, }, ApiConfig, ContractVerifierConfig, DBConfig, EthConfig, ExternalProofIntegrationApiConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, @@ -51,4 +51,5 @@ pub struct GeneralConfig { pub external_price_api_client_config: Option, pub consensus_config: Option, pub external_proof_integration_api_config: Option, + pub experimental_vm_config: Option, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index eb6c26dbe94..0ecd8ee0df9 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -9,7 +9,7 @@ pub use self::{ database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, eth_watch::EthWatchConfig, - experimental::ExperimentalDBConfig, + experimental::{ExperimentalDBConfig, ExperimentalVmConfig, ExperimentalVmPlaygroundConfig}, external_price_api_client::ExternalPriceApiClientConfig, external_proof_integration_api::ExternalProofIntegrationApiConfig, fri_proof_compressor::FriProofCompressorConfig, diff --git a/core/lib/config/src/configs/vm_runner.rs b/core/lib/config/src/configs/vm_runner.rs index fa7c7c1a90a..1fecc12668c 100644 --- a/core/lib/config/src/configs/vm_runner.rs +++ b/core/lib/config/src/configs/vm_runner.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use zksync_basic_types::L1BatchNumber; -#[derive(Debug, Deserialize, Clone, PartialEq, Default)] +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProtectiveReadsWriterConfig { /// Path to the RocksDB data directory that serves state cache. #[serde(default = "ProtectiveReadsWriterConfig::default_db_path")] @@ -18,7 +18,7 @@ impl ProtectiveReadsWriterConfig { } } -#[derive(Debug, Deserialize, Clone, PartialEq, Default)] +#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct BasicWitnessInputProducerConfig { /// Path to the RocksDB data directory that serves state cache. 
#[serde(default = "BasicWitnessInputProducerConfig::default_db_path")] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 6cf512ec5d1..3f548ac1c80 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -6,6 +6,7 @@ use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, }; use zksync_consensus_utils::EncodeDist; @@ -291,6 +292,34 @@ impl Distribution for EncodeDist { } } +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> configs::ExperimentalVmPlaygroundConfig { + configs::ExperimentalVmPlaygroundConfig { + fast_vm_mode: gen_fast_vm_mode(rng), + db_path: self.sample(rng), + first_processed_batch: L1BatchNumber(rng.gen()), + reset: self.sample(rng), + } + } +} + +fn gen_fast_vm_mode(rng: &mut R) -> FastVmMode { + match rng.gen_range(0..3) { + 0 => FastVmMode::Old, + 1 => FastVmMode::New, + _ => FastVmMode::Shadow, + } +} + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> configs::ExperimentalVmConfig { + configs::ExperimentalVmConfig { + playground: self.sample(rng), + state_keeper_fast_vm_mode: gen_fast_vm_mode(rng), + } + } +} + impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::database::DBConfig { configs::database::DBConfig { @@ -1060,6 +1089,7 @@ impl Distribution for EncodeDist { external_price_api_client_config: self.sample(rng), consensus_config: self.sample(rng), external_proof_integration_api_config: self.sample(rng), + experimental_vm_config: self.sample(rng), } } } diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs index 9973d760a23..efaf5d1666c 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -1,4 +1,6 @@ -use zksync_config::configs::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}; +use zksync_config::configs::{ + BasicWitnessInputProducerConfig, ExperimentalVmConfig, ProtectiveReadsWriterConfig, +}; use crate::{envy_load, FromEnv}; @@ -13,3 +15,74 @@ impl FromEnv for BasicWitnessInputProducerConfig { envy_load("vm_runner.bwip", "VM_RUNNER_BWIP_") } } + +impl FromEnv for ExperimentalVmConfig { + fn from_env() -> anyhow::Result { + Ok(Self { + playground: envy_load("experimental_vm.playground", "EXPERIMENTAL_VM_PLAYGROUND_")?, + ..envy_load("experimental_vm", "EXPERIMENTAL_VM_")? 
+ }) + } +} + +#[cfg(test)] +mod tests { + use zksync_basic_types::{vm::FastVmMode, L1BatchNumber}; + + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + #[test] + fn bwip_config_from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + VM_RUNNER_BWIP_DB_PATH=/db/bwip + VM_RUNNER_BWIP_WINDOW_SIZE=50 + VM_RUNNER_BWIP_FIRST_PROCESSED_BATCH=123 + "#; + lock.set_env(config); + + let config = BasicWitnessInputProducerConfig::from_env().unwrap(); + assert_eq!(config.db_path, "/db/bwip"); + assert_eq!(config.window_size, 50); + assert_eq!(config.first_processed_batch, L1BatchNumber(123)); + } + + #[test] + fn experimental_vm_config_from_env() { + let mut lock = MUTEX.lock(); + let config = r#" + EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=new + EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE=shadow + EXPERIMENTAL_VM_PLAYGROUND_DB_PATH=/db/vm_playground + EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH=123 + EXPERIMENTAL_VM_PLAYGROUND_RESET=true + "#; + lock.set_env(config); + + let config = ExperimentalVmConfig::from_env().unwrap(); + assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); + assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); + assert_eq!(config.playground.db_path, "/db/vm_playground"); + assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); + assert!(config.playground.reset); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_RESET"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert!(!config.playground.reset); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert_eq!(config.playground.first_processed_batch, L1BatchNumber(0)); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert_eq!(config.playground.fast_vm_mode, FastVmMode::Old); + + lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_DB_PATH"]); + let config = ExperimentalVmConfig::from_env().unwrap(); + assert!(!config.playground.db_path.is_empty()); + } +} diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 5e5440ff940..fc35f152ae1 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -16,6 +16,7 @@ zk_evm_1_4_1.workspace = true zk_evm_1_4_0.workspace = true zk_evm_1_3_3.workspace = true zk_evm_1_3_1.workspace = true +vm2.workspace = true circuit_sequencer_api_1_3_3.workspace = true circuit_sequencer_api_1_4_0.workspace = true @@ -34,6 +35,7 @@ anyhow.workspace = true hex.workspace = true itertools.workspace = true once_cell.workspace = true +pretty_assertions.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index 1ee9f5ea90f..2bf320aeb14 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -62,9 +62,6 @@ impl GlueFrom for crate::interface::Fi .map(UserL2ToL1Log) .collect(), system_logs: vec![], - total_log_queries: value.full_result.total_log_queries, - cycles_used: value.full_result.cycles_used, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, @@ -121,9 +118,6 @@ impl GlueFrom for crate::interface::Fi .map(UserL2ToL1Log) .collect(), system_logs: vec![], - total_log_queries: value.full_result.total_log_queries, - cycles_used: 
value.full_result.cycles_used, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, @@ -179,9 +173,6 @@ impl GlueFrom for crate::interface: .map(UserL2ToL1Log) .collect(), system_logs: vec![], - total_log_queries: value.full_result.total_log_queries, - cycles_used: value.full_result.cycles_used, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, diff --git a/core/lib/multivm/src/interface/mod.rs b/core/lib/multivm/src/interface/mod.rs index 5cb1f5cd1d1..360d53df52a 100644 --- a/core/lib/multivm/src/interface/mod.rs +++ b/core/lib/multivm/src/interface/mod.rs @@ -1,19 +1,21 @@ pub(crate) mod traits; - -pub use traits::{ - tracers::dyn_tracers, - vm::{VmInterface, VmInterfaceHistoryEnabled}, -}; pub mod types; -pub use types::{ - errors::{ - BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, VmRevertReasonParsingError, +pub use self::{ + traits::{ + tracers::dyn_tracers, + vm::{VmFactory, VmInterface, VmInterfaceHistoryEnabled}, }, - inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, - outputs::{ - BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, - Refunds, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, + types::{ + errors::{ + BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason, + VmRevertReasonParsingError, + }, + inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, + outputs::{ + BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, + Refunds, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, + }, + tracer, }, - tracer, }; diff --git a/core/lib/multivm/src/interface/traits/vm.rs b/core/lib/multivm/src/interface/traits/vm.rs index 499c46a7b52..0fd41934cc6 100644 --- a/core/lib/multivm/src/interface/traits/vm.rs +++ b/core/lib/multivm/src/interface/traits/vm.rs @@ -51,25 +51,17 @@ use zksync_state::StoragePtr; use zksync_types::Transaction; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::{ - interface::{ - types::{ - errors::BytecodeCompressionError, - inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode}, - outputs::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}, - }, - FinishedL1Batch, VmMemoryMetrics, +use crate::interface::{ + types::{ + errors::BytecodeCompressionError, + inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode}, + outputs::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}, }, - tracers::TracerDispatcher, - vm_latest::HistoryEnabled, - HistoryMode, + FinishedL1Batch, VmMemoryMetrics, }; -pub trait VmInterface { - type TracerDispatcher: Default + From>; - - /// Initialize VM. - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self; +pub trait VmInterface { + type TracerDispatcher: Default; /// Push transaction to bootloader memory. fn push_transaction(&mut self, tx: Transaction); @@ -148,14 +140,32 @@ pub trait VmInterface { } } +/// Encapsulates creating VM instance based on the provided environment. +pub trait VmFactory: VmInterface { + /// Creates a new VM instance. + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self; +} + /// Methods of VM requiring history manipulations. 
-pub trait VmInterfaceHistoryEnabled: VmInterface { +/// +/// # Snapshot workflow +/// +/// External callers must follow the following snapshot workflow: +/// +/// - Each new snapshot created using `make_snapshot()` must be either popped or rolled back before creating the following snapshot. +/// OTOH, it's not required to call either of these methods by the end of VM execution. +/// - `pop_snapshot_no_rollback()` may be called spuriously, when no snapshot was created. It is a no-op in this case. +/// +/// These rules guarantee that at each given moment, a VM instance has at most one snapshot (unless the VM makes snapshots internally), +/// which may allow additional VM optimizations. +pub trait VmInterfaceHistoryEnabled: VmInterface { /// Create a snapshot of the current VM state and push it into memory. fn make_snapshot(&mut self); /// Roll back VM state to the latest snapshot and destroy the snapshot. fn rollback_to_the_latest_snapshot(&mut self); - /// Pop the latest snapshot from memory and destroy it. + /// Pop the latest snapshot from memory and destroy it. If there are no snapshots, this should be a no-op + /// (i.e., the VM must not panic in this case). fn pop_snapshot_no_rollback(&mut self); } diff --git a/core/lib/multivm/src/interface/types/outputs/execution_result.rs b/core/lib/multivm/src/interface/types/outputs/execution_result.rs index 19ce9b599c8..35d14524e0a 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_result.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_result.rs @@ -10,7 +10,7 @@ use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::interface::{Halt, VmExecutionStatistics, VmRevertReason}; /// Refunds produced for the user. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct Refunds { pub gas_refunded: u64, pub operator_suggested_refund: u64, diff --git a/core/lib/multivm/src/interface/types/outputs/execution_state.rs b/core/lib/multivm/src/interface/types/outputs/execution_state.rs index cc7bb64d403..05eab795c87 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_state.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_state.rs @@ -1,6 +1,5 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - zk_evm_types::LogQuery, StorageLog, VmEvent, U256, }; @@ -15,17 +14,10 @@ pub struct CurrentExecutionState { pub used_contract_hashes: Vec, /// L2 to L1 logs produced by the VM. pub system_logs: Vec, - /// L2 to L1 logs produced by the L1Messeger. + /// L2 to L1 logs produced by the `L1Messenger`. /// For pre-boojum VMs, there was no distinction between user logs and system /// logs and so all the outputted logs were treated as user_l2_to_l1_logs. pub user_l2_to_l1_logs: Vec, - /// Number of log queries produced by the VM. Including l2_to_l1 logs, storage logs and events. - pub total_log_queries: usize, - /// Number of cycles used by the VM. - pub cycles_used: u32, - /// Sorted & deduplicated events logs for batch. Note, that this is a more "low-level" representation of - /// the `events` field of this struct TODO(PLA-649): refactor to remove duplication of data. - pub deduplicated_events_logs: Vec, /// Refunds returned by `StorageOracle`. pub storage_refunds: Vec, /// Pubdata costs returned by `StorageOracle`. 
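Editor's illustrative aside (not part of the patch): the trait refactor above splits VM construction (`VmFactory`) from execution (`VmInterface`) and documents a snapshot workflow for `VmInterfaceHistoryEnabled` — each snapshot must be rolled back or popped before the next `make_snapshot()` call. The following is a minimal, hedged sketch of a caller honoring that workflow; it assumes the re-exports from `zksync_multivm::interface` introduced earlier in this diff and the existence of a `VmExecutionMode::OneTx` variant, and the helper name `execute_speculatively` is hypothetical.

use zksync_multivm::interface::{
    VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled,
};
use zksync_types::Transaction;

/// Executes a single transaction speculatively, rolling the VM back if it fails.
/// Per the documented workflow, the snapshot is either rolled back or popped
/// before any subsequent `make_snapshot()` call.
fn execute_speculatively<V: VmInterfaceHistoryEnabled>(
    vm: &mut V,
    tx: Transaction,
) -> VmExecutionResultAndLogs {
    vm.make_snapshot();
    vm.push_transaction(tx);
    let result = vm.execute(VmExecutionMode::OneTx);
    if result.result.is_failed() {
        // Revert to the pre-transaction state and drop the snapshot.
        vm.rollback_to_the_latest_snapshot();
    } else {
        // Keep the new state; the snapshot still has to be disposed of.
        vm.pop_snapshot_no_rollback();
    }
    result
}

Because at most one snapshot exists at a time under these rules, implementations (including the new fast VM) can keep a single rollback point instead of a full snapshot stack, which is the optimization the doc comment alludes to.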
diff --git a/core/lib/multivm/src/interface/types/outputs/l2_block.rs b/core/lib/multivm/src/interface/types/outputs/l2_block.rs index ccbcba15f65..6125b2742d1 100644 --- a/core/lib/multivm/src/interface/types/outputs/l2_block.rs +++ b/core/lib/multivm/src/interface/types/outputs/l2_block.rs @@ -1,5 +1,6 @@ use zksync_types::H256; +#[derive(Debug)] pub struct L2Block { pub number: u32, pub timestamp: u64, diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 007c69fdf7f..08b077ce3ea 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -4,17 +4,17 @@ pub use circuit_sequencer_api_1_5_0 as circuit_sequencer_api_latest; pub use zk_evm_1_5_0 as zk_evm_latest; -pub use zksync_types::vm_version::VmVersion; +pub use zksync_types::vm::VmVersion; -pub use self::versions::{ - vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_latest, vm_m5, vm_m6, - vm_refunds_enhancement, vm_virtual_blocks, -}; pub use crate::{ glue::{ history_mode::HistoryMode, tracers::{MultiVMTracer, MultiVmTracerPointer}, }, + versions::{ + vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, + vm_refunds_enhancement, vm_virtual_blocks, + }, vm_instance::VmInstance, }; diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index b56d92015a3..635915f9527 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -7,8 +7,8 @@ use zksync_system_constants::{ L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; use zksync_types::{ - vm_trace::ViolatedValidationRule, web3::keccak256, AccountTreeId, Address, StorageKey, - VmVersion, H256, U256, + vm::VmVersion, vm_trace::ViolatedValidationRule, web3::keccak256, AccountTreeId, Address, + StorageKey, H256, U256, }; use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils.rs index a15fdba6b70..96ae580a5f7 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils.rs @@ -1,6 +1,7 @@ use zksync_types::{ fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, - VmVersion, U256, + vm::VmVersion, + U256, }; use crate::vm_latest::L1BatchEnv; diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index c594d50af0e..e9e34c1cda1 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,7 +1,9 @@ +pub mod shadow; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; pub mod vm_boojum_integration; +pub mod vm_fast; pub mod vm_latest; pub mod vm_m5; pub mod vm_m6; diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs new file mode 100644 index 00000000000..675a95c5ba7 --- /dev/null +++ b/core/lib/multivm/src/versions/shadow.rs @@ -0,0 +1,381 @@ +use std::{ + collections::{BTreeMap, HashSet}, + fmt, +}; + +use anyhow::Context as _; +use zksync_state::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}; +use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::{ + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + 
vm_fast, +}; + +#[derive(Debug)] +pub struct ShadowVm { + main: T, + shadow: vm_fast::Vm>, +} + +impl VmFactory> for ShadowVm +where + S: ReadStorage, + T: VmFactory>, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + Self { + main: T::new(batch_env.clone(), system_env.clone(), storage.clone()), + shadow: vm_fast::Vm::new(batch_env, system_env, ImmutableStorageView::new(storage)), + } + } +} + +impl VmInterface for ShadowVm +where + S: ReadStorage, + T: VmInterface, +{ + type TracerDispatcher = T::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + self.shadow.push_transaction(tx.clone()); + self.main.push_transaction(tx); + } + + fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + let main_result = self.main.execute(execution_mode); + let shadow_result = self.shadow.execute(execution_mode); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result, &shadow_result); + errors + .into_result() + .with_context(|| format!("executing VM with mode {execution_mode:?}")) + .unwrap(); + main_result + } + + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + let shadow_result = self.shadow.inspect((), execution_mode); + let main_result = self.main.inspect(dispatcher, execution_mode); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result, &shadow_result); + errors + .into_result() + .with_context(|| format!("executing VM with mode {execution_mode:?}")) + .unwrap(); + main_result + } + + fn get_bootloader_memory(&self) -> BootloaderMemory { + let main_memory = self.main.get_bootloader_memory(); + let shadow_memory = self.shadow.get_bootloader_memory(); + DivergenceErrors::single("get_bootloader_memory", &main_memory, &shadow_memory).unwrap(); + main_memory + } + + fn get_last_tx_compressed_bytecodes(&self) -> Vec { + let main_bytecodes = self.main.get_last_tx_compressed_bytecodes(); + let shadow_bytecodes = self.shadow.get_last_tx_compressed_bytecodes(); + DivergenceErrors::single( + "get_last_tx_compressed_bytecodes", + &main_bytecodes, + &shadow_bytecodes, + ) + .unwrap(); + main_bytecodes + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.shadow.start_new_l2_block(l2_block_env); + self.main.start_new_l2_block(l2_block_env); + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + let main_state = self.main.get_current_execution_state(); + let shadow_state = self.shadow.get_current_execution_state(); + DivergenceErrors::single("get_current_execution_state", &main_state, &shadow_state) + .unwrap(); + main_state + } + + fn execute_transaction_with_bytecode_compression( + &mut self, + tx: Transaction, + with_compression: bool, + ) -> ( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + ) { + let tx_hash = tx.hash(); + let main_result = self + .main + .execute_transaction_with_bytecode_compression(tx.clone(), with_compression); + let shadow_result = self + .shadow + .execute_transaction_with_bytecode_compression(tx, with_compression); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result.1, &shadow_result.1); + errors + .into_result() + .with_context(|| { + format!("executing transaction {tx_hash:?}, with_compression={with_compression:?}") + }) + .unwrap(); + main_result + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: 
Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> ( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + ) { + let tx_hash = tx.hash(); + let main_result = self.main.inspect_transaction_with_bytecode_compression( + tracer, + tx.clone(), + with_compression, + ); + let shadow_result = + self.shadow + .inspect_transaction_with_bytecode_compression((), tx, with_compression); + let mut errors = DivergenceErrors::default(); + errors.check_results_match(&main_result.1, &shadow_result.1); + errors + .into_result() + .with_context(|| { + format!("inspecting transaction {tx_hash:?}, with_compression={with_compression:?}") + }) + .unwrap(); + main_result + } + + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + self.main.record_vm_memory_metrics() + } + + fn gas_remaining(&self) -> u32 { + let main_gas = self.main.gas_remaining(); + let shadow_gas = self.shadow.gas_remaining(); + DivergenceErrors::single("gas_remaining", &main_gas, &shadow_gas).unwrap(); + main_gas + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(); + let shadow_batch = self.shadow.finish_batch(); + + let mut errors = DivergenceErrors::default(); + errors.check_results_match( + &main_batch.block_tip_execution_result, + &shadow_batch.block_tip_execution_result, + ); + errors.check_final_states_match( + &main_batch.final_execution_state, + &shadow_batch.final_execution_state, + ); + errors.check_match( + "final_bootloader_memory", + &main_batch.final_bootloader_memory, + &shadow_batch.final_bootloader_memory, + ); + errors.check_match( + "pubdata_input", + &main_batch.pubdata_input, + &shadow_batch.pubdata_input, + ); + errors.check_match( + "state_diffs", + &main_batch.state_diffs, + &shadow_batch.state_diffs, + ); + errors.into_result().unwrap(); + main_batch + } +} + +#[must_use = "Should be converted to a `Result`"] +#[derive(Debug, Default)] +pub struct DivergenceErrors(Vec); + +impl DivergenceErrors { + fn single( + context: &str, + main: &T, + shadow: &T, + ) -> anyhow::Result<()> { + let mut this = Self::default(); + this.check_match(context, main, shadow); + this.into_result() + } + + fn check_results_match( + &mut self, + main_result: &VmExecutionResultAndLogs, + shadow_result: &VmExecutionResultAndLogs, + ) { + self.check_match("result", &main_result.result, &shadow_result.result); + self.check_match( + "logs.events", + &main_result.logs.events, + &shadow_result.logs.events, + ); + self.check_match( + "logs.system_l2_to_l1_logs", + &main_result.logs.system_l2_to_l1_logs, + &shadow_result.logs.system_l2_to_l1_logs, + ); + self.check_match( + "logs.user_l2_to_l1_logs", + &main_result.logs.user_l2_to_l1_logs, + &shadow_result.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); + self.check_match("logs.storage_logs", &main_logs, &shadow_logs); + self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + } + + fn check_match(&mut self, context: &str, main: &T, shadow: &T) { + if main != shadow { + let comparison = pretty_assertions::Comparison::new(main, shadow); + let err = anyhow::anyhow!("`{context}` mismatch: {comparison}"); + self.0.push(err); + } + } + + fn check_final_states_match( + &mut self, + main: &CurrentExecutionState, + shadow: &CurrentExecutionState, + ) { + self.check_match("final_state.events", &main.events, &shadow.events); + self.check_match( + 
"final_state.user_l2_to_l1_logs", + &main.user_l2_to_l1_logs, + &shadow.user_l2_to_l1_logs, + ); + self.check_match( + "final_state.system_logs", + &main.system_logs, + &shadow.system_logs, + ); + self.check_match( + "final_state.storage_refunds", + &main.storage_refunds, + &shadow.storage_refunds, + ); + self.check_match( + "final_state.pubdata_costs", + &main.pubdata_costs, + &shadow.pubdata_costs, + ); + self.check_match( + "final_state.used_contract_hashes", + &main.used_contract_hashes.iter().collect::>(), + &shadow.used_contract_hashes.iter().collect::>(), + ); + + let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); + let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); + self.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + } + + fn gather_logs(logs: &[StorageLog]) -> BTreeMap { + logs.iter() + .filter(|log| log.is_write()) + .map(|log| (log.key, log)) + .collect() + } + + fn into_result(self) -> anyhow::Result<()> { + if self.0.is_empty() { + Ok(()) + } else { + Err(anyhow::anyhow!( + "divergence between old VM and new VM execution: [{:?}]", + self.0 + )) + } + } +} + +// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them +// inside the VM, hence this auxiliary struct. +#[derive(PartialEq)] +struct UniqueStorageLogs(BTreeMap); + +impl fmt::Debug for UniqueStorageLogs { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut map = formatter.debug_map(); + for log in self.0.values() { + map.entry( + &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()), + &format!("{:?} -> {:?}", log.previous_value, log.log.value), + ); + } + map.finish() + } +} + +impl UniqueStorageLogs { + fn new(logs: &[StorageLogWithPreviousValue]) -> Self { + let mut unique_logs = BTreeMap::::new(); + for log in logs { + if !log.log.is_write() { + continue; + } + if let Some(existing_log) = unique_logs.get_mut(&log.log.key) { + existing_log.log.value = log.log.value; + } else { + unique_logs.insert(log.log.key, *log); + } + } + + // Remove no-op write logs (i.e., X -> X writes) produced by the old VM. 
+ unique_logs.retain(|_, log| log.previous_value != log.log.value); + Self(unique_logs) + } +} + +impl VmInterfaceHistoryEnabled for ShadowVm +where + S: ReadStorage, + T: VmInterfaceHistoryEnabled, +{ + fn make_snapshot(&mut self) { + self.shadow.make_snapshot(); + self.main.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + self.shadow.rollback_to_the_latest_snapshot(); + self.main.rollback_to_the_latest_snapshot(); + } + + fn pop_snapshot_no_rollback(&mut self) { + self.shadow.pop_snapshot_no_rollback(); + self.main.pop_snapshot_no_rollback(); + } +} diff --git a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs index 7f7b44071a1..d73ebb1648b 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs @@ -29,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.iter().map(|x| **x).collect(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 5721f4e2185..ff6c7f2f3d0 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -16,7 +16,8 @@ use crate::{ interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::old_tracers::TracerDispatcher, vm_1_3_2::{events::merge_events, VmInstance}, @@ -30,34 +31,9 @@ pub struct Vm { pub(crate) last_tx_compressed_bytecodes: Vec, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let oracle_tools = crate::vm_1_3_2::OracleTools::new(storage.clone()); - let block_properties = crate::vm_1_3_2::BlockProperties { - default_aa_code_hash: h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), - zkporter_is_available: false, - }; - let inner_vm: VmInstance = - crate::vm_1_3_2::vm_with_bootloader::init_vm_with_gas_limit( - oracle_tools, - batch_env.clone().glue_into(), - block_properties, - system_env.execution_mode.glue_into(), - &system_env.base_system_smart_contracts.clone().glue_into(), - system_env.bootloader_gas_limit, - ); - Self { - vm: inner_vm, - system_env, - batch_env, - last_tx_compressed_bytecodes: vec![], - } - } - fn push_transaction(&mut self, tx: Transaction) { crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, @@ -120,7 +96,7 @@ impl VmInterface for Vm { } fn get_current_execution_state(&self) -> CurrentExecutionState { - let (_full_history, raw_events, l1_messages) = self.vm.state.event_sink.flatten(); + let 
(raw_events, l1_messages) = self.vm.state.event_sink.flatten(); let events = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -138,14 +114,6 @@ impl VmInterface for Vm { }) }) .collect(); - let total_log_queries = self.vm.state.event_sink.get_log_queries() - + self - .vm - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.vm.state.storage.get_final_log_queries().len(); let used_contract_hashes = self .vm @@ -171,10 +139,7 @@ impl VmInterface for Vm { used_contract_hashes, user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], - total_log_queries, - cycles_used: self.vm.state.local_state.monotonic_cycle_counter, - // It's not applicable for vm 1.3.2 - deduplicated_events_logs: vec![], + // Fields below are not produced by VM 1.3.2 storage_refunds: vec![], pubdata_costs: Vec::new(), } @@ -295,7 +260,34 @@ impl VmInterface for Vm { } } -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let oracle_tools = crate::vm_1_3_2::OracleTools::new(storage.clone()); + let block_properties = crate::vm_1_3_2::BlockProperties { + default_aa_code_hash: h256_to_u256( + system_env.base_system_smart_contracts.default_aa.hash, + ), + zkporter_is_available: false, + }; + let inner_vm: VmInstance = + crate::vm_1_3_2::vm_with_bootloader::init_vm_with_gas_limit( + oracle_tools, + batch_env.clone().glue_into(), + block_properties, + system_env.execution_mode.glue_into(), + &system_env.base_system_smart_contracts.clone().glue_into(), + system_env.bootloader_gas_limit, + ); + Self { + vm: inner_vm, + system_env, + batch_env, + last_tx_compressed_bytecodes: vec![], + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.vm.save_current_vm_as_snapshot() } @@ -305,6 +297,6 @@ impl VmInterfaceHistoryEnabled for Vm VmInstance { /// Removes the latest snapshot without rolling it back. /// This function expects that there is at least one snapshot present. pub fn pop_snapshot_no_rollback(&mut self) { - self.snapshots.pop().unwrap(); + self.snapshots.pop(); } /// Returns the amount of gas remaining to the VM. @@ -768,7 +768,7 @@ impl VmInstance { // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` // after because draining will drop timestamps. 
- let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); full_result.events = merge_events(raw_events) .into_iter() .map(|e| { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs index 0c9d1bb01cb..bd57239d857 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/event_sink.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use itertools::Itertools; use zk_evm_1_4_1::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -9,7 +8,6 @@ use zk_evm_1_4_1::{ BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, }, }; -use zksync_types::U256; use crate::vm_1_4_1::old_vm::{ history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, @@ -31,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); - } else { - // we can always pop as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. 
- assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. - if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 993cc795055..345948bfdfb 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -11,8 +11,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_1_4_1::{ bootloader_state::BootloaderState, @@ -38,22 +38,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -86,7 +73,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -97,13 +84,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -122,12 +102,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -184,14 +158,28 @@ impl VmInterface for Vm { } } +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + /// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { +impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { self.make_snapshot_inner() } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -200,10 +188,7 @@ impl VmInterfaceHistoryEnabled for Vm { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); - } else { - // we can always pop 
as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. - assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. - if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index 6a02b162733..264ebde5611 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -11,8 +11,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_1_4_2::{ bootloader_state::BootloaderState, @@ -38,22 +38,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -86,7 +73,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
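A minimal, self-contained sketch of the trait split applied in these hunks: the constructor is removed from the main VM interface and moved into a separate factory trait, so code that only drives an already-built VM no longer needs a constructor bound. `VmInterfaceLike`, `VmFactoryLike`, and `DemoVm` are illustrative stand-ins, not the PR's actual types.

trait VmInterfaceLike {
    fn push_transaction(&mut self, tx: u64);
}

// Construction lives in its own trait, mirroring the `VmFactory` split in the hunks above.
trait VmFactoryLike<S>: VmInterfaceLike {
    fn new(storage: S) -> Self;
}

struct DemoVm<S> {
    storage: S,
    txs: Vec<u64>,
}

impl<S> VmInterfaceLike for DemoVm<S> {
    fn push_transaction(&mut self, tx: u64) {
        self.txs.push(tx);
    }
}

impl<S> VmFactoryLike<S> for DemoVm<S> {
    fn new(storage: S) -> Self {
        DemoVm { storage, txs: vec![] }
    }
}

fn main() {
    // Callers that need to build a VM use the factory trait; everything else
    // can stay generic over `VmInterfaceLike` alone.
    let mut vm = DemoVm::new(vec![0u8; 32]);
    vm.push_transaction(1);
}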
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -97,13 +84,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -122,12 +102,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -189,14 +163,26 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { - self.make_snapshot_inner() + self.make_snapshot_inner(); } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -205,10 +191,7 @@ impl VmInterfaceHistoryEnabled for Vm { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); 
- } else { - // we can always pop as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. - assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. - if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index c893046c854..90cea403084 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -11,8 +11,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_boojum_integration::{ bootloader_state::BootloaderState, @@ -38,22 +38,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -86,7 +73,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. 
/// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -97,13 +84,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -122,12 +102,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -184,14 +158,26 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { - self.make_snapshot_inner() + self.make_snapshot_inner(); } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -200,10 +186,7 @@ impl VmInterfaceHistoryEnabled for Vm, +} + +impl BootloaderL2Block { + pub(crate) fn new(l2_block: L2BlockEnv, first_tx_place: usize) -> Self { + Self { + number: l2_block.number, + timestamp: l2_block.timestamp, + txs_rolling_hash: EMPTY_TXS_ROLLING_HASH, + prev_block_hash: l2_block.prev_block_hash, + first_tx_index: first_tx_place, + max_virtual_blocks_to_create: l2_block.max_virtual_blocks_to_create, + txs: vec![], + } + } + + pub(super) fn push_tx(&mut self, tx: BootloaderTx) { + self.update_rolling_hash(tx.hash); + self.txs.push(tx) + } + + pub(crate) fn get_hash(&self) -> H256 { + l2_block_hash( + L2BlockNumber(self.number), + self.timestamp, + self.prev_block_hash, + self.txs_rolling_hash, + ) + } + + fn update_rolling_hash(&mut self, tx_hash: H256) { + self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + } + + pub(crate) fn make_snapshot(&self) -> L2BlockSnapshot { + L2BlockSnapshot { + txs_rolling_hash: self.txs_rolling_hash, + txs_len: self.txs.len(), + } + } + + pub(crate) fn apply_snapshot(&mut self, snapshot: L2BlockSnapshot) { + self.txs_rolling_hash = snapshot.txs_rolling_hash; + match self.txs.len().cmp(&snapshot.txs_len) { + Ordering::Greater => self.txs.truncate(snapshot.txs_len), + Ordering::Less => panic!("Applying snapshot from future is not supported"), + Ordering::Equal => {} + } + } + pub(crate) fn l2_block(&self) -> L2Block { + L2Block { + number: self.number, + timestamp: self.timestamp, + hash: self.get_hash(), + } + } +} diff --git 
a/core/lib/multivm/src/versions/vm_fast/bootloader_state/mod.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/mod.rs new file mode 100644 index 00000000000..73830de2759 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/mod.rs @@ -0,0 +1,8 @@ +mod l2_block; +mod snapshot; +mod state; +mod tx; + +pub(crate) mod utils; +pub(crate) use snapshot::BootloaderStateSnapshot; +pub use state::BootloaderState; diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/snapshot.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/snapshot.rs new file mode 100644 index 00000000000..8f1cec3cb7f --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/snapshot.rs @@ -0,0 +1,25 @@ +use zksync_types::H256; + +#[derive(Debug, Clone)] +pub(crate) struct BootloaderStateSnapshot { + /// ID of the next transaction to be executed. + pub(crate) tx_to_execute: usize, + /// Stored L2 blocks in bootloader memory + pub(crate) l2_blocks_len: usize, + /// Snapshot of the last L2 block. Only this block could be changed during the rollback + pub(crate) last_l2_block: L2BlockSnapshot, + /// The number of 32-byte words spent on the already included compressed bytecodes. + pub(crate) compressed_bytecodes_encoding: usize, + /// Current offset of the free space in the bootloader memory. + pub(crate) free_tx_offset: usize, + /// Whether the pubdata information has been provided already + pub(crate) is_pubdata_information_provided: bool, +} + +#[derive(Debug, Clone)] +pub(crate) struct L2BlockSnapshot { + /// The rolling hash of all the transactions in the miniblock + pub(crate) txs_rolling_hash: H256, + /// The number of transactions in the last L2 block + pub(crate) txs_len: usize, +} diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs new file mode 100644 index 00000000000..ae1c70db586 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -0,0 +1,293 @@ +use std::cmp::Ordering; + +use once_cell::sync::OnceCell; +use zksync_types::{L2ChainId, U256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use super::{ + l2_block::BootloaderL2Block, + tx::BootloaderTx, + utils::{apply_l2_block, apply_pubdata_to_memory, apply_tx_to_memory}, + BootloaderStateSnapshot, +}; +use crate::{ + interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + versions::vm_fast::{pubdata::PubdataInput, transaction_data::TransactionData}, + vm_latest::{constants::TX_DESCRIPTION_OFFSET, utils::l2_blocks::assert_next_block}, +}; + +/// Intermediate bootloader-related VM state. +/// +/// Required to process transactions one by one (since we intercept the VM execution to execute +/// transactions and add new ones to the memory on the fly). +/// Keeps tracking everything related to the bootloader memory and can restore the whole memory. +/// +/// +/// Serves two purposes: +/// - Tracks where next tx should be pushed to in the bootloader memory. +/// - Tracks which transaction should be executed next. +#[derive(Debug)] +pub struct BootloaderState { + /// ID of the next transaction to be executed. + /// See the structure doc-comment for a better explanation of purpose. + tx_to_execute: usize, + /// Stored txs in bootloader memory + l2_blocks: Vec, + /// The number of 32-byte words spent on the already included compressed bytecodes. 
+ compressed_bytecodes_encoding: usize, + /// Initial memory of bootloader + initial_memory: BootloaderMemory, + /// Mode of txs for execution, it can be changed once per vm lunch + execution_mode: TxExecutionMode, + /// Current offset of the free space in the bootloader memory. + free_tx_offset: usize, + /// Information about the pubdata that will be needed to supply to the L1Messenger + pubdata_information: OnceCell, +} + +impl BootloaderState { + pub(crate) fn new( + execution_mode: TxExecutionMode, + initial_memory: BootloaderMemory, + first_l2_block: L2BlockEnv, + ) -> Self { + let l2_block = BootloaderL2Block::new(first_l2_block, 0); + Self { + tx_to_execute: 0, + compressed_bytecodes_encoding: 0, + l2_blocks: vec![l2_block], + initial_memory, + execution_mode, + free_tx_offset: 0, + pubdata_information: Default::default(), + } + } + + pub(crate) fn set_refund_for_current_tx(&mut self, refund: u64) { + let current_tx = self.current_tx(); + // We can't set the refund for the latest tx or using the latest l2_block for fining tx + // Because we can fill the whole batch first and then execute txs one by one + let tx = self.find_tx_mut(current_tx); + tx.refund = refund; + } + + pub(crate) fn set_pubdata_input(&mut self, info: PubdataInput) { + self.pubdata_information + .set(info) + .expect("Pubdata information is already set"); + } + + pub(crate) fn start_new_l2_block(&mut self, l2_block: L2BlockEnv) { + let last_block = self.last_l2_block(); + assert!( + !last_block.txs.is_empty(), + "Can not create new miniblocks on top of empty ones" + ); + assert_next_block(&last_block.l2_block(), &l2_block); + self.push_l2_block(l2_block); + } + + /// This method bypass sanity checks and should be used carefully. + pub(crate) fn push_l2_block(&mut self, l2_block: L2BlockEnv) { + self.l2_blocks + .push(BootloaderL2Block::new(l2_block, self.free_tx_index())) + } + + pub(crate) fn push_tx( + &mut self, + tx: TransactionData, + predefined_overhead: u32, + predefined_refund: u64, + compressed_bytecodes: Vec, + trusted_ergs_limit: U256, + chain_id: L2ChainId, + ) -> BootloaderMemory { + let tx_offset = self.free_tx_offset(); + let bootloader_tx = BootloaderTx::new( + tx, + predefined_refund, + predefined_overhead, + trusted_ergs_limit, + compressed_bytecodes, + tx_offset, + chain_id, + ); + + let mut memory = vec![]; + let compressed_bytecode_size = apply_tx_to_memory( + &mut memory, + &bootloader_tx, + self.last_l2_block(), + self.free_tx_index(), + self.free_tx_offset(), + self.compressed_bytecodes_encoding, + self.execution_mode, + self.last_l2_block().txs.is_empty(), + ); + self.compressed_bytecodes_encoding += compressed_bytecode_size; + self.free_tx_offset = tx_offset + bootloader_tx.encoded_len(); + self.last_mut_l2_block().push_tx(bootloader_tx); + memory + } + + pub(crate) fn last_l2_block(&self) -> &BootloaderL2Block { + self.l2_blocks.last().unwrap() + } + + pub(crate) fn get_pubdata_information(&self) -> &PubdataInput { + self.pubdata_information + .get() + .expect("Pubdata information is not set") + } + + fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { + self.l2_blocks.last_mut().unwrap() + } + + /// Apply all bootloader transaction to the initial memory + pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + let mut initial_memory = self.initial_memory.clone(); + let mut offset = 0; + let mut compressed_bytecodes_offset = 0; + let mut tx_index = 0; + for l2_block in &self.l2_blocks { + for (num, tx) in l2_block.txs.iter().enumerate() { + let compressed_bytecodes_size = 
apply_tx_to_memory( + &mut initial_memory, + tx, + l2_block, + tx_index, + offset, + compressed_bytecodes_offset, + self.execution_mode, + num == 0, + ); + offset += tx.encoded_len(); + compressed_bytecodes_offset += compressed_bytecodes_size; + tx_index += 1; + } + if l2_block.txs.is_empty() { + apply_l2_block(&mut initial_memory, l2_block, tx_index) + } + } + + let pubdata_information = self + .pubdata_information + .clone() + .into_inner() + .expect("Empty pubdata information"); + + apply_pubdata_to_memory(&mut initial_memory, pubdata_information); + initial_memory + } + + fn free_tx_offset(&self) -> usize { + self.free_tx_offset + } + + pub(crate) fn free_tx_index(&self) -> usize { + let l2_block = self.last_l2_block(); + l2_block.first_tx_index + l2_block.txs.len() + } + + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + if let Some(tx) = self.last_l2_block().txs.last() { + tx.compressed_bytecodes.clone() + } else { + vec![] + } + } + + /// Returns the id of current tx + pub(crate) fn current_tx(&self) -> usize { + self.tx_to_execute + .checked_sub(1) + .expect("There are no current tx to execute") + } + + /// Returns the ID of the next transaction to be executed and increments the local transaction counter. + pub(crate) fn move_tx_to_execute_pointer(&mut self) -> usize { + assert!( + self.tx_to_execute < self.free_tx_index(), + "Attempt to execute tx that was not pushed to memory. Tx ID: {}, txs in bootloader: {}", + self.tx_to_execute, + self.free_tx_index() + ); + + let old = self.tx_to_execute; + self.tx_to_execute += 1; + old + } + + /// Get offset of tx description + pub(crate) fn get_tx_description_offset(&self, tx_index: usize) -> usize { + TX_DESCRIPTION_OFFSET + self.find_tx(tx_index).offset + } + + pub(crate) fn insert_fictive_l2_block(&mut self) -> &BootloaderL2Block { + let block = self.last_l2_block(); + if !block.txs.is_empty() { + self.start_new_l2_block(L2BlockEnv { + timestamp: block.timestamp + 1, + number: block.number + 1, + prev_block_hash: block.get_hash(), + max_virtual_blocks_to_create: 1, + }); + } + self.last_l2_block() + } + + fn find_tx(&self, tx_index: usize) -> &BootloaderTx { + for block in self.l2_blocks.iter().rev() { + if tx_index >= block.first_tx_index { + return &block.txs[tx_index - block.first_tx_index]; + } + } + panic!("The tx with index {} must exist", tx_index) + } + + fn find_tx_mut(&mut self, tx_index: usize) -> &mut BootloaderTx { + for block in self.l2_blocks.iter_mut().rev() { + if tx_index >= block.first_tx_index { + return &mut block.txs[tx_index - block.first_tx_index]; + } + } + panic!("The tx with index {} must exist", tx_index) + } + + pub(crate) fn get_snapshot(&self) -> BootloaderStateSnapshot { + BootloaderStateSnapshot { + tx_to_execute: self.tx_to_execute, + l2_blocks_len: self.l2_blocks.len(), + last_l2_block: self.last_l2_block().make_snapshot(), + compressed_bytecodes_encoding: self.compressed_bytecodes_encoding, + free_tx_offset: self.free_tx_offset, + is_pubdata_information_provided: self.pubdata_information.get().is_some(), + } + } + + pub(crate) fn apply_snapshot(&mut self, snapshot: BootloaderStateSnapshot) { + self.tx_to_execute = snapshot.tx_to_execute; + self.compressed_bytecodes_encoding = snapshot.compressed_bytecodes_encoding; + self.free_tx_offset = snapshot.free_tx_offset; + match self.l2_blocks.len().cmp(&snapshot.l2_blocks_len) { + Ordering::Greater => self.l2_blocks.truncate(snapshot.l2_blocks_len), + Ordering::Less => panic!("Applying snapshot from future is not supported"), + 
Ordering::Equal => {} + } + self.last_mut_l2_block() + .apply_snapshot(snapshot.last_l2_block); + + if !snapshot.is_pubdata_information_provided { + self.pubdata_information = Default::default(); + } else { + // Under the correct usage of the snapshots of the bootloader state, + // this assertion should never fail, i.e. since the pubdata information + // can be set only once. However, we have this assertion just in case. + assert!( + self.pubdata_information.get().is_some(), + "Snapshot with no pubdata can not rollback to snapshot with one" + ); + } + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs new file mode 100644 index 00000000000..36c1d65ddd3 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs @@ -0,0 +1,49 @@ +use zksync_types::{L2ChainId, H256, U256}; +use zksync_utils::bytecode::CompressedBytecodeInfo; + +use crate::versions::vm_fast::transaction_data::TransactionData; + +/// Information about tx necessary for execution in bootloader. +#[derive(Debug, Clone)] +pub(crate) struct BootloaderTx { + pub(crate) hash: H256, + /// Encoded transaction + pub(crate) encoded: Vec, + /// Compressed bytecodes, which has been published during this transaction + pub(crate) compressed_bytecodes: Vec, + /// Refunds for this transaction + pub(crate) refund: u64, + /// Gas overhead + pub(crate) gas_overhead: u32, + /// Gas Limit for this transaction. It can be different from the gas limit inside the transaction + pub(crate) trusted_gas_limit: U256, + /// Offset of the tx in bootloader memory + pub(crate) offset: usize, +} + +impl BootloaderTx { + pub(super) fn new( + tx: TransactionData, + predefined_refund: u64, + predefined_overhead: u32, + trusted_gas_limit: U256, + compressed_bytecodes: Vec, + offset: usize, + chain_id: L2ChainId, + ) -> Self { + let hash = tx.tx_hash(chain_id); + Self { + hash, + encoded: tx.into_tokens(), + compressed_bytecodes, + refund: predefined_refund, + gas_overhead: predefined_overhead, + trusted_gas_limit, + offset, + } + } + + pub(super) fn encoded_len(&self) -> usize { + self.encoded.len() + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs new file mode 100644 index 00000000000..21259e366d1 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -0,0 +1,181 @@ +use zksync_types::{ethabi, U256}; +use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; + +use super::{l2_block::BootloaderL2Block, tx::BootloaderTx}; +use crate::{ + interface::{BootloaderMemory, TxExecutionMode}, + versions::vm_fast::pubdata::PubdataInput, + vm_latest::constants::{ + BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, + COMPRESSED_BYTECODES_OFFSET, OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET, + OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, OPERATOR_REFUNDS_OFFSET, + TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, + }, +}; + +pub(super) fn get_memory_for_compressed_bytecodes( + compressed_bytecodes: &[CompressedBytecodeInfo], +) -> Vec { + let memory_addition: Vec<_> = compressed_bytecodes + .iter() + .flat_map(|x| x.encode_call()) + .collect(); + + bytes_to_be_words(memory_addition) +} + +#[allow(clippy::too_many_arguments)] +pub(super) fn apply_tx_to_memory( + memory: &mut BootloaderMemory, + 
bootloader_tx: &BootloaderTx, + bootloader_l2_block: &BootloaderL2Block, + tx_index: usize, + tx_offset: usize, + compressed_bytecodes_size: usize, + execution_mode: TxExecutionMode, + start_new_l2_block: bool, +) -> usize { + let bootloader_description_offset = + BOOTLOADER_TX_DESCRIPTION_OFFSET + BOOTLOADER_TX_DESCRIPTION_SIZE * tx_index; + let tx_description_offset = TX_DESCRIPTION_OFFSET + tx_offset; + + memory.push(( + bootloader_description_offset, + assemble_tx_meta(execution_mode, true), + )); + + memory.push(( + bootloader_description_offset + 1, + U256::from_big_endian(&(32 * tx_description_offset).to_be_bytes()), + )); + + let refund_offset = OPERATOR_REFUNDS_OFFSET + tx_index; + memory.push((refund_offset, bootloader_tx.refund.into())); + + let overhead_offset = TX_OVERHEAD_OFFSET + tx_index; + memory.push((overhead_offset, bootloader_tx.gas_overhead.into())); + + let trusted_gas_limit_offset = TX_TRUSTED_GAS_LIMIT_OFFSET + tx_index; + memory.push((trusted_gas_limit_offset, bootloader_tx.trusted_gas_limit)); + + memory.extend( + (tx_description_offset..tx_description_offset + bootloader_tx.encoded_len()) + .zip(bootloader_tx.encoded.clone()), + ); + apply_l2_block_inner(memory, bootloader_l2_block, tx_index, start_new_l2_block); + + // Note, +1 is moving for pointer + let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; + + let encoded_compressed_bytecodes = + get_memory_for_compressed_bytecodes(&bootloader_tx.compressed_bytecodes); + let compressed_bytecodes_encoding = encoded_compressed_bytecodes.len(); + + memory.extend( + (compressed_bytecodes_offset + ..compressed_bytecodes_offset + encoded_compressed_bytecodes.len()) + .zip(encoded_compressed_bytecodes), + ); + compressed_bytecodes_encoding +} + +pub(crate) fn apply_l2_block( + memory: &mut BootloaderMemory, + bootloader_l2_block: &BootloaderL2Block, + txs_index: usize, +) { + apply_l2_block_inner(memory, bootloader_l2_block, txs_index, true) +} + +fn apply_l2_block_inner( + memory: &mut BootloaderMemory, + bootloader_l2_block: &BootloaderL2Block, + txs_index: usize, + start_new_l2_block: bool, +) { + // Since L2 block information start from the `TX_OPERATOR_L2_BLOCK_INFO_OFFSET` and each + // L2 block info takes `TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO` slots, the position where the L2 block info + // for this transaction needs to be written is: + + let block_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + txs_index * TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO; + + memory.extend(vec![ + (block_position, bootloader_l2_block.number.into()), + (block_position + 1, bootloader_l2_block.timestamp.into()), + ( + block_position + 2, + h256_to_u256(bootloader_l2_block.prev_block_hash), + ), + ( + block_position + 3, + if start_new_l2_block { + bootloader_l2_block.max_virtual_blocks_to_create.into() + } else { + U256::zero() + }, + ), + ]) +} + +pub(crate) fn apply_pubdata_to_memory( + memory: &mut BootloaderMemory, + pubdata_information: PubdataInput, +) { + // Skipping two slots as they will be filled by the bootloader itself: + // - One slot is for the selector of the call to the L1Messenger. + // - The other slot is for the 0x20 offset for the calldata. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_information.build_pubdata(true), + )])[32..] 
+ .to_vec(); + + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + + pubdata + .chunks(32) + .enumerate() + .for_each(|(slot_offset, value)| { + memory.push(( + l1_messenger_pubdata_start_slot + slot_offset, + U256::from(value), + )) + }); +} + +/// Forms a word that contains meta information for the transaction execution. +/// +/// # Current layout +/// +/// - 0 byte (MSB): server-side tx execution mode +/// In the server, we may want to execute different parts of the transaction in the different context +/// For example, when checking validity, we don't want to actually execute transaction and have side effects. +/// +/// Possible values: +/// - 0x00: validate & execute (normal mode) +/// - 0x02: execute but DO NOT validate +/// +/// - 31 byte (LSB): whether to execute transaction or not (at all). +pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool) -> U256 { + let mut output = [0u8; 32]; + + // Set 0 byte (execution mode) + output[0] = match execution_mode { + TxExecutionMode::VerifyExecute => 0x00, + TxExecutionMode::EstimateFee { .. } => 0x00, + TxExecutionMode::EthCall { .. } => 0x02, + }; + + // Set 31 byte (marker for tx execution) + output[31] = u8::from(execute_tx); + + U256::from_big_endian(&output) +} diff --git a/core/lib/multivm/src/versions/vm_fast/bytecode.rs b/core/lib/multivm/src/versions/vm_fast/bytecode.rs new file mode 100644 index 00000000000..7a16b5940df --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/bytecode.rs @@ -0,0 +1,52 @@ +use itertools::Itertools; +use zksync_state::ReadStorage; +use zksync_types::H256; +use zksync_utils::{ + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + h256_to_u256, +}; + +use super::Vm; + +impl Vm { + /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
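The `assemble_tx_meta` helper above packs two conventions into a single 32-byte word: byte 0 (MSB) carries the server-side execution mode and byte 31 (LSB) carries the execute/skip marker. Below is a standalone sketch of that layout; the `ExecMode` enum and `meta_word` helper are illustrative names, not the crate's `TxExecutionMode` or this PR's code.

#[derive(Clone, Copy)]
enum ExecMode {
    VerifyExecute, // normal mode: validate and execute (0x00)
    EstimateFee,   // also encoded as 0x00
    EthCall,       // 0x02: execute but do not validate
}

fn meta_word(mode: ExecMode, execute_tx: bool) -> [u8; 32] {
    let mut word = [0u8; 32];
    // Byte 0 (MSB): execution mode.
    word[0] = match mode {
        ExecMode::VerifyExecute | ExecMode::EstimateFee => 0x00,
        ExecMode::EthCall => 0x02,
    };
    // Byte 31 (LSB): whether to execute the transaction at all.
    word[31] = u8::from(execute_tx);
    word
}

fn main() {
    let word = meta_word(ExecMode::EthCall, true);
    assert_eq!(word[0], 0x02);
    assert_eq!(word[31], 1);
}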
+ pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + let hash_bytecode = hash_bytecode(&info.original); + let is_bytecode_known = self.world.storage.is_bytecode_known(&hash_bytecode); + + let is_bytecode_known_cache = self + .world + .bytecode_cache + .contains_key(&h256_to_u256(hash_bytecode)); + !(is_bytecode_known || is_bytecode_known_cache) + }) + } +} + +pub(crate) fn compress_bytecodes( + bytecodes: &[Vec], + mut is_bytecode_known: impl FnMut(H256) -> bool, +) -> Vec { + bytecodes + .iter() + .enumerate() + .sorted_by_key(|(_idx, dep)| *dep) + .dedup_by(|x, y| x.1 == y.1) + .filter(|(_idx, dep)| !is_bytecode_known(hash_bytecode(dep))) + .sorted_by_key(|(idx, _dep)| *idx) + .filter_map(|(_idx, dep)| { + let compressed_bytecode = compress_bytecode(dep); + + compressed_bytecode + .ok() + .map(|compressed| CompressedBytecodeInfo { + original: dep.clone(), + compressed, + }) + }) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs new file mode 100644 index 00000000000..b39d501b655 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -0,0 +1,112 @@ +use vm2::Event; +use zksync_types::{L1BatchNumber, VmEvent, H256}; +use zksync_utils::h256_to_account_address; + +#[derive(Clone)] +struct EventAccumulator { + pub(crate) shard_id: u8, + pub(crate) tx_number_in_block: u16, + pub(crate) topics: Vec<[u8; 32]>, + pub(crate) data: Vec, +} + +impl EventAccumulator { + fn into_vm_event(self, block_number: L1BatchNumber) -> VmEvent { + VmEvent { + location: (block_number, self.tx_number_in_block as u32), + address: h256_to_account_address(&H256(self.topics[0])), + indexed_topics: self.topics[1..].iter().map(H256::from).collect(), + value: self.data, + } + } +} + +pub(crate) fn merge_events(events: &[Event], block_number: L1BatchNumber) -> Vec { + let mut result = vec![]; + let mut current: Option<(usize, u32, EventAccumulator)> = None; + + for message in events.iter() { + let Event { + shard_id, + is_first, + tx_number, + key, + value, + } = message.clone(); + + if !is_first { + if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = + current.take() + { + if event.shard_id != shard_id || event.tx_number_in_block != tx_number { + continue; + } + let mut data_0 = [0u8; 32]; + let mut data_1 = [0u8; 32]; + key.to_big_endian(&mut data_0); + value.to_big_endian(&mut data_1); + for el in [data_0, data_1].iter() { + if remaining_topics != 0 { + event.topics.push(*el); + remaining_topics -= 1; + } else if remaining_data_length != 0 { + if remaining_data_length >= 32 { + event.data.extend_from_slice(el); + remaining_data_length -= 32; + } else { + event.data.extend_from_slice(&el[..remaining_data_length]); + remaining_data_length = 0; + } + } + } + + if remaining_data_length != 0 || remaining_topics != 0 { + current = Some((remaining_data_length, remaining_topics, event)) + } else { + result.push(event.into_vm_event(block_number)); + } + } + } else { + // start new one. First take the old one only if it's well formed + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event.into_vm_event(block_number)); + } + } + + // split key as our internal marker. 
Ignore higher bits + let mut num_topics = key.0[0] as u32; + let mut data_length = (key.0[0] >> 32) as usize; + let mut buffer = [0u8; 32]; + value.to_big_endian(&mut buffer); + + let (topics, data) = if num_topics == 0 && data_length == 0 { + (vec![], vec![]) + } else if num_topics == 0 { + data_length -= 32; + (vec![], buffer.to_vec()) + } else { + num_topics -= 1; + (vec![buffer], vec![]) + }; + + let new_event = EventAccumulator { + shard_id, + tx_number_in_block: tx_number, + topics, + data, + }; + + current = Some((data_length, num_topics, new_event)) + } + } + + // add the last one + if let Some((remaining_data_length, remaining_topics, event)) = current.take() { + if remaining_data_length == 0 && remaining_topics == 0 { + result.push(event.into_vm_event(block_number)); + } + } + + result +} diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs new file mode 100644 index 00000000000..cbf22f9122b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -0,0 +1,26 @@ +use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}; +use zksync_utils::u256_to_h256; + +use crate::glue::GlueFrom; + +impl GlueFrom<&vm2::L2ToL1Log> for SystemL2ToL1Log { + fn glue_from(value: &vm2::L2ToL1Log) -> Self { + let vm2::L2ToL1Log { + key, + value, + is_service, + address, + shard_id, + tx_number, + } = *value; + + Self(L2ToL1Log { + shard_id, + is_service, + tx_number_in_block: tx_number, + sender: address, + key: u256_to_h256(key), + value: u256_to_h256(value), + }) + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/hook.rs b/core/lib/multivm/src/versions/vm_fast/hook.rs new file mode 100644 index 00000000000..8d385f94f3e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/hook.rs @@ -0,0 +1,39 @@ +#[derive(Debug)] +pub(crate) enum Hook { + AccountValidationEntered, + PaymasterValidationEntered, + AccountValidationExited, + ValidationStepEnded, + TxHasEnded, + DebugLog, + DebugReturnData, + NearCallCatch, + AskOperatorForRefund, + NotifyAboutRefund, + PostResult, + FinalBatchInfo, + PubdataRequested, +} + +impl Hook { + /// # Panics + /// Panics if the number does not correspond to any hook. 
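In `merge_events` above, the first 64-bit limb of the event key packs the topic count into its low 32 bits and the data length into the next 32 bits. A tiny sketch of that unpacking; the helper name and the input value are made up for illustration.

fn unpack_event_header(first_limb: u64) -> (u32, usize) {
    let num_topics = first_limb as u32;            // low 32 bits
    let data_length = (first_limb >> 32) as usize; // next 32 bits
    (num_topics, data_length)
}

fn main() {
    // 3 topics and 64 bytes of data packed into one limb.
    let limb = (64u64 << 32) | 3;
    assert_eq!(unpack_event_header(limb), (3, 64));
}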
+ pub fn from_u32(hook: u32) -> Self { + match hook { + 0 => Hook::AccountValidationEntered, + 1 => Hook::PaymasterValidationEntered, + 2 => Hook::AccountValidationExited, + 3 => Hook::ValidationStepEnded, + 4 => Hook::TxHasEnded, + 5 => Hook::DebugLog, + 6 => Hook::DebugReturnData, + 7 => Hook::NearCallCatch, + 8 => Hook::AskOperatorForRefund, + 9 => Hook::NotifyAboutRefund, + 10 => Hook::PostResult, + 11 => Hook::FinalBatchInfo, + 12 => Hook::PubdataRequested, + _ => panic!("Unknown hook {}", hook), + } + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs b/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs new file mode 100644 index 00000000000..b3bf15cb1be --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs @@ -0,0 +1,44 @@ +use zksync_types::U256; +use zksync_utils::{address_to_u256, h256_to_u256}; + +use crate::{interface::L1BatchEnv, vm_latest::utils::fee::get_batch_base_fee}; + +const OPERATOR_ADDRESS_SLOT: usize = 0; +const PREV_BLOCK_HASH_SLOT: usize = 1; +const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; +const NEW_BLOCK_NUMBER_SLOT: usize = 3; +const FAIR_PUBDATA_PRICE_SLOT: usize = 4; +const FAIR_L2_GAS_PRICE_SLOT: usize = 5; +const EXPECTED_BASE_FEE_SLOT: usize = 6; +const SHOULD_SET_NEW_BLOCK_SLOT: usize = 7; + +/// Returns the initial memory for the bootloader based on the current batch environment. +pub(crate) fn bootloader_initial_memory(l1_batch: &L1BatchEnv) -> Vec<(usize, U256)> { + let (prev_block_hash, should_set_new_block) = l1_batch + .previous_batch_hash + .map(|prev_block_hash| (h256_to_u256(prev_block_hash), U256::one())) + .unwrap_or_default(); + + vec![ + ( + OPERATOR_ADDRESS_SLOT, + address_to_u256(&l1_batch.fee_account), + ), + (PREV_BLOCK_HASH_SLOT, prev_block_hash), + (NEW_BLOCK_TIMESTAMP_SLOT, U256::from(l1_batch.timestamp)), + (NEW_BLOCK_NUMBER_SLOT, U256::from(l1_batch.number.0)), + ( + FAIR_PUBDATA_PRICE_SLOT, + U256::from(l1_batch.fee_input.fair_pubdata_price()), + ), + ( + FAIR_L2_GAS_PRICE_SLOT, + U256::from(l1_batch.fee_input.fair_l2_gas_price()), + ), + ( + EXPECTED_BASE_FEE_SLOT, + U256::from(get_batch_base_fee(l1_batch)), + ), + (SHOULD_SET_NEW_BLOCK_SLOT, should_set_new_block), + ] +} diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs new file mode 100644 index 00000000000..4deb6b9dbf7 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -0,0 +1,14 @@ +pub use self::vm::Vm; + +mod bootloader_state; +mod bytecode; +mod events; +mod glue; +mod hook; +mod initial_bootloader_memory; +mod pubdata; +mod refund; +#[cfg(test)] +mod tests; +mod transaction_data; +mod vm; diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs new file mode 100644 index 00000000000..38489a6c8e9 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -0,0 +1,124 @@ +use zksync_types::{ + event::L1MessengerL2ToL1Log, + writes::{compress_state_diffs, StateDiffRecord}, +}; + +/// Struct based on which the pubdata blob is formed +#[derive(Debug, Clone, Default)] +pub(crate) struct PubdataInput { + pub(crate) user_logs: Vec, + pub(crate) l2_to_l1_messages: Vec>, + pub(crate) published_bytecodes: Vec>, + pub(crate) state_diffs: Vec, +} + +impl PubdataInput { + pub(crate) fn build_pubdata(self, with_uncompressed_state_diffs: bool) -> Vec { + let mut l1_messenger_pubdata = vec![]; + + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + 
state_diffs, + } = self; + + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... || l2tol1logs[n]]` + l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); + for l2tol1log in user_logs { + l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); + } + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` + l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); + for message in l2_to_l1_messages { + l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); + l1_messenger_pubdata.extend(message); + } + + // Encoding bytecodes + // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` + l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); + for bytecode in published_bytecodes { + l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); + l1_messenger_pubdata.extend(bytecode); + } + + // Encoding state diffs + // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` + let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); + l1_messenger_pubdata.extend(state_diffs_compressed); + + if with_uncompressed_state_diffs { + l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); + for state_diff in state_diffs { + l1_messenger_pubdata.extend(state_diff.encode_padded()); + } + } + + l1_messenger_pubdata + } +} + +#[cfg(test)] +mod tests { + use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; + use zksync_utils::u256_to_h256; + + use super::*; + + #[test] + fn test_basic_pubdata_building() { + // Just using some constant addresses for tests + let addr1 = BOOTLOADER_ADDRESS; + let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; + + let user_logs = vec![L1MessengerL2ToL1Log { + l2_shard_id: 0, + is_service: false, + tx_number_in_block: 0, + sender: addr1, + key: 1.into(), + value: 128.into(), + }]; + + let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; + + let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; + + // For covering more cases, we have two state diffs: + // One with enumeration index present (and so it is a repeated write) and the one without it. 
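The `build_pubdata` comments above describe a simple length-prefixed layout: a big-endian `u32` item count, then each item preceded by its own big-endian `u32` length. A minimal sketch of that encoding for the L2->L1 messages section; `encode_messages` is an illustrative helper, not part of this PR.

fn encode_messages(messages: &[Vec<u8>]) -> Vec<u8> {
    let mut out = Vec::new();
    // Number of messages as a big-endian u32.
    out.extend((messages.len() as u32).to_be_bytes());
    for msg in messages {
        // Each message is prefixed by its own length.
        out.extend((msg.len() as u32).to_be_bytes());
        out.extend(msg);
    }
    out
}

fn main() {
    let encoded = encode_messages(&[vec![0xde, 0xad, 0xbe, 0xef]]);
    // 4-byte count (= 1) + 4-byte length (= 4) + 4 payload bytes.
    assert_eq!(encoded.len(), 12);
    assert_eq!(encoded[..4], 1u32.to_be_bytes());
}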
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + let input = PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + }; + + let pubdata = + ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); + + assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/refund.rs b/core/lib/multivm/src/versions/vm_fast/refund.rs new file mode 100644 index 00000000000..524a6ca4c3b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/refund.rs @@ -0,0 +1,66 @@ +use zksync_types::{H256, U256}; +use zksync_utils::ceil_div_u256; + +use crate::vm_latest::{utils::fee::get_batch_base_fee, L1BatchEnv}; + +pub(crate) fn compute_refund( + l1_batch: &L1BatchEnv, + bootloader_refund: u64, + gas_spent_on_pubdata: u64, + tx_gas_limit: u64, + current_ergs_per_pubdata_byte: u32, + pubdata_published: u32, + tx_hash: H256, +) -> u64 { + let total_gas_spent = tx_gas_limit - bootloader_refund; + + let gas_spent_on_computation = total_gas_spent + .checked_sub(gas_spent_on_pubdata) + .unwrap_or_else(|| { + tracing::error!( + "Gas spent on pubdata is greater than total gas spent. On pubdata: {}, total: {}", + gas_spent_on_pubdata, + total_gas_spent + ); + 0 + }); + + // For now, bootloader charges only for base fee. 
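A simplified sketch of the arithmetic `compute_refund` performs: charge a "fair" fee of computation gas times the fair L2 gas price plus published pubdata times a pubdata price, subtract that from what was pre-paid at the effective gas price, and convert the remainder back into gas with a ceiling division. Plain `u128` values stand in for `U256`, and all inputs in `main` are made-up numbers.

fn ceil_div(a: u128, b: u128) -> u128 {
    (a + b - 1) / b
}

fn refund_in_gas(
    tx_gas_limit: u128,
    gas_spent_on_computation: u128,
    pubdata_published: u128,
    effective_gas_price: u128, // what the user was actually charged per gas
    fair_l2_gas_price: u128,
    price_per_pubdata_byte: u128,
) -> u128 {
    let fair_fee = gas_spent_on_computation * fair_l2_gas_price
        + pubdata_published * price_per_pubdata_byte;
    let pre_paid = tx_gas_limit * effective_gas_price;
    // Never refund a negative amount if the fair fee exceeds what was paid.
    let refund_wei = pre_paid.saturating_sub(fair_fee);
    ceil_div(refund_wei, effective_gas_price)
}

fn main() {
    // 1M gas limit, 400k gas spent on computation, 1000 pubdata bytes published.
    let refund = refund_in_gas(1_000_000, 400_000, 1_000, 250, 250, 5_000);
    assert!(refund <= 1_000_000);
}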
+ let effective_gas_price = get_batch_base_fee(l1_batch); + + let bootloader_eth_price_per_pubdata_byte = + U256::from(effective_gas_price) * U256::from(current_ergs_per_pubdata_byte); + + let fair_eth_price_per_pubdata_byte = U256::from(l1_batch.fee_input.fair_pubdata_price()); + + // For now, L1 originated transactions are allowed to pay less than fair fee per pubdata, + // so we should take it into account. + let eth_price_per_pubdata_byte_for_calculation = std::cmp::min( + bootloader_eth_price_per_pubdata_byte, + fair_eth_price_per_pubdata_byte, + ); + + let fair_fee_eth = U256::from(gas_spent_on_computation) + * U256::from(l1_batch.fee_input.fair_l2_gas_price()) + + U256::from(pubdata_published) * eth_price_per_pubdata_byte_for_calculation; + let pre_paid_eth = U256::from(tx_gas_limit) * U256::from(effective_gas_price); + let refund_eth = pre_paid_eth.checked_sub(fair_fee_eth).unwrap_or_else(|| { + tracing::error!( + "Fair fee is greater than pre paid. Fair fee: {} wei, pre paid: {} wei", + fair_fee_eth, + pre_paid_eth + ); + U256::zero() + }); + + tracing::trace!( + "Fee benchmark for transaction with hash {}", + hex::encode(tx_hash.as_bytes()) + ); + tracing::trace!("Gas Limit: {}", tx_gas_limit); + tracing::trace!("Gas spent on computation: {}", gas_spent_on_computation); + tracing::trace!("Gas spent on pubdata: {}", gas_spent_on_pubdata); + tracing::trace!("Pubdata published: {}", pubdata_published); + + ceil_div_u256(refund_eth, effective_gas_price.into()).as_u64() +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs new file mode 100644 index 00000000000..239d40947a6 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -0,0 +1,423 @@ +use std::borrow::BorrowMut; + +use ethabi::Token; +use itertools::Itertools; +use zksync_contracts::load_sys_contract; +use zksync_system_constants::{ + CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, +}; +use zksync_types::{ + commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; + +use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; +use crate::versions::vm_fast::tests::tester::{ + default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, +}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, + }, + tracers::PubdataTracer, + L1BatchEnv, TracerDispatcher, + }, +}; + +#[derive(Debug, Clone, Default)] +struct L1MessengerTestData { + l2_to_l1_logs: usize, + messages: Vec>, + bytecodes: Vec>, + state_diffs: Vec, +} + +struct MimicCallInfo { + to: Address, + who_to_mimic: Address, + data: Vec, +} + +const CALLS_PER_TX: usize = 1_000; +fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { + let complex_upgrade = get_complex_upgrade_abi(); + let l1_messenger = load_sys_contract("L1Messenger"); + + let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendL2ToL1Log") + .unwrap() + .encode_input(&[ + Token::Bool(false), + 
Token::FixedBytes(H256::random().0.to_vec()), + Token::FixedBytes(H256::random().0.to_vec()), + ]) + .unwrap(), + }); + let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(message.clone())]) + .unwrap(), + }); + let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("requestBytecodeL1Publication") + .unwrap() + .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) + .unwrap(), + }); + + let encoded_calls = logs_mimic_calls + .chain(messages_mimic_calls) + .chain(bytecodes_mimic_calls) + .map(|call| { + Token::Tuple(vec![ + Token::Address(call.to), + Token::Address(call.who_to_mimic), + Token::Bytes(call.data), + ]) + }) + .chunks(CALLS_PER_TX) + .into_iter() + .map(|chunk| { + complex_upgrade + .function("mimicCalls") + .unwrap() + .encode_input(&[Token::Array(chunk.collect_vec())]) + .unwrap() + }) + .collect_vec(); + + encoded_calls +} + +struct TestStatistics { + pub max_used_gas: u32, + pub circuit_statistics: u64, + pub execution_metrics_size: u64, +} + +struct StatisticsTagged { + pub statistics: TestStatistics, + pub tag: String, +} + +fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { + let mut storage = get_empty_storage(); + let complex_upgrade_code = read_complex_upgrade(); + + // For this test we'll just put the bytecode onto the force deployer address + storage.borrow_mut().set_value( + get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), + hash_bytecode(&complex_upgrade_code), + ); + storage + .borrow_mut() + .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); + + // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute + // the gas limit + + let batch_env = L1BatchEnv { + fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), + ..default_l1_batch(zksync_types::L1BatchNumber(1)) + }; + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_l1_batch_env(batch_env) + .build(); + + let bytecodes = test_data + .bytecodes + .iter() + .map(|bytecode| { + let hash = hash_bytecode(bytecode); + let words = bytes_to_be_words(bytecode.clone()); + (h256_to_u256(hash), words) + }) + .collect(); + vm.vm.insert_bytecodes(bytecodes); + + let txs_data = populate_mimic_calls(test_data.clone()); + let account = &mut vm.rich_accounts[0]; + + for (i, data) in txs_data.into_iter().enumerate() { + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + calldata: data, + value: U256::zero(), + factory_deps: None, + }, + None, + ); + + vm.vm.push_transaction(tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction {i} wasn't successful for input: {:#?}", + test_data + ); + } + + // Now we count how much ergs were spent at the end of the batch + // It is assumed that the top level frame is the bootloader + + let ergs_before = vm.vm.gas_remaining(); + + // We ensure that indeed the provided state diffs are used + let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( + vm.vm.batch_env.clone(), + VmExecutionMode::Batch, + 
test_data.state_diffs.clone(), + ); + + let result = vm.vm.inspect_inner( + TracerDispatcher::default(), + VmExecutionMode::Batch, + Some(pubdata_tracer), + ); + + assert!( + !result.result.is_failed(), + "Batch wasn't successful for input: {:?}", + test_data + ); + + let ergs_after = vm.vm.gas_remaining(); + + assert_eq!( + (ergs_before - ergs_after) as u64, + result.statistics.gas_used + ); + + TestStatistics { + max_used_gas: ergs_before - ergs_after, + circuit_statistics: result.statistics.circuit_statistic.total() as u64, + execution_metrics_size: result.get_execution_metrics(None).size() as u64, + } +} + +fn generate_state_diffs( + repeated_writes: bool, + small_diff: bool, + number_of_state_diffs: usize, +) -> Vec { + (0..number_of_state_diffs) + .map(|i| { + let address = Address::from_low_u64_be(i as u64); + let key = U256::from(i); + let enumeration_index = if repeated_writes { i + 1 } else { 0 }; + + let (initial_value, final_value) = if small_diff { + // As small as it gets, one byte to denote zeroing out the value + (U256::from(1), U256::from(0)) + } else { + // As large as it gets + (U256::from(0), U256::from(2).pow(255.into())) + }; + + StateDiffRecord { + address, + key, + derived_key: u256_to_h256(i.into()).0, + enumeration_index: enumeration_index as u64, + initial_value, + final_value, + } + }) + .collect() +} + +// A valid zkEVM bytecode has odd number of 32 byte words +fn get_valid_bytecode_length(length: usize) -> usize { + // Firstly ensure that the length is divisible by 32 + let length_padded_to_32 = if length % 32 == 0 { + length + } else { + length + 32 - (length % 32) + }; + + // Then we ensure that the number returned by division by 32 is odd + if length_padded_to_32 % 64 == 0 { + length_padded_to_32 + 32 + } else { + length_padded_to_32 + } +} + +#[test] +fn test_dry_run_upper_bound() { + // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). + // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` + // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. + const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = + (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; + + // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. + // To get the upper bound, we'll try to do the following: + // 1. Max number of logs. + // 2. Lots of small L2->L1 messages / one large L2->L1 message. + // 3. Lots of small bytecodes / one large bytecode. + // 4. Lots of storage slot updates. 
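A quick check of the padding rule implemented by `get_valid_bytecode_length` above: round the length up to a multiple of 32 bytes, then add one more 32-byte word whenever the word count would otherwise be even. The helper below restates that rule with a few spot checks and is illustrative only.

fn padded_len(len: usize) -> usize {
    // Round up to a multiple of 32 bytes.
    let padded = if len % 32 == 0 { len } else { len + 32 - len % 32 };
    // Add one word if the word count is even, so it ends up odd.
    if (padded / 32) % 2 == 0 { padded + 32 } else { padded }
}

fn main() {
    assert_eq!(padded_len(1), 32);  // 1 word after padding: already odd
    assert_eq!(padded_len(33), 96); // 2 words would be even, so bump to 3
    assert_eq!(padded_len(96), 96); // 3 words: already odd
}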
+ + let statistics = vec![ + // max logs + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, + ..Default::default() + }), + tag: "max_logs".to_string(), + }, + // max messages + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, + // so the max number of pubdata is bound by it + messages: vec![ + vec![0; 0]; + MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) + ], + ..Default::default() + }), + tag: "max_messages".to_string(), + }, + // long message + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it + messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], + ..Default::default() + }), + tag: "long_message".to_string(), + }, + // max bytecodes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each bytecode must be at least 32 bytes long. + // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number + bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], + ..Default::default() + }), + tag: "max_bytecodes".to_string(), + }, + // long bytecode + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + bytecodes: vec![ + vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; + 1 + ], + ..Default::default() + }), + tag: "long_bytecode".to_string(), + }, + // lots of small repeated writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) + state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), + ..Default::default() + }), + tag: "small_repeated_writes".to_string(), + }, + // lots of big repeated writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + true, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, + ), + ..Default::default() + }), + tag: "big_repeated_writes".to_string(), + }, + // lots of small initial writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out + state_diffs: generate_state_diffs( + false, + true, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, + ), + ..Default::default() + }), + tag: "small_initial_writes".to_string(), + }, + // lots of large initial writes + StatisticsTagged { + statistics: execute_test(L1MessengerTestData { + // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + false, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, + ), + ..Default::default() + }), + tag: "big_initial_writes".to_string(), + }, + ]; + + // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
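+    // Note on the checks below: each assertion has the shape `worst_case * 3 / 2 <= OVERHEAD`,
+    // i.e. the corresponding `BOOTLOADER_BATCH_TIP_*` constant must exceed the worst measured
+    // scenario by at least 50% (the constants themselves are intended to be roughly 2x, per the
+    // comment above). If one of these assertions starts failing, the constant most likely needs
+    // to be bumped, or the batch tip has become more expensive and should be investigated.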
+ let max_used_gas = statistics + .iter() + .map(|s| (s.statistics.max_used_gas, s.tag.clone())) + .max() + .unwrap(); + assert!( + max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, + "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", + max_used_gas.1, + max_used_gas.0, + BOOTLOADER_BATCH_TIP_OVERHEAD + ); + + let circuit_statistics = statistics + .iter() + .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) + .max() + .unwrap(); + assert!( + circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", + circuit_statistics.1, + circuit_statistics.0, + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD + ); + + let execution_metrics_size = statistics + .iter() + .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) + .max() + .unwrap(); + assert!( + execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", + execution_metrics_size.1, + execution_metrics_size.0, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs new file mode 100644 index 00000000000..c698d36683e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -0,0 +1,53 @@ +use zksync_types::U256; + +use crate::{ + interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, + versions::vm_fast::tests::{ + tester::VmTesterBuilder, + utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, + }, +}; + +#[test] +fn test_dummy_bootloader() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!result.result.is_failed()); + + let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + + verify_required_memory( + &vm.vm.inner.state, + vec![(correct_first_cell, vm2::FIRST_HEAP, 0)], + ); +} + +#[test] +fn test_bootloader_out_of_gas() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_bootloader_gas_limit(10) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let res = vm.vm.execute(VmExecutionMode::Batch); + + assert!(matches!( + res.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + )); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs new file mode 100644 index 00000000000..01fc8dc07d0 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -0,0 +1,40 @@ +use zksync_types::event::extract_long_l2_to_l1_messages; +use zksync_utils::bytecode::compress_bytecode; + +use crate::{ + interface::{TxExecutionMode, 
VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, +}; + +#[test] +fn test_bytecode_publishing() { + // In this test, we aim to ensure that the contents of the compressed bytecodes + // are included as part of the L2->L1 long messages + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let compressed_bytecode = compress_bytecode(&counter).unwrap(); + + let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + let state = vm.vm.get_current_execution_state(); + let long_messages = extract_long_l2_to_l1_messages(&state.events); + assert!( + long_messages.contains(&compressed_bytecode), + "Bytecode not published" + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs new file mode 100644 index 00000000000..c97b38b6afc --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs @@ -0,0 +1,92 @@ +use std::sync::Arc; + +use once_cell::sync::OnceCell; +use zksync_types::{Address, Execute}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + tracers::CallTracer, + vm_latest::{ + constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + tests::{ + tester::VmTesterBuilder, + utils::{read_max_depth_contract, read_test_contract}, + }, + HistoryEnabled, ToTracerPointer, + }, +}; + +// This test is ultra slow, so it's ignored by default. 
+#[test] +#[ignore] +fn test_max_depth() { + let contract = read_max_depth_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: vec![], + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result = Arc::new(OnceCell::new()); + let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); + vm.vm.push_transaction(tx); + let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); + assert!(result.get().is_some()); + assert!(res.result.is_failed()); +} + +#[test] +fn test_basic_behavior() { + let contract = read_test_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + let increment_by_6_calldata = + "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(increment_by_6_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + + let result = Arc::new(OnceCell::new()); + let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); + vm.vm.push_transaction(tx); + let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); + + let call_tracer_result = result.get().unwrap(); + + assert_eq!(call_tracer_result.len(), 1); + // Expect that there are plenty of subcalls underneath. + let subcall = &call_tracer_result[0].calls; + assert!(subcall.len() > 10); + assert!(!res.result.is_failed()); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs new file mode 100644 index 00000000000..c582bd28c88 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -0,0 +1,74 @@ +use zksync_types::{Address, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, + }, +}; + +// Checks that the estimated number of circuits for a simple transfer doesn't differ much +// from the hardcoded expected value.
+#[test] +fn test_circuits() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Address::random(), + calldata: Vec::new(), + value: U256::from(1u8), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let s = res.statistics.circuit_statistic; + // Check `circuit_statistic`. + const EXPECTED: [f32; 13] = [ + 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, + 0.0, 0.0, 0.0, + ]; + let actual = [ + (s.main_vm, "main_vm"), + (s.ram_permutation, "ram_permutation"), + (s.storage_application, "storage_application"), + (s.storage_sorter, "storage_sorter"), + (s.code_decommitter, "code_decommitter"), + (s.code_decommitter_sorter, "code_decommitter_sorter"), + (s.log_demuxer, "log_demuxer"), + (s.events_sorter, "events_sorter"), + (s.keccak256, "keccak256"), + (s.ecrecover, "ecrecover"), + (s.sha256, "sha256"), + (s.secp256k1_verify, "secp256k1_verify"), + (s.transient_storage_checker, "transient_storage_checker"), + ]; + for ((actual, name), expected) in actual.iter().zip(EXPECTED) { + if expected == 0.0 { + assert_eq!( + *actual, expected, + "Check failed for {}, expected {}, actual {}", + name, expected, actual + ); + } else { + let diff = (actual - expected) / expected; + assert!( + diff.abs() < 0.1, + "Check failed for {}, expected {}, actual {}", + name, + expected, + actual + ); + } + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs new file mode 100644 index 00000000000..24fda3beed4 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -0,0 +1,251 @@ +use ethabi::Token; +use zksync_types::{ + get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + }, +}; + +fn generate_large_bytecode() -> Vec { + // This is the maximal possible size of a zkEVM bytecode + vec![2u8; ((1 << 16) - 1) * 32] +} + +#[test] +fn test_code_oracle() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + // Filling the zkevm bytecode + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. 
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode, + precompiles_contract_address, + false, + )]) + .with_storage(storage) + .build(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // Now, we ask for the same bytecode. We use to partially check whether the memory page with + // the decommitted bytecode gets erased (it shouldn't). + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +fn find_code_oracle_cost_log( + precompiles_contract_address: Address, + logs: &[StorageLogWithPreviousValue], +) -> &StorageLogWithPreviousValue { + logs.iter() + .find(|log| { + *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() + }) + .expect("no code oracle cost log") +} + +#[test] +fn test_code_oracle_big_bytecode() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + let big_zkevm_bytecode = generate_large_bytecode(); + let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); + let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); + + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&big_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode, + precompiles_contract_address, + false, + )]) + .with_storage(storage) + .build(); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + vm.vm.insert_bytecodes([big_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
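+    // `callCodeOracle(versionedHash, expectedKeccak)` is assumed to ask the code oracle
+    // precompile to decommit the bytecode behind `versionedHash` and to compare the keccak256
+    // of the returned preimage against `expectedKeccak`, reverting on a mismatch; hence the
+    // two `FixedBytes` arguments below. The exact behavior lives in the precompiles test
+    // contract loaded above.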
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +#[test] +fn refunds_in_code_oracle() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + // Execute code oracle twice with identical VM state that only differs in that the queried bytecode + // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas + // for already decommitted codes). + let mut oracle_costs = vec![]; + for decommit in [false, true] { + let mut vm = VmTesterBuilder::new() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode.clone(), + precompiles_contract_address, + false, + )]) + .with_storage(storage.clone()) + .build(); + + vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + if decommit { + let (_, is_fresh) = vm + .vm + .inner + .world_diff + .decommit_opcode(&mut vm.vm.world, h256_to_u256(normal_zkevm_bytecode_hash)); + assert!(is_fresh); + } + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + let log = + find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); + oracle_costs.push(log.log.value); + } + + // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` + // in `CodeOracle.yul`. 
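+    // For example, a 64-byte bytecode is 64 / 32 = 2 words, so the expected refund would be
+    // 4 * 2 = 8 ergs; the assertion below applies the same `4 * (len / 32)` formula to the
+    // length of the test contract's bytecode.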
+ let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); + assert_eq!( + code_oracle_refund, + (4 * (normal_zkevm_bytecode.len() / 32)).into() + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs new file mode 100644 index 00000000000..460c8251652 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -0,0 +1,81 @@ +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{ + get_code_key, get_known_code_key, get_nonce_key, + system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, + AccountTreeId, U256, +}; +use zksync_utils::u256_to_h256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::{get_balance, read_test_contract, verify_required_storage}, + }, + vm_latest::utils::fee::get_batch_base_fee, +}; + +#[test] +fn test_default_aa_interaction() { + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let DeployContractsTx { + tx, + bytecode_hash, + address, + } = account.get_deploy_tx(&counter, None, TxType::L2); + let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.execute(VmExecutionMode::Batch); + + vm.vm.get_current_execution_state(); + + // Both deployment and ordinary nonce should be incremented by one. + let account_nonce_key = get_nonce_key(&account.address); + let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&bytecode_hash); + + // The contract should be deployed successfully. 
+ let account_code_key = get_code_key(&address); + + let expected_slots = [ + (u256_to_h256(expected_nonce), account_nonce_key), + (u256_to_h256(U256::from(1u32)), known_codes_key), + (bytecode_hash, account_code_key), + ]; + + verify_required_storage( + &expected_slots, + &mut vm.vm.world.storage, + vm.vm.inner.world_diff.get_storage_state(), + ); + + let expected_fee = maximal_fee + - U256::from(result.refunds.gas_refunded) + * U256::from(get_batch_base_fee(&vm.vm.batch_env)); + let operator_balance = get_balance( + AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), + &vm.fee_account, + &mut vm.vm.world.storage, + vm.vm.inner.world_diff.get_storage_state(), + ); + + assert_eq!( + operator_balance, expected_fee, + "Operator did not receive his fee" + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs new file mode 100644 index 00000000000..e0c55c5a685 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -0,0 +1,36 @@ +use zksync_test_account::Account; +use zksync_types::{fee::Fee, Execute}; + +use crate::{ + interface::{TxExecutionMode, VmInterface}, + vm_fast::tests::tester::VmTesterBuilder, + vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, +}; + +/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. +#[test] +fn test_tx_gas_limit_offset() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let gas_limit = 9999.into(); + let tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute::default(), + Some(Fee { + gas_limit, + ..Account::default_fee() + }), + ); + + vm.vm.push_transaction(tx); + + assert!(vm.vm.inner.state.previous_frames.is_empty()); + let gas_limit_from_memory = vm + .vm + .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); + + assert_eq!(gas_limit_from_memory, gas_limit); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs new file mode 100644 index 00000000000..af90566671e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -0,0 +1,96 @@ +use std::collections::HashSet; + +use itertools::Itertools; +use zksync_state::ReadStorage; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_test_account::Account; +use zksync_types::{Execute, U256}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::{ + tests::{ + tester::{TxType, VmTesterBuilder}, + utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + }, + vm::Vm, + }, +}; + +#[test] +fn test_get_used_contracts() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + + // create and push and execute some not-empty factory deps transaction with success status + // to check that `get_decommitted_hashes()` updates + let contract_code = read_test_contract(); + let mut account = Account::random(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); + vm.vm.push_transaction(tx.tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert!(vm + .vm + .decommitted_hashes() + 
.contains(&h256_to_u256(tx.bytecode_hash))); + + // Note: `Default_AA` will be in the list of used contracts if an L2 tx is used + assert_eq!( + vm.vm.decommitted_hashes().collect::<HashSet<_>>(), + known_bytecodes_without_aa_code(&vm.vm) + ); + + // Create, push and execute another transaction with non-empty factory deps that fails + // (`known_bytecodes` will be updated, but we expect `get_decommitted_hashes()` to not be updated) + + let calldata = [1, 2, 3]; + let big_calldata: Vec<u8> = calldata + .iter() + .cycle() + .take(calldata.len() * 1024) + .cloned() + .collect(); + let account2 = Account::random(); + let tx2 = account2.get_l1_tx( + Execute { + contract_address: CONTRACT_DEPLOYER_ADDRESS, + calldata: big_calldata, + value: Default::default(), + factory_deps: vec![vec![1; 32]], + }, + 1, + ); + + vm.vm.push_transaction(tx2.clone()); + + let res2 = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res2.result.is_failed()); + + for factory_dep in tx2.execute.factory_deps { + let hash = hash_bytecode(&factory_dep); + let hash_to_u256 = h256_to_u256(hash); + assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); + assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); + } +} + +fn known_bytecodes_without_aa_code<S: ReadStorage>(vm: &Vm<S>) -> HashSet<U256> { + let mut known_bytecodes_without_aa_code = vm + .world + .bytecode_cache + .keys() + .cloned() + .collect::<HashSet<_>>(); + + known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + + known_bytecodes_without_aa_code +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs new file mode 100644 index 00000000000..dde83d8a9f3 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs @@ -0,0 +1,120 @@ +use zksync_types::H256; +use zksync_utils::h256_to_u256; + +use crate::vm_latest::tests::tester::VmTesterBuilder; +use crate::vm_latest::types::inputs::system_env::TxExecutionMode; +use crate::vm_latest::{HistoryEnabled, TxRevertReason}; + +// TODO: this test requires a lot of hacks for bypassing the bytecode checks in the VM. +// Port it later; it's not significant for now. + +#[test] +fn test_invalid_bytecode() { + let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) + .with_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1); + let mut storage = vm_builder.take_storage(); + let mut vm = vm_builder.build(&mut storage); + + let block_gas_per_pubdata = vm_test_env + .block_context + .context + .block_gas_price_per_pubdata(); + + let mut test_vm_with_custom_bytecode_hash = + |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| { + let mut oracle_tools = + OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); + + let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( + h256_to_u256(bytecode_hash), + block_gas_per_pubdata as u32, + ); + + run_vm_with_custom_factory_deps( + &mut oracle_tools, + vm_test_env.block_context.context, + &vm_test_env.block_properties, + encoded_tx, + predefined_overhead, + expected_revert_reason, + ); + }; + + let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| { + TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { + msg: msg.to_string(), + data, + }) + }; + + // Here we provide a correctly formatted bytecode hash of + // odd length (in words), so it should work.
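+    // Reading the hand-crafted hashes below (assuming the usual zkEVM versioned bytecode hash
+    // layout): byte 0 is the version (must be 1), byte 1 must be zero, and bytes 2..4 are the
+    // bytecode length in 32-byte words as a big-endian u16. So `[1, 0, 0, 1, ..]` encodes
+    // version 1 and a length of 1 word (odd, so valid), `[1, 0, 2, 2, ..]` encodes 0x0202 = 514
+    // words (even, so rejected), and a non-zero second byte or an unknown version is rejected
+    // as an incorrectly formatted bytecode hash.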
+ test_vm_with_custom_bytecode_hash( + H256([ + 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + None, + ); + + // Here we provide correctly formatted bytecode of even length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Code length in words must be odd", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, + 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, + 32, 98, 101, 32, 111, 100, 100, + ], + )), + ); + + // Here we provide incorrectly formatted bytecode of odd length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + )), + ); + + // Here we provide incorrectly formatted bytecode of odd length, so + // it should fail. + test_vm_with_custom_bytecode_hash( + H256([ + 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]), + Some(failed_to_mark_factory_deps( + "Incorrectly formatted bytecodeHash", + vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, + 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, + 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + )), + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs new file mode 100644 index 00000000000..0bbf633254e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -0,0 +1,45 @@ +use zksync_state::ReadStorage; +use zksync_types::get_nonce_key; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{Account, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, +}; + +#[test] +fn test_is_write_initial_behaviour() { + // In this test, we check result of `is_write_initial` at different stages. + // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't + // messed up it with the repeated writes during the one batch execution. 
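+    // In other words: a write is "initial" if the key is absent from the underlying storage at
+    // the start of the batch and "repeated" otherwise, and writes performed by transactions
+    // within the batch must not flip that answer. That is exactly what the two assertions below
+    // check for the account's nonce key.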
+ + let mut account = Account::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let nonce_key = get_nonce_key(&account.address); + // Check that the next write to the nonce key will be initial. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); + + let contract_code = read_test_contract(); + let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; + + vm.vm.push_transaction(tx); + vm.vm.execute(VmExecutionMode::OneTx); + + // Check that `is_write_initial` still returns true for the nonce key. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs new file mode 100644 index 00000000000..033a7b2658f --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -0,0 +1,199 @@ +use ethabi::Token; +use zksync_contracts::l1_messenger_contract; +use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_types::{ + get_code_key, get_known_code_key, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + storage_writes_deduplicator::StorageWritesDeduplicator, + Execute, ExecuteTransactionCommon, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::{ + tests::{ + tester::{TxType, VmTesterBuilder}, + utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + }, + transaction_data::TransactionData, + }, +}; + +#[test] +fn test_l1_tx_execution() { + // In this test, we try to execute a contract deployment from L1 + // Here instead of marking code hash via the bootloader means, we will be + // using L1->L2 communication, the same it would likely be done during the priority mode. + + // There are always at least 9 initial writes here, because we pay fees from l1: + // - `totalSupply` of ETH token + // - balance of the refund recipient + // - balance of the bootloader + // - `tx_rolling` hash + // - `gasPerPubdataByte` + // - `basePubdataSpent` + // - rolling hash of L2->L1 logs + // - transaction number in block counter + // - L2->L1 log counter in `L1Messenger` + + // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. + let basic_initial_writes = 5; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let contract_code = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); + let tx_data: TransactionData = deploy_tx.tx.clone().into(); + + let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { + shard_id: 0, + is_service: true, + tx_number_in_block: 0, + sender: BOOTLOADER_ADDRESS, + key: tx_data.tx_hash(0.into()), + value: u256_to_h256(U256::from(1u32)), + }] + .into_iter() + .map(UserL2ToL1Log) + .collect(); + + vm.vm.push_transaction(deploy_tx.tx.clone()); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + // The code hash of the deployed contract should be marked as republished. 
+ let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); + + // The contract should be deployed successfully. + let account_code_key = get_code_key(&deploy_tx.address); + + assert!(!res.result.is_failed()); + + for (expected_value, storage_location) in [ + (U256::from(1u32), known_codes_key), + (h256_to_u256(deploy_tx.bytecode_hash), account_code_key), + ] { + assert_eq!( + expected_value, + vm.vm.inner.world_diff.get_storage_state()[&( + *storage_location.address(), + h256_to_u256(*storage_location.key()) + )] + ); + } + + assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + true, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + + // The tx panicked + assert_eq!(res.initial_storage_writes, basic_initial_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We changed one slot inside the contract. However, the rewrite of the `basePubdataSpent` didn't happen, since it was the same + // as at the start of the previous tx. Thus we have `+1` slot for the changed counter and `-1` slot for base pubdata spent. + assert_eq!(res.initial_storage_writes, basic_initial_writes); + + // No repeated writes + let repeated_writes = res.repeated_storage_writes; + assert_eq!(res.repeated_storage_writes, 0); + + vm.vm.push_transaction(tx); + let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We do the same storage write; it is deduplicated, so it adds no new write and there are 0 repeated writes. + // But now the base pubdata spent has changed too, which adds one more initial write. + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, repeated_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + Some(10.into()), + false, + TxType::L1 { serial_id: 1 }, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + // The method is not payable, so the tx should fail + assert!(result.result.is_failed(), "The transaction should fail"); + + let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); + assert_eq!(res.initial_storage_writes, basic_initial_writes); + assert_eq!(res.repeated_storage_writes, 1); +} + +#[test] +fn test_l1_tx_execution_high_gas_limit() { + // In this test, we try to execute an L1->L2 transaction with a high gas limit. + // Usually, priority transactions with a dangerously high gas limit should not even pass the checks on L1; + // however, they might slip through during the transition period to the new fee model, so we check that we can safely process them.
+ + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + + let l1_messenger = l1_messenger_contract(); + + let contract_function = l1_messenger.function("sendToL1").unwrap(); + let params = [ + // Even a message of size 100k should not be able to be sent by a priority transaction + Token::Bytes(vec![0u8; 100_000]), + ]; + let calldata = contract_function.encode_input(¶ms).unwrap(); + + let mut tx = account.get_l1_tx( + Execute { + contract_address: L1_MESSENGER_ADDRESS, + value: 0.into(), + factory_deps: vec![], + calldata, + }, + 0, + ); + + if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { + // Using some large gas limit + data.gas_limit = 300_000_000.into(); + } else { + unreachable!() + }; + + vm.vm.push_transaction(tx); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(res.result.is_failed(), "The transaction should've failed"); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs new file mode 100644 index 00000000000..1f9d0aaff09 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -0,0 +1,424 @@ +//! +//! Tests for the bootloader +//! The description for each of the tests can be found in the corresponding `.yul` file. +//! + +use zksync_state::ReadStorage; +use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + block::{pack_block_info, L2BlockHasher}, + AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, + ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use crate::{ + interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::{ + tests::tester::{default_l1_batch, VmTesterBuilder}, + vm::Vm, + }, + vm_latest::{ + constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, + utils::l2_blocks::get_l2_block_hash_key, + }, +}; + +fn get_l1_noop() -> Transaction { + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender: H160::random(), + gas_limit: U256::from(2000000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute: Execute { + contract_address: H160::zero(), + calldata: vec![], + value: U256::zero(), + factory_deps: vec![], + }, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +#[test] +fn test_l2_block_initialization_timestamp() { + // This test checks that the L2 block initialization works correctly. + // Here we check that that the first block must have timestamp that is greater or equal to the timestamp + // of the current batch. + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + // Override the timestamp of the current L2 block to be 0. 
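+    // The batch environment above uses a non-zero (default) timestamp, so pushing an L2 block
+    // with `timestamp: 0` should trip the bootloader's "block timestamp >= batch timestamp"
+    // check, which is the halt reason asserted below.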
+ vm.vm.bootloader_state.push_l2_block(L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }); + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} + ); +} + +#[test] +fn test_l2_block_initialization_number_non_zero() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first L2 block number can not be zero. + + let l1_batch = default_l1_batch(L1BatchNumber(1)); + let first_l2_block = L2BlockEnv { + number: 0, + timestamp: l1_batch.timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + + set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); + + let res = vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt { + reason: Halt::FailedToSetL2Block( + "L2 block number is never expected to be zero".to_string() + ) + } + ); +} + +fn test_same_l2_block( + expected_error: Option, + override_timestamp: Option, + override_prev_block_hash: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + vm.vm.push_transaction(l1_tx.clone()); + let res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!res.result.is_failed()); + + let mut current_l2_block = vm.vm.batch_env.first_l2_block; + + if let Some(timestamp) = override_timestamp { + current_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = override_prev_block_hash { + current_l2_block.prev_block_hash = prev_block_hash; + } + + if (None, None) == (override_timestamp, override_prev_block_hash) { + current_l2_block.max_virtual_blocks_to_create = 0; + } + + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_same_l2_block() { + // This test aims to test the case when there are multiple transactions inside the same L2 block. 
+ + // Case 1: Incorrect timestamp + test_same_l2_block( + Some(Halt::FailedToSetL2Block( + "The timestamp of the same L2 block must be same".to_string(), + )), + Some(0), + None, + ); + + // Case 2: Incorrect previous block hash + test_same_l2_block( + Some(Halt::FailedToSetL2Block( + "The previous hash of the same L2 block must be same".to_string(), + )), + None, + Some(H256::zero()), + ); + + // Case 3: Correct continuation of the same L2 block + test_same_l2_block(None, None, None); +} + +fn test_new_l2_block( + first_l2_block: L2BlockEnv, + overriden_second_block_number: Option, + overriden_second_block_timestamp: Option, + overriden_second_block_prev_block_hash: Option, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + l1_batch.first_l2_block = first_l2_block; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let l1_tx = get_l1_noop(); + + // Firstly we execute the first transaction + vm.vm.push_transaction(l1_tx.clone()); + vm.vm.execute(VmExecutionMode::OneTx); + + let mut second_l2_block = vm.vm.batch_env.first_l2_block; + second_l2_block.number += 1; + second_l2_block.timestamp += 1; + second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); + + if let Some(block_number) = overriden_second_block_number { + second_l2_block.number = block_number; + } + if let Some(timestamp) = overriden_second_block_timestamp { + second_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { + second_l2_block.prev_block_hash = prev_block_hash; + } + + vm.vm.bootloader_state.push_l2_block(second_l2_block); + + vm.vm.push_transaction(l1_tx); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_new_l2_block() { + // This test is aimed to cover potential issue + + let correct_first_block = L2BlockEnv { + number: 1, + timestamp: 1, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + // Case 1: Block number increasing by more than 1 + test_new_l2_block( + correct_first_block, + Some(3), + None, + None, + Some(Halt::FailedToSetL2Block( + "Invalid new L2 block number".to_string(), + )), + ); + + // Case 2: Timestamp not increasing + test_new_l2_block( + correct_first_block, + None, + Some(1), + None, + Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), + ); + + // Case 3: Incorrect previous block hash + test_new_l2_block( + correct_first_block, + None, + None, + Some(H256::zero()), + Some(Halt::FailedToSetL2Block( + "The current L2 block hash is incorrect".to_string(), + )), + ); + + // Case 4: Correct new block + test_new_l2_block(correct_first_block, None, None, None, None); +} + +#[allow(clippy::too_many_arguments)] +fn test_first_in_batch( + miniblock_timestamp: u64, + miniblock_number: u32, + pending_txs_hash: H256, + batch_timestamp: u64, + new_batch_timestamp: u64, + batch_number: u32, + proposed_block: L2BlockEnv, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.number += 1; + 
l1_batch.timestamp = new_batch_timestamp; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let l1_tx = get_l1_noop(); + + // Setting the values provided. + let mut storage_ptr = vm.vm.world.storage.borrow_mut(); + let miniblock_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let pending_txs_hash_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let batch_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + ); + let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); + + storage_ptr.set_value( + miniblock_info_slot, + u256_to_h256(pack_block_info( + miniblock_number as u64, + miniblock_timestamp, + )), + ); + storage_ptr.set_value(pending_txs_hash_slot, pending_txs_hash); + storage_ptr.set_value( + batch_info_slot, + u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), + ); + storage_ptr.set_value( + prev_block_hash_position, + L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), + ); + drop(storage_ptr); + + // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. + // And then override it with the user-provided value + + let last_l2_block = vm.vm.bootloader_state.last_l2_block(); + let new_l2_block = L2BlockEnv { + number: last_l2_block.number + 1, + timestamp: last_l2_block.timestamp + 1, + prev_block_hash: last_l2_block.get_hash(), + max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, + }; + + vm.vm.bootloader_state.push_l2_block(new_l2_block); + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +#[test] +fn test_l2_block_first_in_batch() { + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch( + 1, + 1, + H256::zero(), + 1, + 2, + 1, + L2BlockEnv { + number: 2, + timestamp: 2, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + None, + ); + + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch( + 8, + 1, + H256::zero(), + 5, + 12, + 1, + L2BlockEnv { + number: 2, + timestamp: 9, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), + ); +} + +fn set_manual_l2_block_info( + vm: &mut Vm, + tx_number: usize, + block_info: L2BlockEnv, +) { + let fictive_miniblock_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; + + vm.write_to_bootloader_heap([ + (fictive_miniblock_position, block_info.number.into()), + (fictive_miniblock_position + 1, block_info.timestamp.into()), + ( + 
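+            // Slot layout per transaction (see `TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO`):
+            // 0 = block number, 1 = timestamp, 2 = previous block hash, 3 = max virtual blocks,
+            // matching the four writes in this array.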
fictive_miniblock_position + 2, + h256_to_u256(block_info.prev_block_hash), + ), + ( + fictive_miniblock_position + 3, + block_info.max_virtual_blocks_to_create.into(), + ), + ]) +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs new file mode 100644 index 00000000000..9d5b229f23a --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -0,0 +1,26 @@ +mod bootloader; +mod default_aa; +//mod block_tip; FIXME: requires vm metrics +mod bytecode_publishing; +// mod call_tracer; FIXME: requires tracers +// mod circuits; FIXME: requires tracers / circuit stats +mod code_oracle; +mod gas_limit; +mod get_used_contracts; +mod is_write_initial; +mod l1_tx_execution; +mod l2_blocks; +mod nonce_holder; +// mod precompiles; FIXME: requires tracers / circuit stats +// mod prestate_tracer; FIXME: is pre-state tracer still relevant? +mod refunds; +mod require_eip712; +mod rollbacks; +mod sekp256r1; +mod simple_execution; +mod storage; +mod tester; +mod tracing_execution_error; +mod transfer; +mod upgrade; +mod utils; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs new file mode 100644 index 00000000000..b18676cf2ba --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -0,0 +1,179 @@ +use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; + +use crate::{ + interface::{ + ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, + VmRevertReason, + }, + vm_fast::tests::{ + tester::{Account, VmTesterBuilder}, + utils::read_nonce_holder_tester, + }, +}; + +pub enum NonceHolderTestMode { + SetValueUnderNonce, + IncreaseMinNonceBy5, + IncreaseMinNonceTooMuch, + LeaveNonceUnused, + IncreaseMinNonceBy1, + SwitchToArbitraryOrdering, +} + +impl From for u8 { + fn from(mode: NonceHolderTestMode) -> u8 { + match mode { + NonceHolderTestMode::SetValueUnderNonce => 0, + NonceHolderTestMode::IncreaseMinNonceBy5 => 1, + NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, + NonceHolderTestMode::LeaveNonceUnused => 3, + NonceHolderTestMode::IncreaseMinNonceBy1 => 4, + NonceHolderTestMode::SwitchToArbitraryOrdering => 5, + } + } +} + +#[test] +fn test_nonce_holder() { + let mut account = Account::random(); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_custom_contracts(vec![( + read_nonce_holder_tester().to_vec(), + account.address, + true, + )]) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let mut run_nonce_test = |nonce: u32, + test_mode: NonceHolderTestMode, + error_message: Option, + comment: &'static str| { + // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, + // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. + // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
+ vm.reset_state(true); + let mut transaction = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: account.address, + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { + unreachable!(); + }; + tx_data.signature = vec![test_mode.into()]; + vm.vm.push_transaction_inner(transaction, 0, true); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + if let Some(msg) = error_message { + let expected_error = + TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg, + data: vec![], + })); + let ExecutionResult::Halt { reason } = result.result else { + panic!("Expected revert, got {:?}", result.result); + }; + assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); + } else { + assert!(!result.result.is_failed(), "{comment}: {result:?}"); + } + }; + // Test 1: trying to set value under non sequential nonce value. + run_nonce_test( + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + Some("Previous nonce has not been used".to_string()), + "Allowed to set value under non sequential value", + ); + + // Test 2: increase min nonce by 1 with sequential nonce ordering: + run_nonce_test( + 0u32, + NonceHolderTestMode::IncreaseMinNonceBy1, + None, + "Failed to increment nonce by 1 for sequential account", + ); + + // Test 3: correctly set value under nonce with sequential nonce ordering: + run_nonce_test( + 1u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Failed to set value under nonce sequential value", + ); + + // Test 5: migrate to the arbitrary nonce ordering: + run_nonce_test( + 2u32, + NonceHolderTestMode::SwitchToArbitraryOrdering, + None, + "Failed to switch to arbitrary ordering", + ); + + // Test 6: increase min nonce by 5 + run_nonce_test( + 6u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Failed to increase min nonce by 5", + ); + + // Test 7: since the nonces in range [6,10] are no longer allowed, the + // tx with nonce 10 should not be allowed + run_nonce_test( + 10u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some("Reusing the same nonce twice".to_string()), + "Allowed to reuse nonce below the minimal one", + ); + + // Test 8: we should be able to use nonce 13 + run_nonce_test( + 13u32, + NonceHolderTestMode::SetValueUnderNonce, + None, + "Did not allow to use unused nonce 10", + ); + + // Test 9: we should not be able to reuse nonce 13 + run_nonce_test( + 13u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + Some("Reusing the same nonce twice".to_string()), + "Allowed to reuse the same nonce twice", + ); + + // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 + run_nonce_test( + 14u32, + NonceHolderTestMode::IncreaseMinNonceBy5, + None, + "Did not allow to use a bumped nonce", + ); + + // Test 11: Do not allow bumping nonce by too much + run_nonce_test( + 16u32, + NonceHolderTestMode::IncreaseMinNonceTooMuch, + Some("The value for incrementing the nonce is too high".to_string()), + "Allowed for incrementing min nonce too much", + ); + + // Test 12: Do not allow not setting a nonce as used + run_nonce_test( + 16u32, + NonceHolderTestMode::LeaveNonceUnused, + Some("The nonce was not set as used".to_string()), + "Allowed to leave nonce as unused", + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs new file mode 100644 index 
00000000000..5bdf0930d55 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -0,0 +1,133 @@ +use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; +use zksync_types::{Address, Execute}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +#[test] +fn test_keccak() { + // Execute special transaction and check that at least 1000 keccak calls were made. + let contract = read_precompiles_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + // calldata for `doKeccak(1000)`. + let keccak1000_calldata = + "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(keccak1000_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let keccak_count = vm + .vm + .state + .precompiles_processor + .precompile_cycles_history + .inner() + .iter() + .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) + .count(); + + assert!(keccak_count >= 1000); +} + +#[test] +fn test_sha256() { + // Execute special transaction and check that at least 1000 `sha256` calls were made. + let contract = read_precompiles_contract(); + let address = Address::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![(contract, address, true)]) + .build(); + + // calldata for `doSha256(1000)`. + let sha1000_calldata = + "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: hex::decode(sha1000_calldata).unwrap(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let sha_count = vm + .vm + .state + .precompiles_processor + .precompile_cycles_history + .inner() + .iter() + .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) + .count(); + + assert!(sha_count >= 1000); +} + +#[test] +fn test_ecrecover() { + // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: account.address, + calldata: Vec::new(), + value: Default::default(), + factory_deps: None, + }, + None, + ); + vm.vm.push_transaction(tx); + let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + + let ecrecover_count = vm + .vm + .state + .precompiles_processor + .precompile_cycles_history + .inner() + .iter() + .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) + .count(); + + assert_eq!(ecrecover_count, 1); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs new file mode 100644 index 00000000000..63620c7d9ff --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs @@ -0,0 +1,143 @@ +use std::sync::Arc; + +use once_cell::sync::OnceCell; +use zksync_test_account::TxType; +use zksync_types::{utils::deployed_address_create, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + tracers::PrestateTracer, + vm_latest::{ + constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, + HistoryEnabled, ToTracerPointer, + }, +}; + +#[test] +fn test_prestate_tracer() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + + vm.deploy_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let tx1 = account.get_test_contract_transaction( + vm.test_contract.unwrap(), + false, + Default::default(), + true, + TxType::L2, + ); + vm.vm.push_transaction(tx1); + + let contract_address = vm.test_contract.unwrap(); + let prestate_tracer_result = Arc::new(OnceCell::default()); + let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); + let tracer_ptr = prestate_tracer.into_tracer_pointer(); + vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); + + let prestate_result = Arc::try_unwrap(prestate_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + + assert!(prestate_result.1.contains_key(&contract_address)); +} + +#[test] +fn test_prestate_tracer_diff_mode() { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_random_rich_accounts(1) + .with_deployer() + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build(); + let contract = read_simple_transfer_contract(); + let tx = vm + .deployer + .as_mut() + .expect("You have to initialize builder with deployer") + .get_deploy_tx(&contract, None, TxType::L2) + .tx; + let nonce = tx.nonce().unwrap().0.into(); + vm.vm.push_transaction(tx); + vm.vm.execute(VmExecutionMode::OneTx); + let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); + vm.test_contract = Some(deployed_address); + + // Deploy a second copy of the contract to see its appearance in the pre-state + let tx2 = vm + .deployer + .as_mut() + .expect("You have to initialize builder with deployer") + 
.get_deploy_tx(&contract, None, TxType::L2) + .tx; + let nonce2 = tx2.nonce().unwrap().0.into(); + vm.vm.push_transaction(tx2); + vm.vm.execute(VmExecutionMode::OneTx); + let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); + + let account = &mut vm.rich_accounts[0]; + + //enter ether to contract to see difference in the balance post execution + let tx0 = Execute { + contract_address: vm.test_contract.unwrap(), + calldata: Default::default(), + value: U256::from(100000), + factory_deps: None, + }; + + vm.vm + .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); + + let tx1 = Execute { + contract_address: deployed_address2, + calldata: Default::default(), + value: U256::from(200000), + factory_deps: None, + }; + + vm.vm + .push_transaction(account.get_l2_tx_for_execute(tx1, None)); + let prestate_tracer_result = Arc::new(OnceCell::default()); + let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); + let tracer_ptr = prestate_tracer.into_tracer_pointer(); + vm.vm + .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); + + let prestate_result = Arc::try_unwrap(prestate_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + + //assert that the pre-state contains both deployed contracts with balance zero + assert!(prestate_result.0.contains_key(&deployed_address)); + assert!(prestate_result.0.contains_key(&deployed_address2)); + assert_eq!( + prestate_result.0[&deployed_address].balance, + Some(U256::zero()) + ); + assert_eq!( + prestate_result.0[&deployed_address2].balance, + Some(U256::zero()) + ); + + //assert that the post-state contains both deployed contracts with the correct balance + assert!(prestate_result.1.contains_key(&deployed_address)); + assert!(prestate_result.1.contains_key(&deployed_address2)); + assert_eq!( + prestate_result.1[&deployed_address].balance, + Some(U256::from(100000)) + ); + assert_eq!( + prestate_result.1[&deployed_address2].balance, + Some(U256::from(200000)) + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs new file mode 100644 index 00000000000..21a3129a3a6 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -0,0 +1,221 @@ +use ethabi::Token; +use zksync_types::{Address, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::{read_expensive_contract, read_test_contract}, + }, +}; + +#[test] +fn test_predetermined_refunded_gas() { + // In this test, we compare the execution of the bootloader with the predefined + // refunded gas and without them + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let l1_batch = vm.vm.batch_env.clone(); + + let counter = read_test_contract(); + let account = &mut vm.rich_accounts[0]; + + let DeployContractsTx { + tx, + bytecode_hash: _, + address: _, + } = account.get_deploy_tx(&counter, None, TxType::L2); + vm.vm.push_transaction(tx.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + + assert!(!result.result.is_failed()); + + // If the refund provided by the operator or the final refund are the 0 + // there is no impact of the operator's refund at all and so this test does not + // make much sense. 
+ assert!( + result.refunds.operator_suggested_refund > 0, + "The operator's refund is 0" + ); + assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); + + let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); + assert!(!result_without_predefined_refunds.result.is_failed(),); + + // Here we want to provide the same refund from the operator and check that it's the correct one. + // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. + // But the overall result should be the same + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + vm.vm + .push_transaction_inner(tx.clone(), result.refunds.gas_refunded, true); + + let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result_with_predefined_refunds.result.is_failed()); + + // We need to sort these lists as those are flattened from HashMaps + current_state_with_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); + + // In this test we put the different refund from the operator. + // We still can't use the refund tracer, because it will override the refund. + // But we can check that the logs and events have changed. 
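+    // (Concretely, the assertions below expect the events and storage writes to keep their
+    // counts but change their contents, the system logs to change, and the user L2->L1 logs
+    // and used contract hashes to stay the same.)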
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account.clone()]) + .build(); + + let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; + vm.vm + .push_transaction_inner(tx, changed_operator_suggested_refund, true); + let result = vm.vm.execute(VmExecutionMode::Batch); + let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result.result.is_failed()); + current_state_with_changed_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_changed_predefined_refunds.events.len(), + current_state_without_predefined_refunds.events.len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_changed_predefined_refunds + .deduplicated_storage_logs + .len(), + current_state_without_predefined_refunds + .deduplicated_storage_logs + .len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_changed_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); +} + +#[test] +fn negative_pubdata_for_transaction() { + let expensive_contract_address = Address::random(); + let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); + let expensive_function = expensive_contract.function("expensive").unwrap(); + let cleanup_function = expensive_contract.function("cleanUp").unwrap(); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + expensive_contract_bytecode, + expensive_contract_address, + false, + )]) + .build(); + + let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: expensive_contract_address, + calldata: expensive_function + .encode_input(&[Token::Uint(10.into())]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(expensive_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
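+    // (Because no new pubdata has to be paid for, the operator-suggested refund is expected to
+    // be granted in full, i.e. `gas_refunded == operator_suggested_refund` in the assertions below.)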
+ let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: expensive_contract_address, + calldata: cleanup_function.encode_input(&[]).unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(clean_up_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + assert!(result.refunds.operator_suggested_refund > 0); + assert_eq!( + result.refunds.gas_refunded, + result.refunds.operator_suggested_refund + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs new file mode 100644 index 00000000000..7e378a2b62c --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -0,0 +1,171 @@ +use ethabi::Token; +use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_state::ReadStorage; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{ + fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, + L2ChainId, Nonce, Transaction, U256, +}; +use zksync_utils::h256_to_u256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{Account, VmTester, VmTesterBuilder}, + utils::read_many_owners_custom_account_contract, + }, +}; + +impl VmTester { + pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { + let key = storage_key_for_standard_token_balance( + AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), + &address, + ); + self.vm + .inner + .world_diff + .get_storage_state() + .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(self.vm.world.storage.read_value(&key))) + } +} + +/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy +/// and EIP712 transactions. +/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. +#[tokio::test] +async fn test_require_eip712() { + // Use 3 accounts: + // - `private_address` - EOA account, where we have the key + // - `account_address` - AA account, where the contract is deployed + // - beneficiary - an EOA account, where we'll try to transfer the tokens. + let account_abstraction = Account::random(); + let mut private_account = Account::random(); + let beneficiary = Account::random(); + + let (bytecode, contract) = read_many_owners_custom_account_contract(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) + .build(); + + assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); + + let chain_id: u32 = 270; + + // First, let's set the owners of the AA account to the `private_address`. + // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). 
+ let set_owners_function = contract.function("setOwners").unwrap(); + let encoded_input = set_owners_function + .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) + .unwrap(); + + let tx = private_account.get_l2_tx_for_execute( + Execute { + contract_address: account_abstraction.address, + calldata: encoded_input, + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + let private_account_balance = vm.get_eth_balance(private_account.address); + + // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). + // Normally this would not work - unless the operator is malicious. + let aa_raw_tx = TransactionParameters { + nonce: U256::from(0), + to: Some(beneficiary.address), + gas: U256::from(100000000), + gas_price: Some(U256::from(10000000)), + value: U256::from(888000088), + data: vec![], + chain_id: 270, + transaction_type: None, + access_list: None, + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + max_fee_per_blob_gas: None, + blob_versioned_hashes: None, + }; + + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); + + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + l2_tx.set_input(aa_tx, hash); + // Pretend that operator is malicious and sets the initiator to the AA account. + l2_tx.common_data.initiator_address = account_abstraction.address; + let transaction: Transaction = l2_tx.into(); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert_eq!( + vm.get_eth_balance(beneficiary.address), + U256::from(888000088) + ); + // Make sure that the tokens were transferred from the AA account. 
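+    // (The private account's balance is unchanged, so the transferred value must have come from
+    // the AA account rather than from the signing EOA.)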
+ assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); + + // // Now send the 'classic' EIP712 transaction + let tx_712 = L2Tx::new( + beneficiary.address, + vec![], + Nonce(1), + Fee { + gas_limit: U256::from(1000000000), + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + gas_per_pubdata_limit: U256::from(1000000000), + }, + account_abstraction.address, + U256::from(28374938), + vec![], + Default::default(), + ); + + let mut transaction_request: TransactionRequest = tx_712.into(); + transaction_request.chain_id = Some(chain_id.into()); + + let domain = Eip712Domain::new(L2ChainId::from(chain_id)); + let signature = private_account + .get_pk_signer() + .sign_typed_data(&domain, &transaction_request) + .await + .unwrap(); + let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); + + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); + + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + l2_tx.set_input(encoded_tx, aa_hash); + + let transaction: Transaction = l2_tx.into(); + vm.vm.push_transaction(transaction); + vm.vm.execute(VmExecutionMode::OneTx); + + assert_eq!( + vm.get_eth_balance(beneficiary.address), + U256::from(916375026) + ); + assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs new file mode 100644 index 00000000000..c530c5af18e --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -0,0 +1,144 @@ +use ethabi::Token; +use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_types::{Execute, U256}; + +use crate::{ + interface::TxExecutionMode, + vm_fast::tests::{ + tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, +}; + +#[test] +fn test_vm_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let mut account = vm.rich_accounts[0].clone(); + let counter = read_test_contract(); + let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(tx_0.clone(), false), + TransactionTestInfo::new_processed(tx_1.clone(), false), + TransactionTestInfo::new_processed(tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), + // The correct nonce is 0, this tx will fail + TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_0.clone(), false), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + // The correct nonce 
is 1, this tx will fail + TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_1, false), + // The correct nonce is 2, this tx will fail + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), + // This tx will succeed + TransactionTestInfo::new_processed(tx_2.clone(), false), + // This tx will fail + TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), + TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +#[test] +fn test_vm_loadnext_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let mut account = vm.rich_accounts[0].clone(); + + let loadnext_contract = get_loadnext_contract(); + let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; + let DeployContractsTx { + tx: loadnext_deploy_tx, + address, + .. + } = account.get_deploy_tx_with_factory_deps( + &loadnext_contract.bytecode, + Some(loadnext_constructor_data), + loadnext_contract.factory_deps.clone(), + TxType::L2, + ); + + let loadnext_tx_1 = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let loadnext_tx_2 = account.get_l2_tx_for_execute( + Execute { + contract_address: address, + calldata: LoadnextContractExecutionParams { + reads: 100, + writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused.into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_1, false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), + TransactionTestInfo::new_processed(loadnext_tx_2, false), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs new file mode 100644 index 00000000000..1e761b30ca6 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -0,0 +1,75 @@ +use zk_evm_1_5_0::zkevm_opcode_defs::p256; +use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; +use zksync_types::{web3::keccak256, Execute, H256, U256}; +use zksync_utils::h256_to_u256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + 
vm_fast::tests::tester::VmTesterBuilder, + vm_latest::ExecutionResult, +}; + +#[test] +fn test_sekp256r1() { + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_execution_mode(TxExecutionMode::EthCall) + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + + // The digest, secret key and public key were copied from the following test suit: `https://github.com/hyperledger/besu/blob/b6a6402be90339367d5bcabcd1cfd60df4832465/crypto/algorithms/src/test/java/org/hyperledger/besu/crypto/SECP256R1Test.java#L36` + let sk = p256::SecretKey::from_slice( + &hex::decode("519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464").unwrap(), + ) + .unwrap(); + let sk = p256::ecdsa::SigningKey::from(sk); + + let digest = keccak256(&hex::decode("5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8").unwrap()); + let public_key_encoded = hex::decode("1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9").unwrap(); + + let (sig, _) = sk.sign_prehash_recoverable(&digest).unwrap(); + let (r, s) = sig.split_bytes(); + + let mut encoded_r = [0u8; 32]; + encoded_r.copy_from_slice(&r); + + let mut encoded_s = [0u8; 32]; + encoded_s.copy_from_slice(&s); + + let mut x = [0u8; 32]; + x.copy_from_slice(&public_key_encoded[0..32]); + + let mut y = [0u8; 32]; + y.copy_from_slice(&public_key_encoded[32..64]); + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + calldata: [digest, encoded_r, encoded_s, x, y].concat(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + + let execution_result = vm.vm.execute(VmExecutionMode::Batch); + + let ExecutionResult::Success { output } = execution_result.result else { + panic!("batch failed") + }; + + let output = H256::from_slice(&output); + + assert_eq!( + h256_to_u256(output), + U256::from(1u32), + "verification was not successful" + ); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs new file mode 100644 index 00000000000..7d866e1539b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs @@ -0,0 +1,78 @@ +use crate::{ + interface::{ExecutionResult, VmExecutionMode, VmInterface}, + vm_fast::tests::tester::{TxType, VmTesterBuilder}, +}; + +#[test] +fn estimate_fee() { + let mut vm_tester = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + vm_tester.deploy_test_contract(); + let account = &mut vm_tester.rich_accounts[0]; + + let tx = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L2, + ); + + vm_tester.vm.push_transaction(tx); + + let result = vm_tester.vm.execute(VmExecutionMode::OneTx); + assert!(matches!(result.result, ExecutionResult::Success { .. 
})); +} + +#[test] +fn simple_execute() { + let mut vm_tester = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + vm_tester.deploy_test_contract(); + + let account = &mut vm_tester.rich_accounts[0]; + + let tx1 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx2 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + true, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx3 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + let vm = &mut vm_tester.vm; + vm.push_transaction(tx1); + vm.push_transaction(tx2); + vm.push_transaction(tx3); + let tx = vm.execute(VmExecutionMode::OneTx); + assert!(matches!(tx.result, ExecutionResult::Success { .. })); + let tx = vm.execute(VmExecutionMode::OneTx); + assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + let tx = vm.execute(VmExecutionMode::OneTx); + assert!(matches!(tx.result, ExecutionResult::Success { .. })); + let block_tip = vm.execute(VmExecutionMode::Batch); + assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs new file mode 100644 index 00000000000..733ce1f0618 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -0,0 +1,130 @@ +use ethabi::Token; +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_types::{Address, Execute, U256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled}, + vm_fast::tests::tester::VmTesterBuilder, +}; + +fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { + let bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", + ); + + let test_contract_address = Address::random(); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) + .build(); + + let account = &mut vm.rich_accounts[0]; + + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: test_contract_address, + calldata: first_tx_calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: test_contract_address, + calldata: second_tx_calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.make_snapshot(); + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "First tx failed"); + vm.vm.pop_snapshot_no_rollback(); + + // We rollback once because transient storage and rollbacks are a tricky combination. 
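+    // (That is, `tx2` is executed once, rolled back, and then executed again below; only the
+    // pubdata published by the second run is returned.)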
+ vm.vm.make_snapshot(); + vm.vm.push_transaction(tx2.clone()); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Second tx failed"); + vm.vm.rollback_to_the_latest_snapshot(); + + vm.vm.make_snapshot(); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Second tx failed on second run"); + + result.statistics.pubdata_published +} + +fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { + test_storage(vec![], second_tx_calldata) +} + +#[test] +fn test_storage_behavior() { + let contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", + ); + + // In all of the tests below we provide the first tx to ensure that the tracers will not include + // the statistics from the start of the bootloader and will only include those for the transaction itself. + + let base_pubdata = test_storage_one_tx(vec![]); + let simple_test_pubdata = test_storage_one_tx( + contract + .function("simpleWrite") + .unwrap() + .encode_input(&[]) + .unwrap(), + ); + let resetting_write_pubdata = test_storage_one_tx( + contract + .function("resettingWrite") + .unwrap() + .encode_input(&[]) + .unwrap(), + ); + let resetting_write_via_revert_pubdata = test_storage_one_tx( + contract + .function("resettingWriteViaRevert") + .unwrap() + .encode_input(&[]) + .unwrap(), + ); + + assert_eq!(simple_test_pubdata - base_pubdata, 65); + assert_eq!(resetting_write_pubdata - base_pubdata, 34); + assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); +} + +#[test] +fn test_transient_storage_behavior() { + let contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", + ); + + let first_tstore_test = contract + .function("testTransientStore") + .unwrap() + .encode_input(&[]) + .unwrap(); + // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. 
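+    // (Presumably `assertTValue(0)` reads the transient slot written by `testTransientStore` and
+    // checks that it is zero again in the following transaction.)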
+ let second_tstore_test = contract + .function("assertTValue") + .unwrap() + .encode_input(&[Token::Uint(U256::zero())]) + .unwrap(); + + test_storage(first_tstore_test, second_tstore_test); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs new file mode 100644 index 00000000000..781069ddf49 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs @@ -0,0 +1,6 @@ +pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; +pub(crate) use vm_tester::{default_l1_batch, get_empty_storage, VmTester, VmTesterBuilder}; +pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; + +mod transaction_test_info; +mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs new file mode 100644 index 00000000000..9bb013542c7 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -0,0 +1,246 @@ +use zksync_state::ReadStorage; +use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; + +use super::VmTester; +use crate::{ + interface::{ + CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + }, + vm_fast::Vm, +}; + +#[derive(Debug, Clone)] +pub(crate) enum TxModifier { + WrongSignatureLength, + WrongSignature, + WrongMagicValue, + WrongNonce, + NonceReused, +} + +#[derive(Debug, Clone)] +pub(crate) enum TxExpectedResult { + Rejected { error: ExpectedError }, + Processed { rollback: bool }, +} + +#[derive(Debug, Clone)] +pub(crate) struct TransactionTestInfo { + tx: Transaction, + result: TxExpectedResult, +} + +#[derive(Debug, Clone)] +pub(crate) struct ExpectedError { + pub(crate) revert_reason: TxRevertReason, + pub(crate) modifier: Option, +} + +impl From for ExpectedError { + fn from(value: TxModifier) -> Self { + let revert_reason = match value { + TxModifier::WrongSignatureLength => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Signature length is incorrect".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, + 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, + 116, 0, 0, 0, + ], + }) + } + TxModifier::WrongSignature => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), + data: vec![], + }) + } + TxModifier::WrongMagicValue => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "v is neither 27 nor 28".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, + 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }) + + } + TxModifier::WrongNonce => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Incorrect nonce".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, + 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + }) + } + TxModifier::NonceReused => { + Halt::ValidationFailed(VmRevertReason::General { + msg: "Reusing the same nonce twice".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, + 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, + 0, 0, 0, + ], + }) + } + }; + + ExpectedError { + revert_reason: TxRevertReason::Halt(revert_reason), + modifier: Some(value), + } + } +} + +impl TransactionTestInfo { + pub(crate) fn new_rejected( + mut transaction: Transaction, + expected_error: ExpectedError, + ) -> Self { + transaction.common_data = match transaction.common_data { + ExecuteTransactionCommon::L2(mut data) => { + if let Some(modifier) = &expected_error.modifier { + match modifier { + TxModifier::WrongSignatureLength => { + data.signature = data.signature[..data.signature.len() - 20].to_vec() + } + TxModifier::WrongSignature => data.signature = vec![27u8; 65], + TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], + TxModifier::WrongNonce => { + // Do not need to modify signature for nonce error + } + TxModifier::NonceReused => { + // Do not need to modify signature for nonce error + } + } + } + ExecuteTransactionCommon::L2(data) + } + _ => panic!("L1 transactions are not supported"), + }; + + Self { + tx: transaction, + result: TxExpectedResult::Rejected { + error: expected_error, + }, + } + } + + pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { + Self { + tx: transaction, + result: TxExpectedResult::Processed { + rollback: should_be_rollbacked, + }, + } + } + + fn verify_result(&self, result: &VmExecutionResultAndLogs) { + match &self.result { + TxExpectedResult::Rejected { error } => match &result.result { + ExecutionResult::Success { .. 
} => {
+                    panic!("Transaction should be reverted {:?}", self.tx.nonce())
+                }
+                ExecutionResult::Revert { output } => match &error.revert_reason {
+                    TxRevertReason::TxReverted(expected) => {
+                        assert_eq!(output, expected)
+                    }
+                    _ => {
+                        panic!("Error types mismatch");
+                    }
+                },
+                ExecutionResult::Halt { reason } => match &error.revert_reason {
+                    TxRevertReason::Halt(expected) => {
+                        assert_eq!(reason, expected)
+                    }
+                    _ => {
+                        panic!("Error types mismatch");
+                    }
+                },
+            },
+            TxExpectedResult::Processed { .. } => {
+                assert!(!result.result.is_failed());
+            }
+        }
+    }
+
+    fn should_rollback(&self) -> bool {
+        match &self.result {
+            TxExpectedResult::Rejected { .. } => true,
+            TxExpectedResult::Processed { rollback } => *rollback,
+        }
+    }
+}
+
+// TODO this doesn't include all the state of ModifiedWorld
+#[derive(Debug, PartialEq)]
+struct VmStateDump {
+    state: vm2::State,
+    storage_writes: Vec<((H160, U256), U256)>,
+    events: Box<[vm2::Event]>,
+}
+
+impl<S: ReadStorage> Vm<S> {
+    fn dump_state(&self) -> VmStateDump {
+        VmStateDump {
+            state: self.inner.state.clone(),
+            storage_writes: self
+                .inner
+                .world_diff
+                .get_storage_state()
+                .iter()
+                .map(|(k, v)| (*k, *v))
+                .collect(),
+            events: self.inner.world_diff.events().into(),
+        }
+    }
+}
+
+impl VmTester {
+    pub(crate) fn execute_and_verify_txs(
+        &mut self,
+        txs: &[TransactionTestInfo],
+    ) -> CurrentExecutionState {
+        for tx_test_info in txs {
+            self.execute_tx_and_verify(tx_test_info.clone());
+        }
+        self.vm.execute(VmExecutionMode::Batch);
+        let mut state = self.vm.get_current_execution_state();
+        state.used_contract_hashes.sort();
+        state
+    }
+
+    pub(crate) fn execute_tx_and_verify(
+        &mut self,
+        tx_test_info: TransactionTestInfo,
+    ) -> VmExecutionResultAndLogs {
+        self.vm.make_snapshot();
+        let inner_state_before = self.vm.dump_state();
+        self.vm.push_transaction(tx_test_info.tx.clone());
+        let result = self.vm.execute(VmExecutionMode::OneTx);
+        tx_test_info.verify_result(&result);
+        if tx_test_info.should_rollback() {
+            self.vm.rollback_to_the_latest_snapshot();
+            let inner_state_after = self.vm.dump_state();
+            pretty_assertions::assert_eq!(
+                inner_state_before,
+                inner_state_after,
+                "Inner state before and after rollback should be equal"
+            );
+        } else {
+            self.vm.pop_snapshot_no_rollback();
+        }
+        result
+    }
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs
new file mode 100644
index 00000000000..7715dd0a6d4
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs
@@ -0,0 +1,296 @@
+use std::{cell::RefCell, rc::Rc};
+
+use vm2::WorldDiff;
+use zksync_contracts::BaseSystemContracts;
+use zksync_state::{InMemoryStorage, StoragePtr};
+use zksync_test_account::{Account, TxType};
+use zksync_types::{
+    block::L2BlockHasher,
+    fee_model::BatchFeeInput,
+    get_code_key, get_is_account_key,
+    helpers::unix_timestamp_ms,
+    utils::{deployed_address_create, storage_key_for_eth_balance},
+    AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId,
+    StorageKey, U256,
+};
+use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
+
+use crate::{
+    interface::{
+        L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface,
+    },
+    versions::vm_fast::{tests::utils::read_test_contract, vm::Vm},
+    vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, utils::l2_blocks::load_last_l2_block},
+};
+
+pub(crate) struct VmTester {
+    pub(crate) vm: Vm<StoragePtr<InMemoryStorage>>,
+    pub(crate) storage: StoragePtr<InMemoryStorage>,
+    pub(crate) deployer: Option<Account>,
+    pub(crate) test_contract: Option<Address>,
+    pub(crate) fee_account: Address,
+    pub(crate) rich_accounts: Vec<Account>,
+    pub(crate) custom_contracts: Vec<ContractsToDeploy>,
+}
+
+impl VmTester {
+    pub(crate) fn deploy_test_contract(&mut self) {
+        let contract = read_test_contract();
+        let tx = self
+            .deployer
+            .as_mut()
+            .expect("You have to initialize builder with deployer")
+            .get_deploy_tx(&contract, None, TxType::L2)
+            .tx;
+        let nonce = tx.nonce().unwrap().0.into();
+        self.vm.push_transaction(tx);
+        self.vm.execute(VmExecutionMode::OneTx);
+        let deployed_address =
+            deployed_address_create(self.deployer.as_ref().unwrap().address, nonce);
+        self.test_contract = Some(deployed_address);
+    }
+
+    pub(crate) fn reset_with_empty_storage(&mut self) {
+        self.storage = Rc::new(RefCell::new(get_empty_storage()));
+        self.vm.inner.world_diff = WorldDiff::default();
+        self.reset_state(false);
+    }
+
+    /// Resets the state of the VM to the initial state.
+    /// If `use_latest_l2_block` is true, the VM will use the latest L2 block from storage;
+    /// otherwise, it will use the first L2 block of the L1 batch env.
+    pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) {
+        for account in self.rich_accounts.iter_mut() {
+            account.nonce = Nonce(0);
+            make_account_rich(self.storage.clone(), account);
+        }
+        if let Some(deployer) = &self.deployer {
+            make_account_rich(self.storage.clone(), deployer);
+        }
+
+        if !self.custom_contracts.is_empty() {
+            println!("Inserting custom contracts is not yet supported")
+            // `insert_contracts(&mut self.storage, &self.custom_contracts);`
+        }
+
+        let storage = self.storage.clone();
+        {
+            let mut storage = storage.borrow_mut();
+            // Commit pending storage changes (old VM versions commit them on successful execution)
+            for (&(address, slot), &value) in self.vm.inner.world_diff.get_storage_state() {
+                let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot));
+                storage.set_value(key, u256_to_h256(value));
+            }
+        }
+
+        let mut l1_batch = self.vm.batch_env.clone();
+        if use_latest_l2_block {
+            let last_l2_block = load_last_l2_block(&storage).unwrap_or(L2Block {
+                number: 0,
+                timestamp: 0,
+                hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
+            });
+            l1_batch.first_l2_block = L2BlockEnv {
+                number: last_l2_block.number + 1,
+                timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp),
+                prev_block_hash: last_l2_block.hash,
+                max_virtual_blocks_to_create: 1,
+            };
+        }
+
+        let vm = Vm::new(l1_batch, self.vm.system_env.clone(), storage);
+
+        if self.test_contract.is_some() {
+            self.deploy_test_contract();
+        }
+        self.vm = vm;
+    }
+}
+
+pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool);
+
+pub(crate) struct VmTesterBuilder {
+    storage: Option<InMemoryStorage>,
+    l1_batch_env: Option<L1BatchEnv>,
+    system_env: SystemEnv,
+    deployer: Option<Account>,
+    rich_accounts: Vec<Account>,
+    custom_contracts: Vec<ContractsToDeploy>,
+}
+
+impl Clone for VmTesterBuilder {
+    fn clone(&self) -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: self.l1_batch_env.clone(),
+            system_env: self.system_env.clone(),
+            deployer: self.deployer.clone(),
+            rich_accounts: self.rich_accounts.clone(),
+            custom_contracts: self.custom_contracts.clone(),
+        }
+    }
+}
+
+#[allow(dead_code)]
+impl VmTesterBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: None,
+            system_env: SystemEnv {
+                zk_porter_available: false,
+                version: ProtocolVersionId::latest(),
+                base_system_smart_contracts: BaseSystemContracts::playground(),
+                bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
+                execution_mode: TxExecutionMode::VerifyExecute,
+                default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
+                chain_id: L2ChainId::from(270),
+            },
+            deployer: None,
+            rich_accounts: vec![],
+            custom_contracts: vec![],
+        }
+    }
+
+    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
+        self.l1_batch_env = Some(l1_batch_env);
+        self
+    }
+
+    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
+        self.system_env = system_env;
+        self
+    }
+
+    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
+        self.storage = Some(storage);
+        self
+    }
+
+    pub(crate) fn with_base_system_smart_contracts(
+        mut self,
+        base_system_smart_contracts: BaseSystemContracts,
+    ) -> Self {
+        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
+        self
+    }
+
+    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
+        self.system_env.bootloader_gas_limit = gas_limit;
+        self
+    }
+
+    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
+        self.system_env.execution_mode = execution_mode;
+        self
+    }
+
+    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
+        self.storage = Some(get_empty_storage());
+        self
+    }
+
+    pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self {
+        for _ in 0..number {
+            let account = Account::random();
+            self.rich_accounts.push(account);
+        }
+        self
+    }
+
+    pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self {
+        self.rich_accounts.extend(accounts);
+        self
+    }
+
+    pub(crate) fn with_deployer(mut self) -> Self {
+        let deployer = Account::random();
+        self.deployer = Some(deployer);
+        self
+    }
+
+    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self {
+        self.custom_contracts = contracts;
+        self
+    }
+
+    pub(crate) fn build(self) -> VmTester {
+        let l1_batch_env = self
+            .l1_batch_env
+            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
+
+        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
+        insert_contracts(&mut raw_storage, &self.custom_contracts);
+        let storage_ptr = Rc::new(RefCell::new(raw_storage));
+        for account in self.rich_accounts.iter() {
+            make_account_rich(storage_ptr.clone(), account);
+        }
+        if let Some(deployer) = &self.deployer {
+            make_account_rich(storage_ptr.clone(), deployer);
+        }
+
+        let fee_account = l1_batch_env.fee_account;
+        let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone());
+
+        VmTester {
+            vm,
+            storage: storage_ptr,
+            deployer: self.deployer,
+            test_contract: None,
+            fee_account,
+            rich_accounts: self.rich_accounts.clone(),
+            custom_contracts: self.custom_contracts.clone(),
+        }
+    }
+}
+
+pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv {
+    let timestamp = unix_timestamp_ms();
+    L1BatchEnv {
+        previous_batch_hash: None,
+        number,
+        timestamp,
+        fee_input: BatchFeeInput::l1_pegged(
+            50_000_000_000, // 50 gwei
+            250_000_000,    // 0.25 gwei
+        ),
+        fee_account: Address::random(),
+        enforced_base_fee: None,
+        first_l2_block: L2BlockEnv {
+            number: 1,
+            timestamp,
+            prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
+            max_virtual_blocks_to_create: 100,
+        },
+    }
+}
+
+pub(crate) fn make_account_rich(storage: StoragePtr<InMemoryStorage>, account: &Account) {
+    let key = storage_key_for_eth_balance(&account.address);
+    storage
+        .as_ref()
+        .borrow_mut()
+        .set_value(key, u256_to_h256(U256::from(10u64.pow(19))));
+}
+
+pub(crate) fn get_empty_storage() -> InMemoryStorage {
+    InMemoryStorage::with_system_contracts(hash_bytecode)
+}
+
+// Inserts the contracts into the test environment, bypassing the
+// deployer system
contract. Besides the reference to storage +// it accepts a `contracts` tuple of information about the contract +// and whether or not it is an account. +fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { + for (contract, address, is_account) in contracts { + let deployer_code_key = get_code_key(address); + raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); + + if *is_account { + let is_account_key = get_is_account_key(address); + raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + + raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs new file mode 100644 index 00000000000..75144839006 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -0,0 +1,51 @@ +use zksync_types::{Execute, H160}; + +use crate::{ + interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + vm_fast::tests::{ + tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, + utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, + }, +}; + +#[test] +fn test_tracing_of_execution_errors() { + let contract_address = H160::random(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .build(); + + let account = &mut vm.rich_accounts[0]; + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address, + calldata: get_execute_error_calldata(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( + tx, + ExpectedError { + revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { + msg: "short".to_string(), + data: vec![ + 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ], + }), + modifier: None, + }, + )); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs new file mode 100644 index 00000000000..3b61b8ac7f1 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -0,0 +1,218 @@ +use ethabi::Token; +use zksync_contracts::{load_contract, read_bytecode}; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; +use zksync_utils::u256_to_h256; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_fast::tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::get_balance, + }, +}; + +enum TestOptions { + Send(U256), + Transfer(U256), +} + +fn test_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let recipeint_bytecode = read_bytecode( + 
"etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + + let test_contract_address = Address::random(); + let recipient_address = Address::random(); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) + .unwrap(), + ), + }; + + let mut storage = get_empty_storage(); + storage.set_value( + storage_key_for_eth_balance(&test_contract_address), + u256_to_h256(value), + ); + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .with_custom_contracts(vec![ + (test_bytecode, test_contract_address, false), + (recipeint_bytecode, recipient_address, false), + ]) + .build(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: test_contract_address, + calldata, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let tx_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !tx_result.result.is_failed(), + "Transaction wasn't successful" + ); + + let batch_result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); + + let new_recipient_balance = get_balance( + AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), + &recipient_address, + &mut vm.vm.world.storage, + vm.vm.inner.world_diff.get_storage_state(), + ); + + assert_eq!(new_recipient_balance, value); +} + +#[test] +fn test_send_and_transfer() { + test_send_or_transfer(TestOptions::Send(U256::zero())); + test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); + test_send_or_transfer(TestOptions::Transfer(U256::zero())); + test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); +} + +fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { + let test_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipeint_bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + let test_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", + ); + let reentrant_recipient_abi = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", + ); + + let test_contract_address = Address::random(); + let reentrant_recipeint_address = Address::random(); + + let (value, calldata) = match test_option { + TestOptions::Send(value) => ( + value, + test_abi + .function("send") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipeint_address), + Token::Uint(value), + ]) + .unwrap(), + ), + TestOptions::Transfer(value) => ( + value, + test_abi + .function("transfer") + .unwrap() + .encode_input(&[ + Token::Address(reentrant_recipeint_address), + Token::Uint(value), + ]) + .unwrap(), + ), + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() 
+ .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_deployer() + .with_random_rich_accounts(1) + .with_custom_contracts(vec![ + (test_bytecode, test_contract_address, false), + ( + reentrant_recipeint_bytecode, + reentrant_recipeint_address, + false, + ), + ]) + .build(); + + // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. + let account = &mut vm.rich_accounts[0]; + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: reentrant_recipeint_address, + calldata: reentrant_recipient_abi + .function("setX") + .unwrap() + .encode_input(&[]) + .unwrap(), + value: U256::from(1), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !tx1_result.result.is_failed(), + "Transaction 1 wasn't successful" + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: test_contract_address, + calldata, + value, + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx2); + let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + tx2_result.result.is_failed(), + "Transaction 2 should have failed, but it succeeded" + ); + + let batch_result = vm.vm.execute(VmExecutionMode::Batch); + assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +} + +#[test] +fn test_reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); + test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); + test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); + test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( + U256::from(10).pow(18.into()), + )); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs new file mode 100644 index 00000000000..61643677609 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -0,0 +1,343 @@ +use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; +use zksync_test_account::TxType; +use zksync_types::{ + ethabi::{Contract, Token}, + get_code_key, get_known_code_key, + protocol_upgrade::ProtocolUpgradeTxCommonData, + Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, + CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use crate::{ + interface::{ + ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, + VmInterfaceHistoryEnabled, + }, + vm_fast::tests::{ + tester::VmTesterBuilder, + utils::{ + get_complex_upgrade_abi, read_complex_upgrade, read_test_contract, + verify_required_storage, + }, + }, +}; + +/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: +/// - This transaction must be the only one in block +/// - If present, this transaction must be the first one in block +#[test] +fn test_protocol_upgrade_is_first() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let bytecode_hash = hash_bytecode(&read_test_contract()); + vm.storage + .borrow_mut() + .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + + 
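+ // The bytecode hash is marked as known above because force deployments assume their bytecode hashes are already present in `KnownCodesStorage`.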
// Here we just use some random transaction of protocol upgrade type: + let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: H160::random(), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + // Another random upgrade transaction + let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: H160::random(), + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + let normal_l1_transaction = vm.rich_accounts[0] + .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) + .tx; + + let expected_error = + Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); + + vm.vm.make_snapshot(); + // Test 1: there must be only one system transaction in block + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(another_protocol_upgrade_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error.clone() + } + ); + + // Test 2: the protocol upgrade tx must be the first one in block + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(normal_l1_transaction.clone()); + vm.vm.push_transaction(protocol_upgrade_transaction.clone()); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert_eq!( + result.result, + ExecutionResult::Halt { + reason: expected_error + } + ); + + vm.vm.rollback_to_the_latest_snapshot(); + vm.vm.make_snapshot(); + vm.vm.push_transaction(protocol_upgrade_transaction); + vm.vm.push_transaction(normal_l1_transaction); + + vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed()); +} + +/// In this test we try to test how force deployments could be done via protocol upgrade transactions. +#[test] +fn test_force_deploy_upgrade() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let storage_view = vm.storage.clone(); + let bytecode_hash = hash_bytecode(&read_test_contract()); + + let known_code_key = get_known_code_key(&bytecode_hash); + // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
+ storage_view + .borrow_mut() + .set_value(known_code_key, u256_to_h256(1.into())); + drop(storage_view); + + let address_to_deploy = H160::random(); + // Here we just use some random transaction of protocol upgrade type: + let transaction = get_forced_deploy_tx(&[ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash, + // The address on which to deploy the bytecode hash to + address: address_to_deploy, + // Whether to run the constructor on the force deployment + call_constructor: false, + // The value with which to initialize a contract + value: U256::zero(), + // The constructor calldata + input: vec![], + }]); + + vm.vm.push_transaction(transaction); + + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = [(bytecode_hash, get_code_key(&address_to_deploy))]; + + // Verify that the bytecode has been set correctly + verify_required_storage( + &expected_slots, + &mut *vm.storage.borrow_mut(), + vm.vm.inner.world_diff.get_storage_state(), + ); +} + +/// Here we show how the work with the complex upgrader could be done +#[test] +fn test_complex_upgrader() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let bytecode_hash = hash_bytecode(&read_complex_upgrade()); + let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); + + // Let's assume that the bytecode for the implementation of the complex upgrade + // is already deployed in some address in user space + let upgrade_impl = H160::random(); + let account_code_key = get_code_key(&upgrade_impl); + + { + let mut storage = vm.storage.borrow_mut(); + storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); + storage.set_value( + get_known_code_key(&msg_sender_test_hash), + u256_to_h256(1.into()), + ); + storage.set_value(account_code_key, bytecode_hash); + storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); + storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); + } + + let address_to_deploy1 = H160::random(); + let address_to_deploy2 = H160::random(); + + let transaction = get_complex_upgrade_tx( + upgrade_impl, + address_to_deploy1, + address_to_deploy2, + bytecode_hash, + ); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "The force upgrade was not successful" + ); + + let expected_slots = [ + (bytecode_hash, get_code_key(&address_to_deploy1)), + (bytecode_hash, get_code_key(&address_to_deploy2)), + ]; + + // Verify that the bytecode has been set correctly + verify_required_storage( + &expected_slots, + &mut *vm.storage.borrow_mut(), + vm.vm.inner.world_diff.get_storage_state(), + ); +} + +#[derive(Debug, Clone)] +struct ForceDeployment { + // The bytecode hash to put on an address + bytecode_hash: H256, + // The address on which to deploy the bytecode hash to + address: Address, + // Whether to run the constructor on the force deployment + call_constructor: bool, + // The value with which to initialize a contract + value: U256, + // The constructor calldata + input: Vec, +} + +fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { + let deployer = deployer_contract(); + let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); + + let encoded_deployments: Vec<_> = deployment + .iter() + 
.map(|deployment| { + Token::Tuple(vec![ + Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), + Token::Address(deployment.address), + Token::Bool(deployment.call_constructor), + Token::Uint(deployment.value), + Token::Bytes(deployment.input.clone()), + ]) + }) + .collect(); + + let params = [Token::Array(encoded_deployments)]; + + let calldata = contract_function + .encode_input(¶ms) + .expect("failed to encode parameters"); + + let execute = Execute { + contract_address: CONTRACT_DEPLOYER_ADDRESS, + calldata, + factory_deps: vec![], + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +// Returns the transaction that performs a complex protocol upgrade. +// The first param is the address of the implementation of the complex upgrade +// in user-space, while the next 3 params are params of the implementation itself +// For the explanation for the parameters, please refer to: +// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol +fn get_complex_upgrade_tx( + implementation_address: Address, + address1: Address, + address2: Address, + bytecode_hash: H256, +) -> Transaction { + let impl_contract = get_complex_upgrade_abi(); + let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); + let impl_calldata = impl_function + .encode_input(&[ + Token::Address(address1), + Token::Address(address2), + Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), + ]) + .unwrap(); + + let complex_upgrader = get_complex_upgrader_abi(); + let upgrade_function = complex_upgrader.function("upgrade").unwrap(); + let complex_upgrader_calldata = upgrade_function + .encode_input(&[ + Token::Address(implementation_address), + Token::Bytes(impl_calldata), + ]) + .unwrap(); + + let execute = Execute { + contract_address: COMPLEX_UPGRADER_ADDRESS, + calldata: complex_upgrader_calldata, + factory_deps: vec![], + value: U256::zero(), + }; + + Transaction { + common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, + gas_limit: U256::from(200_000_000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +fn read_msg_sender_test() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") +} + +fn get_complex_upgrader_abi() -> Contract { + load_sys_contract("ComplexUpgrader") +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs new file mode 100644 index 00000000000..0a72667bd80 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -0,0 +1,128 @@ +use std::collections::BTreeMap; + +use ethabi::Contract; +use once_cell::sync::Lazy; +use vm2::{instruction_handlers::HeapInterface, HeapId, State}; +use zksync_contracts::{ + load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, +}; +use zksync_state::ReadStorage; +use zksync_types::{ + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256, + U256, +}; +use zksync_utils::{bytecode::hash_bytecode, 
bytes_to_be_words, h256_to_u256, u256_to_h256}; + +pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub(crate) fn verify_required_memory(state: &State, required_values: Vec<(U256, HeapId, u32)>) { + for (required_value, memory_page, cell) in required_values { + let current_value = state.heaps[memory_page].read_u256(cell * 32); + assert_eq!(current_value, required_value); + } +} + +pub(crate) fn verify_required_storage( + required_values: &[(H256, StorageKey)], + main_storage: &mut impl ReadStorage, + storage_changes: &BTreeMap<(H160, U256), U256>, +) { + for &(required_value, key) in required_values { + let current_value = storage_changes + .get(&(*key.account().address(), h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))); + + assert_eq!( + u256_to_h256(current_value), + required_value, + "Invalid value at key {key:?}" + ); + } +} +pub(crate) fn get_balance( + token_id: AccountTreeId, + account: &Address, + main_storage: &mut impl ReadStorage, + storage_changes: &BTreeMap<(H160, U256), U256>, +) -> U256 { + let key = storage_key_for_standard_token_balance(token_id, account); + + storage_changes + .get(&(*key.account().address(), h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) +} + +pub(crate) fn read_test_contract() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") +} + +pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { + let bootloader_code = read_zbin_bytecode(format!( + "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", + test + )); + + let bootloader_hash = hash_bytecode(&bootloader_code); + SystemContractCode { + code: bytes_to_be_words(bootloader_code), + hash: bootloader_hash, + } +} + +pub(crate) fn read_error_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ) +} + +pub(crate) fn get_execute_error_calldata() -> Vec { + let test_contract = load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", + ); + + let function = test_contract.function("require_short").unwrap(); + + function + .encode_input(&[]) + .expect("failed to encode parameters") +} + +pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { + let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; + (read_bytecode(path), load_contract(path)) +} + +pub(crate) fn read_precompiles_contract() -> Vec { + read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", + ) +} + +pub(crate) fn load_precompiles_contract() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", + ) +} + +pub(crate) fn read_nonce_holder_tester() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") +} + +pub(crate) fn read_complex_upgrade() -> Vec { + read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") +} + +pub(crate) fn get_complex_upgrade_abi() -> Contract { + load_contract( + "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" + ) +} + +pub(crate) fn 
read_expensive_contract() -> (Vec, Contract) { + const PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; + (read_bytecode(PATH), load_contract(PATH)) +} diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs new file mode 100644 index 00000000000..502be0dc22c --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs @@ -0,0 +1,338 @@ +use std::convert::TryInto; + +use zksync_types::{ + ethabi::{encode, Address, Token}, + fee::{encoding_len, Fee}, + l1::is_l1_tx_type, + l2::{L2Tx, TransactionType}, + transaction_request::{PaymasterParams, TransactionRequest}, + web3::Bytes, + Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, +}; +use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; + +use crate::vm_latest::{ + constants::{MAX_GAS_PER_PUBDATA_BYTE, TX_MAX_COMPUTE_GAS_LIMIT}, + utils::overhead::derive_overhead, +}; + +/// This structure represents the data that is used by +/// the Bootloader to describe the transaction. +#[derive(Debug, Default, Clone)] +pub(crate) struct TransactionData { + pub(crate) tx_type: u8, + pub(crate) from: Address, + pub(crate) to: Address, + pub(crate) gas_limit: U256, + pub(crate) pubdata_price_limit: U256, + pub(crate) max_fee_per_gas: U256, + pub(crate) max_priority_fee_per_gas: U256, + pub(crate) paymaster: Address, + pub(crate) nonce: U256, + pub(crate) value: U256, + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transaction, but it should not be mandatory + // in the long run. + pub(crate) reserved: [U256; 4], + pub(crate) data: Vec, + pub(crate) signature: Vec, + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. + // TODO: include this into the tx signature as part of SMA-1010 + pub(crate) factory_deps: Vec>, + pub(crate) paymaster_input: Vec, + pub(crate) reserved_dynamic: Vec, + pub(crate) raw_bytes: Option>, +} + +impl From for TransactionData { + fn from(execute_tx: Transaction) -> Self { + match execute_tx.common_data { + ExecuteTransactionCommon::L2(common_data) => { + let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); + + let should_check_chain_id = if matches!( + common_data.transaction_type, + TransactionType::LegacyTransaction + ) && common_data.extract_chain_id().is_some() + { + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + + // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use + // some default value. We use the maximum possible value that is allowed by the bootloader + // (i.e. we can not use u64::MAX, because the bootloader requires gas per pubdata for such + // transactions to be higher than `MAX_GAS_PER_PUBDATA_BYTE`). 
+ let gas_per_pubdata_limit = if common_data.transaction_type.is_ethereum_type() { + MAX_GAS_PER_PUBDATA_BYTE.into() + } else { + common_data.fee.gas_per_pubdata_limit + }; + + TransactionData { + tx_type: (common_data.transaction_type as u32) as u8, + from: common_data.initiator_address, + to: execute_tx.execute.contract_address, + gas_limit: common_data.fee.gas_limit, + pubdata_price_limit: gas_per_pubdata_limit, + max_fee_per_gas: common_data.fee.max_fee_per_gas, + max_priority_fee_per_gas: common_data.fee.max_priority_fee_per_gas, + paymaster: common_data.paymaster_params.paymaster, + nonce, + value: execute_tx.execute.value, + reserved: [ + should_check_chain_id, + U256::zero(), + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + signature: common_data.signature, + factory_deps: execute_tx.execute.factory_deps, + paymaster_input: common_data.paymaster_params.paymaster_input, + reserved_dynamic: vec![], + raw_bytes: execute_tx.raw_bytes.map(|a| a.0), + } + } + ExecuteTransactionCommon::L1(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: common_data.tx_format() as u8, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.serial_id.0), // priority op ID + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps, + paymaster_input: vec![], + reserved_dynamic: vec![], + raw_bytes: None, + } + } + ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { + let refund_recipient = h256_to_u256(address_to_h256(&common_data.refund_recipient)); + TransactionData { + tx_type: common_data.tx_format() as u8, + from: common_data.sender, + to: execute_tx.execute.contract_address, + gas_limit: common_data.gas_limit, + pubdata_price_limit: common_data.gas_per_pubdata_limit, + // It doesn't matter what we put here, since + // the bootloader does not charge anything + max_fee_per_gas: common_data.max_fee_per_gas, + max_priority_fee_per_gas: U256::zero(), + paymaster: Address::default(), + nonce: U256::from(common_data.upgrade_id as u16), + value: execute_tx.execute.value, + reserved: [ + common_data.to_mint, + refund_recipient, + U256::zero(), + U256::zero(), + ], + data: execute_tx.execute.calldata, + // The signature isn't checked for L1 transactions so we don't care + signature: vec![], + factory_deps: execute_tx.execute.factory_deps, + paymaster_input: vec![], + reserved_dynamic: vec![], + raw_bytes: None, + } + } + } + } +} + +impl TransactionData { + pub(crate) fn abi_encode_with_custom_factory_deps( + self, + factory_deps_hashes: Vec, + ) -> Vec { + encode(&[Token::Tuple(vec![ + Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), + Token::Address(self.from), + Token::Address(self.to), + Token::Uint(self.gas_limit), + Token::Uint(self.pubdata_price_limit), + Token::Uint(self.max_fee_per_gas), + Token::Uint(self.max_priority_fee_per_gas), + Token::Address(self.paymaster), + 
Token::Uint(self.nonce), + Token::Uint(self.value), + Token::FixedArray(self.reserved.iter().copied().map(Token::Uint).collect()), + Token::Bytes(self.data), + Token::Bytes(self.signature), + Token::Array(factory_deps_hashes.into_iter().map(Token::Uint).collect()), + Token::Bytes(self.paymaster_input), + Token::Bytes(self.reserved_dynamic), + ])]) + } + + pub(crate) fn abi_encode(self) -> Vec { + let factory_deps_hashes = self + .factory_deps + .iter() + .map(|dep| h256_to_u256(hash_bytecode(dep))) + .collect(); + self.abi_encode_with_custom_factory_deps(factory_deps_hashes) + } + + pub(crate) fn into_tokens(self) -> Vec { + let bytes = self.abi_encode(); + assert!(bytes.len() % 32 == 0); + + bytes_to_be_words(bytes) + } + + pub(crate) fn overhead_gas(&self) -> u32 { + let encoded_len = encoding_len( + self.data.len() as u64, + self.signature.len() as u64, + self.factory_deps.len() as u64, + self.paymaster_input.len() as u64, + self.reserved_dynamic.len() as u64, + ); + + derive_overhead(encoded_len) + } + + pub(crate) fn trusted_ergs_limit(&self) -> U256 { + // No transaction is allowed to spend more than `TX_MAX_COMPUTE_GAS_LIMIT` gas on compute. + U256::from(TX_MAX_COMPUTE_GAS_LIMIT).min(self.gas_limit) + } + + pub(crate) fn tx_hash(&self, chain_id: L2ChainId) -> H256 { + if is_l1_tx_type(self.tx_type) { + return self.canonical_l1_tx_hash().unwrap(); + } + + let l2_tx: L2Tx = self.clone().try_into().unwrap(); + let mut transaction_request: TransactionRequest = l2_tx.into(); + transaction_request.chain_id = Some(chain_id.as_u64()); + + // It is assumed that the `TransactionData` always has all the necessary components to recover the hash. + transaction_request + .get_tx_hash() + .expect("Could not recover L2 transaction hash") + } + + fn canonical_l1_tx_hash(&self) -> Result { + use zksync_types::web3::keccak256; + + if !is_l1_tx_type(self.tx_type) { + return Err(TxHashCalculationError::CannotCalculateL1HashForL2Tx); + } + + let encoded_bytes = self.clone().abi_encode(); + + Ok(H256(keccak256(&encoded_bytes))) + } +} + +#[derive(Debug, Clone, Copy)] +pub(crate) enum TxHashCalculationError { + CannotCalculateL1HashForL2Tx, + CannotCalculateL2HashForL1Tx, +} + +impl TryInto for TransactionData { + type Error = TxHashCalculationError; + + fn try_into(self) -> Result { + if is_l1_tx_type(self.tx_type) { + return Err(TxHashCalculationError::CannotCalculateL2HashForL1Tx); + } + + let common_data = L2TxCommonData { + transaction_type: (self.tx_type as u32).try_into().unwrap(), + nonce: Nonce(self.nonce.as_u32()), + fee: Fee { + max_fee_per_gas: self.max_fee_per_gas, + max_priority_fee_per_gas: self.max_priority_fee_per_gas, + gas_limit: self.gas_limit, + gas_per_pubdata_limit: self.pubdata_price_limit, + }, + signature: self.signature, + input: None, + initiator_address: self.from, + paymaster_params: PaymasterParams { + paymaster: self.paymaster, + paymaster_input: self.paymaster_input, + }, + }; + let execute = Execute { + contract_address: self.to, + value: self.value, + calldata: self.data, + factory_deps: self.factory_deps, + }; + + Ok(L2Tx { + execute, + common_data, + received_timestamp_ms: 0, + raw_bytes: self.raw_bytes.map(Bytes::from), + }) + } +} + +#[cfg(test)] +mod tests { + use zksync_types::fee::encoding_len; + + use super::*; + + #[test] + fn test_consistency_with_encoding_length() { + let transaction = TransactionData { + tx_type: 113, + from: Address::random(), + to: Address::random(), + gas_limit: U256::from(1u32), + pubdata_price_limit: U256::from(1u32), + max_fee_per_gas: 
U256::from(1u32), + max_priority_fee_per_gas: U256::from(1u32), + paymaster: Address::random(), + nonce: U256::zero(), + value: U256::zero(), + // The reserved fields that are unique for different types of transactions. + // E.g. nonce is currently used in all transaction, but it should not be mandatory + // in the long run. + reserved: [U256::zero(); 4], + data: vec![0u8; 65], + signature: vec![0u8; 75], + // The factory deps provided with the transaction. + // Note that *only hashes* of these bytecodes are signed by the user + // and they are used in the ABI encoding of the struct. + // TODO: include this into the tx signature as part of SMA-1010 + factory_deps: vec![vec![0u8; 32], vec![1u8; 32]], + paymaster_input: vec![0u8; 85], + reserved_dynamic: vec![0u8; 32], + raw_bytes: None, + }; + + let assumed_encoded_len = encoding_len(65, 75, 2, 85, 32); + + let true_encoding_len = transaction.into_tokens().len(); + + assert_eq!(assumed_encoded_len, true_encoding_len); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs new file mode 100644 index 00000000000..a4dad0b324d --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -0,0 +1,794 @@ +use std::{collections::HashMap, fmt}; + +use vm2::{ + decode::decode_program, fat_pointer::FatPointer, instruction_handlers::HeapInterface, + ExecutionEnd, Program, Settings, VirtualMachine, +}; +use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; +use zksync_contracts::SystemContractCode; +use zksync_state::ReadStorage; +use zksync_types::{ + event::{ + extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages, + L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE, + }, + l1::is_l1_tx_type, + l2_to_l1_log::UserL2ToL1Log, + utils::key_for_eth_balance, + writes::{ + compression::compress_with_best_strategy, StateDiffRecord, BYTES_PER_DERIVED_KEY, + BYTES_PER_ENUMERATION_INDEX, + }, + AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, + BOOTLOADER_ADDRESS, H160, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + L2_BASE_TOKEN_ADDRESS, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; + +use super::{ + bootloader_state::{BootloaderState, BootloaderStateSnapshot}, + bytecode::compress_bytecodes, + hook::Hook, + initial_bootloader_memory::bootloader_initial_memory, + transaction_data::TransactionData, +}; +use crate::{ + glue::GlueInto, + interface::{ + BytecodeCompressionError, Halt, TxRevertReason, VmInterface, VmInterfaceHistoryEnabled, + VmRevertReason, + }, + vm_fast::{ + bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory}, + events::merge_events, + pubdata::PubdataInput, + refund::compute_refund, + }, + vm_latest::{ + constants::{ + get_vm_hook_params_start_position, get_vm_hook_position, OPERATOR_REFUNDS_OFFSET, + TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT, + }, + BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, + L2BlockEnv, MultiVMSubversion, Refunds, SystemEnv, VmExecutionLogs, VmExecutionMode, + VmExecutionResultAndLogs, VmExecutionStatistics, + }, +}; + +const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemory; + +pub struct Vm { + pub(crate) world: World, + pub(crate) inner: VirtualMachine, + suspended_at: u16, + gas_for_account_validation: u32, + pub(crate) bootloader_state: BootloaderState, + pub(crate) batch_env: L1BatchEnv, + pub(crate) system_env: SystemEnv, + snapshot: Option, +} + 
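+// How this VM is typically driven (a sketch based on the `VmInterface` impl below; `storage` is any
+// `ReadStorage` implementation, while `batch_env`, `system_env` and `tx` are assumed to be prepared by the caller):
+//
+// let mut vm = Vm::new(batch_env, system_env, storage);
+// vm.push_transaction(tx);
+// let tx_result = vm.execute(VmExecutionMode::OneTx);
+// assert!(!tx_result.result.is_failed(), "Transaction wasn't successful");
+// let finished_batch = vm.finish_batch();
+//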
+impl Vm { + fn run( + &mut self, + execution_mode: VmExecutionMode, + track_refunds: bool, + ) -> (ExecutionResult, Refunds) { + let mut refunds = Refunds { + gas_refunded: 0, + operator_suggested_refund: 0, + }; + let mut last_tx_result = None; + let mut pubdata_before = self.inner.world_diff.pubdata() as u32; + + let result = loop { + let hook = match self.inner.resume_from(self.suspended_at, &mut self.world) { + ExecutionEnd::SuspendedOnHook { + hook, + pc_to_resume_from, + } => { + self.suspended_at = pc_to_resume_from; + hook + } + ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, + ExecutionEnd::Reverted(output) => { + break match TxRevertReason::parse_error(&output) { + TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output }, + TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason }, + } + } + ExecutionEnd::Panicked => { + break ExecutionResult::Halt { + reason: if self.gas_remaining() == 0 { + Halt::BootloaderOutOfGas + } else { + Halt::VMPanic + }, + } + } + }; + + match Hook::from_u32(hook) { + Hook::AccountValidationEntered | Hook::AccountValidationExited => { + // TODO (PLA-908): implement account validation + } + Hook::TxHasEnded => { + if let VmExecutionMode::OneTx = execution_mode { + break last_tx_result.take().unwrap(); + } + } + Hook::AskOperatorForRefund => { + if track_refunds { + let [bootloader_refund, gas_spent_on_pubdata, gas_per_pubdata_byte] = + self.get_hook_params(); + let current_tx_index = self.bootloader_state.current_tx(); + let tx_description_offset = self + .bootloader_state + .get_tx_description_offset(current_tx_index); + let tx_gas_limit = self + .read_word_from_bootloader_heap( + tx_description_offset + TX_GAS_LIMIT_OFFSET, + ) + .as_u64(); + + let pubdata_published = self.inner.world_diff.pubdata() as u32; + + refunds.operator_suggested_refund = compute_refund( + &self.batch_env, + bootloader_refund.as_u64(), + gas_spent_on_pubdata.as_u64(), + tx_gas_limit, + gas_per_pubdata_byte.low_u32(), + pubdata_published.saturating_sub(pubdata_before), + self.bootloader_state + .last_l2_block() + .txs + .last() + .unwrap() + .hash, + ); + + pubdata_before = pubdata_published; + let refund_value = refunds.operator_suggested_refund; + self.write_to_bootloader_heap([( + OPERATOR_REFUNDS_OFFSET + current_tx_index, + refund_value.into(), + )]); + self.bootloader_state + .set_refund_for_current_tx(refund_value); + } + } + Hook::NotifyAboutRefund => { + if track_refunds { + refunds.gas_refunded = self.get_hook_params()[0].low_u64() + } + } + Hook::PostResult => { + let result = self.get_hook_params()[0]; + let value = self.get_hook_params()[1]; + let fp = FatPointer::from(value); + assert_eq!(fp.offset, 0); + + let return_data = self.inner.state.heaps[fp.memory_page] + .read_range_big_endian(fp.start..fp.start + fp.length); + + last_tx_result = Some(if result.is_zero() { + ExecutionResult::Revert { + output: VmRevertReason::from(return_data.as_slice()), + } + } else { + ExecutionResult::Success { + output: return_data, + } + }); + } + Hook::FinalBatchInfo => { + // set fictive l2 block + let txs_index = self.bootloader_state.free_tx_index(); + let l2_block = self.bootloader_state.insert_fictive_l2_block(); + let mut memory = vec![]; + apply_l2_block(&mut memory, l2_block, txs_index); + self.write_to_bootloader_heap(memory); + } + Hook::PubdataRequested => { + if !matches!(execution_mode, VmExecutionMode::Batch) { + unreachable!("We do not provide the pubdata when executing the block tip or a single 
transaction"); + } + + let events = + merge_events(self.inner.world_diff.events(), self.batch_env.number); + + let published_bytecodes = events + .iter() + .filter(|event| { + // Filter events from the l1 messenger contract that match the expected signature. + event.address == L1_MESSENGER_ADDRESS + && !event.indexed_topics.is_empty() + && event.indexed_topics[0] + == *L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE + }) + .map(|event| { + let hash = U256::from_big_endian(&event.value[..32]); + self.world + .bytecode_cache + .get(&hash) + .expect("published unknown bytecode") + .clone() + }) + .collect(); + + let pubdata_input = PubdataInput { + user_logs: extract_l2tol1logs_from_l1_messenger(&events), + l2_to_l1_messages: extract_long_l2_to_l1_messages(&events), + published_bytecodes, + state_diffs: self + .compute_state_diffs() + .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) + .collect(), + }; + + // Save the pubdata for the future initial bootloader memory building + self.bootloader_state + .set_pubdata_input(pubdata_input.clone()); + + // Apply the pubdata to the current memory + let mut memory_to_apply = vec![]; + + apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); + self.write_to_bootloader_heap(memory_to_apply); + } + + Hook::PaymasterValidationEntered | Hook::ValidationStepEnded => { /* unused */ } + Hook::DebugLog | Hook::DebugReturnData | Hook::NearCallCatch => { + // These hooks are for debug purposes only + } + } + }; + + (result, refunds) + } + + fn get_hook_params(&self) -> [U256; 3] { + (get_vm_hook_params_start_position(VM_VERSION) + ..get_vm_hook_params_start_position(VM_VERSION) + VM_HOOK_PARAMS_COUNT) + .map(|word| self.read_word_from_bootloader_heap(word as usize)) + .collect::>() + .try_into() + .unwrap() + } + + /// Should only be used when the bootloader is executing (e.g., when handling hooks). + pub(crate) fn read_word_from_bootloader_heap(&self, word: usize) -> U256 { + self.inner.state.heaps[vm2::FIRST_HEAP].read_u256(word as u32 * 32) + } + + /// Should only be used when the bootloader is executing (e.g., when handling hooks). 
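+ /// The assertion inside checks that no user frames are active, i.e. the bootloader frame is the current one.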
+ pub(crate) fn write_to_bootloader_heap( + &mut self, + memory: impl IntoIterator, + ) { + assert!(self.inner.state.previous_frames.is_empty()); + for (slot, value) in memory { + self.inner + .state + .heaps + .write_u256(vm2::FIRST_HEAP, slot as u32 * 32, value); + } + } + + pub(crate) fn insert_bytecodes<'a>(&mut self, bytecodes: impl IntoIterator) { + for code in bytecodes { + let hash = h256_to_u256(hash_bytecode(code)); + self.world.bytecode_cache.insert(hash, code.into()); + } + } + + pub(crate) fn push_transaction_inner( + &mut self, + tx: zksync_types::Transaction, + refund: u64, + with_compression: bool, + ) { + let tx: TransactionData = tx.into(); + let overhead = tx.overhead_gas(); + + self.insert_bytecodes(tx.factory_deps.iter().map(|dep| &dep[..])); + + let compressed_bytecodes = if is_l1_tx_type(tx.tx_type) || !with_compression { + // L1 transactions do not need compression + vec![] + } else { + compress_bytecodes(&tx.factory_deps, |hash| { + self.inner + .world_diff + .get_storage_state() + .get(&(KNOWN_CODES_STORAGE_ADDRESS, h256_to_u256(hash))) + .map(|x| !x.is_zero()) + .unwrap_or_else(|| self.world.storage.is_bytecode_known(&hash)) + }) + }; + + let trusted_ergs_limit = tx.trusted_ergs_limit(); + + let memory = self.bootloader_state.push_tx( + tx, + overhead, + refund, + compressed_bytecodes, + trusted_ergs_limit, + self.system_env.chain_id, + ); + + self.write_to_bootloader_heap(memory); + } + + fn compute_state_diffs(&mut self) -> impl Iterator + '_ { + let storage = &mut self.world.storage; + + self.inner.world_diff.get_storage_changes().map( + move |((address, key), (initial_value, final_value))| { + let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); + StateDiffRecord { + address, + key, + derived_key: + zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params( + &address, &key, + ), + enumeration_index: storage + .get_enumeration_index(&storage_key) + .unwrap_or_default(), + initial_value: initial_value.unwrap_or_default(), + final_value, + } + }, + ) + } + + pub(crate) fn decommitted_hashes(&self) -> impl Iterator + '_ { + self.inner.world_diff.decommitted_hashes() + } +} + +// We don't implement `VmFactory` trait because, unlike old VMs, the new VM doesn't require storage to be writable; +// it maintains its own storage cache and a write buffer. +impl Vm { + pub fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + let default_aa_code_hash = system_env + .base_system_smart_contracts + .default_aa + .hash + .into(); + + let program_cache = HashMap::from([convert_system_contract_code( + &system_env.base_system_smart_contracts.default_aa, + false, + )]); + + let (_, bootloader) = + convert_system_contract_code(&system_env.base_system_smart_contracts.bootloader, true); + let bootloader_memory = bootloader_initial_memory(&batch_env); + + let mut inner = VirtualMachine::new( + BOOTLOADER_ADDRESS, + bootloader, + H160::zero(), + vec![], + system_env.bootloader_gas_limit, + Settings { + default_aa_code_hash, + // this will change after 1.5 + evm_interpreter_code_hash: default_aa_code_hash, + hook_address: get_vm_hook_position(VM_VERSION) * 32, + }, + ); + + inner.state.current_frame.sp = 0; + + // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. 
+ inner.state.current_frame.heap_size = u32::MAX; + inner.state.current_frame.aux_heap_size = u32::MAX; + inner.state.current_frame.exception_handler = INITIAL_FRAME_FORMAL_EH_LOCATION; + + let mut me = Self { + world: World::new(storage, program_cache), + inner, + suspended_at: 0, + gas_for_account_validation: system_env.default_validation_computational_gas_limit, + bootloader_state: BootloaderState::new( + system_env.execution_mode, + bootloader_memory.clone(), + batch_env.first_l2_block, + ), + system_env, + batch_env, + snapshot: None, + }; + + me.write_to_bootloader_heap(bootloader_memory); + + me + } + + fn delete_history_if_appropriate(&mut self) { + if self.snapshot.is_none() && self.inner.state.previous_frames.is_empty() { + self.inner.delete_history(); + } + } +} + +impl VmInterface for Vm { + type TracerDispatcher = (); + + fn push_transaction(&mut self, tx: zksync_types::Transaction) { + self.push_transaction_inner(tx, 0, true); + } + + fn inspect( + &mut self, + (): Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + let mut track_refunds = false; + if matches!(execution_mode, VmExecutionMode::OneTx) { + // Move the pointer to the next transaction + self.bootloader_state.move_tx_to_execute_pointer(); + track_refunds = true; + } + + let start = self.inner.world_diff.snapshot(); + let pubdata_before = self.inner.world_diff.pubdata(); + + let (result, refunds) = self.run(execution_mode, track_refunds); + let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) + && matches!(result, ExecutionResult::Halt { .. }); + + // If the execution is halted, the VM changes are expected to be rolled back by the caller. + // Earlier VMs return empty execution logs in this case, so we follow this behavior. 
+ let logs = if ignore_world_diff { + VmExecutionLogs::default() + } else { + let storage_logs = self + .inner + .world_diff + .get_storage_changes_after(&start) + .map(|((address, key), change)| StorageLogWithPreviousValue { + log: StorageLog { + key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), + value: u256_to_h256(change.after), + kind: if change.is_initial { + StorageLogKind::InitialWrite + } else { + StorageLogKind::RepeatedWrite + }, + }, + previous_value: u256_to_h256(change.before.unwrap_or_default()), + }) + .collect(); + let events = merge_events( + self.inner.world_diff.events_after(&start), + self.batch_env.number, + ); + let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) + .into_iter() + .map(Into::into) + .map(UserL2ToL1Log) + .collect(); + let system_l2_to_l1_logs = self + .inner + .world_diff + .l2_to_l1_logs_after(&start) + .iter() + .map(|x| x.glue_into()) + .collect(); + VmExecutionLogs { + storage_logs, + events, + user_l2_to_l1_logs, + system_l2_to_l1_logs, + total_log_queries_count: 0, // This field is unused + } + }; + + let pubdata_after = self.inner.world_diff.pubdata(); + VmExecutionResultAndLogs { + result, + logs, + // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt` + statistics: VmExecutionStatistics { + contracts_used: 0, + cycles_used: 0, + gas_used: 0, + gas_remaining: 0, + computational_gas_used: 0, + total_log_queries: 0, + pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, + circuit_statistic: Default::default(), + }, + refunds, + } + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + (): Self::TracerDispatcher, + tx: zksync_types::Transaction, + with_compression: bool, + ) -> ( + Result<(), BytecodeCompressionError>, + VmExecutionResultAndLogs, + ) { + self.push_transaction_inner(tx, 0, with_compression); + let result = self.inspect((), VmExecutionMode::OneTx); + + let compression_result = if self.has_unpublished_bytecodes() { + Err(BytecodeCompressionError::BytecodeCompressionFailed) + } else { + Ok(()) + }; + (compression_result, result) + } + + fn get_bootloader_memory(&self) -> BootloaderMemory { + self.bootloader_state.bootloader_memory() + } + + fn get_last_tx_compressed_bytecodes( + &self, + ) -> Vec { + self.bootloader_state.get_last_tx_compressed_bytecodes() + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env) + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + let world_diff = &self.inner.world_diff; + let events = merge_events(world_diff.events(), self.batch_env.number); + + let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) + .into_iter() + .map(Into::into) + .map(UserL2ToL1Log) + .collect(); + + CurrentExecutionState { + events, + deduplicated_storage_logs: world_diff + .get_storage_changes() + .map(|((address, key), (_, value))| StorageLog { + key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), + value: u256_to_h256(value), + kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here + }) + .collect(), + used_contract_hashes: self.decommitted_hashes().collect(), + system_logs: world_diff + .l2_to_l1_logs() + .iter() + .map(|x| x.glue_into()) + .collect(), + user_l2_to_l1_logs, + storage_refunds: world_diff.storage_refunds().to_vec(), + pubdata_costs: world_diff.pubdata_costs().to_vec(), + } + } + + fn record_vm_memory_metrics(&self) -> crate::vm_latest::VmMemoryMetrics { + 
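+ // Memory metrics are gathered from the legacy VM oracles, which this VM doesn't have; per the message below, this method isn't called during batch execution.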
todo!("Unused during batch execution") + } + + fn gas_remaining(&self) -> u32 { + self.inner.state.current_frame.gas + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let result = self.execute(VmExecutionMode::Batch); + let execution_state = self.get_current_execution_state(); + let bootloader_memory = self.get_bootloader_memory(); + FinishedL1Batch { + block_tip_execution_result: result, + final_execution_state: execution_state, + final_bootloader_memory: Some(bootloader_memory), + pubdata_input: Some( + self.bootloader_state + .get_pubdata_information() + .clone() + .build_pubdata(false), + ), + state_diffs: Some( + self.bootloader_state + .get_pubdata_information() + .state_diffs + .to_vec(), + ), + } + } +} + +#[derive(Debug)] +struct VmSnapshot { + vm_snapshot: vm2::Snapshot, + bootloader_snapshot: BootloaderStateSnapshot, + suspended_at: u16, + gas_for_account_validation: u32, +} + +impl VmInterfaceHistoryEnabled for Vm { + fn make_snapshot(&mut self) { + assert!( + self.snapshot.is_none(), + "cannot create a VM snapshot until a previous snapshot is rolled back to or popped" + ); + + self.delete_history_if_appropriate(); + self.snapshot = Some(VmSnapshot { + vm_snapshot: self.inner.snapshot(), + bootloader_snapshot: self.bootloader_state.get_snapshot(), + suspended_at: self.suspended_at, + gas_for_account_validation: self.gas_for_account_validation, + }); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + let VmSnapshot { + vm_snapshot, + bootloader_snapshot, + suspended_at, + gas_for_account_validation, + } = self.snapshot.take().expect("no snapshots to rollback to"); + + self.inner.rollback(vm_snapshot); + self.bootloader_state.apply_snapshot(bootloader_snapshot); + self.suspended_at = suspended_at; + self.gas_for_account_validation = gas_for_account_validation; + + self.delete_history_if_appropriate(); + } + + fn pop_snapshot_no_rollback(&mut self) { + self.snapshot = None; + self.delete_history_if_appropriate(); + } +} + +impl fmt::Debug for Vm { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Vm") + .field("suspended_at", &self.suspended_at) + .field( + "gas_for_account_validation", + &self.gas_for_account_validation, + ) + .field("bootloader_state", &self.bootloader_state) + .field("storage", &self.world.storage) + .field("program_cache", &self.world.program_cache) + .field("batch_env", &self.batch_env) + .field("system_env", &self.system_env) + .field("snapshot", &self.snapshot.as_ref().map(|_| ())) + .finish() + } +} + +#[derive(Debug)] +pub(crate) struct World { + pub(crate) storage: S, + // TODO (PLA-1008): Store `Program`s in an LRU cache + program_cache: HashMap, + pub(crate) bytecode_cache: HashMap>, +} + +impl World { + fn new(storage: S, program_cache: HashMap) -> Self { + Self { + storage, + program_cache, + bytecode_cache: Default::default(), + } + } +} + +impl vm2::World for World { + fn decommit_code(&mut self, hash: U256) -> Vec { + self.decommit(hash) + .code_page() + .as_ref() + .iter() + .flat_map(|u| { + let mut buffer = [0u8; 32]; + u.to_big_endian(&mut buffer); + buffer + }) + .collect() + } + + fn decommit(&mut self, hash: U256) -> Program { + self.program_cache + .entry(hash) + .or_insert_with(|| { + bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + self.storage + .load_factory_dep(u256_to_h256(hash)) + .expect("vm tried to decommit nonexistent bytecode") + })) + }) + .clone() + } + + fn read_storage(&mut self, contract: H160, key: U256) -> Option { + let key = 
&StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); + if self.storage.is_write_initial(key) { + None + } else { + Some(self.storage.read_value(key).as_bytes().into()) + } + } + + fn cost_of_writing_storage(&mut self, initial_value: Option, new_value: U256) -> u32 { + let is_initial = initial_value.is_none(); + let initial_value = initial_value.unwrap_or_default(); + + if initial_value == new_value { + return 0; + } + + // Since we need to publish the state diffs onchain, for each of the updated storage slot + // we basically need to publish the following pair: `()`. + // For key we use the following optimization: + // - The first time we publish it, we use 32 bytes. + // Then, we remember a 8-byte id for this slot and assign it to it. We call this initial write. + // - The second time we publish it, we will use the 4/5 byte representation of this 8-byte instead of the 32 + // bytes of the entire key. + // For value compression, we use a metadata byte which holds the length of the value and the operation from the + // previous state to the new state, and the compressed value. The maximum for this is 33 bytes. + // Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes. + let compressed_value_size = + compress_with_best_strategy(initial_value, new_value).len() as u32; + + if is_initial { + (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size + } else { + (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size + } + } + + fn is_free_storage_slot(&self, contract: &H160, key: &U256) -> bool { + contract == &zksync_system_constants::SYSTEM_CONTEXT_ADDRESS + || contract == &L2_BASE_TOKEN_ADDRESS + && u256_to_h256(*key) == key_for_eth_balance(&BOOTLOADER_ADDRESS) + } +} + +fn bytecode_to_program(bytecode: &[u8]) -> Program { + Program::new( + decode_program( + &bytecode + .chunks_exact(8) + .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) + .collect::>(), + false, + ), + bytecode + .chunks_exact(32) + .map(U256::from_big_endian) + .collect::>(), + ) +} + +fn convert_system_contract_code(code: &SystemContractCode, is_bootloader: bool) -> (U256, Program) { + ( + h256_to_u256(code.hash), + Program::new( + decode_program( + &code + .code + .iter() + .flat_map(|x| x.0.into_iter().rev()) + .collect::>(), + is_bootloader, + ), + code.code.clone(), + ), + ) +} diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs index 58fad96dec8..aca2bc49707 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs @@ -1,6 +1,5 @@ use std::collections::HashMap; -use itertools::Itertools; use zk_evm_1_5_0::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -9,7 +8,6 @@ use zk_evm_1_5_0::{ BOOTLOADER_FORMAL_ADDRESS, EVENT_AUX_BYTE, L1_MESSAGE_AUX_BYTE, }, }; -use zksync_types::U256; use crate::vm_latest::old_vm::{ history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, @@ -31,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -40,10 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = 
Self::events_and_l1_messages_from_history(history); - let events_logs = Self::events_logs_from_history(history); - - (events_logs, events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { @@ -69,92 +64,6 @@ impl InMemoryEventSink { Self::events_and_l1_messages_from_history(self.log_queries_after_timestamp(from_timestamp)) } - fn events_logs_from_history(history: &[Box]) -> Vec { - // Filter out all the L2->L1 logs and leave only events - let mut events = history - .iter() - .filter_map(|log_query| (log_query.aux_byte == EVENT_AUX_BYTE).then_some(**log_query)) - .collect_vec(); - - // Sort the events by timestamp and rollback flag, basically ensuring that - // if an event has been rolled back, the original event and its rollback will be put together - events.sort_by_key(|log| (log.timestamp, log.rollback)); - - let mut stack = Vec::::new(); - let mut net_history = vec![]; - for el in events.iter() { - assert_eq!(el.shard_id, 0, "only rollup shard is supported"); - if stack.is_empty() { - assert!(!el.rollback); - stack.push(*el); - } else { - // we can always pop as it's either one to add to queue, or discard - let previous = stack.pop().unwrap(); - if previous.timestamp == el.timestamp { - // Only rollback can have the same timestamp, so here we do nothing and simply - // double check the invariants - assert!(!previous.rollback); - assert!(el.rollback); - assert!(previous.rw_flag); - assert!(el.rw_flag); - assert_eq!(previous.tx_number_in_block, el.tx_number_in_block); - assert_eq!(previous.shard_id, el.shard_id); - assert_eq!(previous.address, el.address); - assert_eq!(previous.key, el.key); - assert_eq!(previous.written_value, el.written_value); - assert_eq!(previous.is_service, el.is_service); - continue; - } else { - // The event on the stack has not been rolled back. It must be a different event, - // with a different timestamp. - assert!(!el.rollback); - stack.push(*el); - - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - } - } - - // In case the stack is non-empty, then the last element of it has not been rolled back. 
- if let Some(previous) = stack.pop() { - // cleanup some fields - // flags are conventions - let sorted_log_query = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: previous.tx_number_in_block, - aux_byte: 0, - shard_id: previous.shard_id, - address: previous.address, - key: previous.key, - read_value: U256::zero(), - written_value: previous.written_value, - rw_flag: false, - rollback: false, - is_service: previous.is_service, - }; - - net_history.push(sorted_log_query); - } - - net_history - } - fn events_and_l1_messages_from_history( history: &[Box], ) -> (Vec, Vec) { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index 8c8c6e2d097..7174e9be67d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -1,6 +1,11 @@ use ethabi::Token; -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_types::{get_known_code_key, web3::keccak256, Address, Execute, U256}; +use zk_evm_1_5_0::{ + aux_structures::{MemoryPage, Timestamp}, + zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32}, +}; +use zksync_types::{ + get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, +}; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; use crate::{ @@ -79,7 +84,10 @@ fn test_code_oracle() { vm.vm.push_transaction(tx1); let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); // Now, we ask for the same bytecode. We use to partially check whether the memory page with // the decommitted bytecode gets erased (it shouldn't). @@ -99,7 +107,21 @@ fn test_code_oracle() { ); vm.vm.push_transaction(tx2); let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +fn find_code_oracle_cost_log( + precompiles_contract_address: Address, + logs: &[StorageLogWithPreviousValue], +) -> &StorageLogWithPreviousValue { + logs.iter() + .find(|log| { + *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() + }) + .expect("no code oracle cost log") } #[test] @@ -164,3 +186,97 @@ fn test_code_oracle_big_bytecode() { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!result.result.is_failed(), "Transaction wasn't successful"); } + +#[test] +fn refunds_in_code_oracle() { + let precompiles_contract_address = Address::random(); + let precompile_contract_bytecode = read_precompiles_contract(); + + let normal_zkevm_bytecode = read_test_contract(); + let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); + let normal_zkevm_bytecode_words = bytes_to_be_words(normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + let precompile_contract = load_precompiles_contract(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); + + // Execute code oracle twice with identical VM state that only differs in that the queried bytecode + // is already decommitted the second time. 
The second call must consume less gas (`decommit` doesn't charge additional gas + // for already decommitted codes). + let mut oracle_costs = vec![]; + for decommit in [false, true] { + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + precompile_contract_bytecode.clone(), + precompiles_contract_address, + false, + )]) + .with_storage(storage.clone()) + .build(); + + vm.vm.state.decommittment_processor.populate( + vec![( + h256_to_u256(normal_zkevm_bytecode_hash), + normal_zkevm_bytecode_words.clone(), + )], + Timestamp(0), + ); + + let account = &mut vm.rich_accounts[0]; + if decommit { + let (header, normalized_preimage) = + ContractCodeSha256Format::normalize_for_decommitment(&normal_zkevm_bytecode_hash.0); + let query = vm + .vm + .state + .prepare_to_decommit( + 0, + header, + normalized_preimage, + MemoryPage(123), + Timestamp(0), + ) + .unwrap(); + + assert!(query.is_fresh); + vm.vm.state.execute_decommit(0, query).unwrap(); + } + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: precompiles_contract_address, + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + let log = + find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); + oracle_costs.push(log.log.value); + } + + // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` + // in `CodeOracle.yul`. 
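Illustrative aside (not part of the diff): the refund asserted below follows directly from the formula quoted in the comment above. A minimal arithmetic sketch, assuming a hypothetical 640-byte (20-word) bytecode; the real test derives the word count from `normal_zkevm_bytecode_words.len()`:

    // Hypothetical bytecode length used purely for illustration.
    let bytecode_len_in_bytes: u64 = 640;
    let bytecode_len_in_words = bytecode_len_in_bytes / 32; // zkEVM bytecode is stored as 32-byte words
    let expected_refund = 4 * bytecode_len_in_words; // the `gasCost` charged by `decommit` in `CodeOracle.yul`
    assert_eq!(expected_refund, 80);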
+ let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); + assert_eq!( + code_oracle_refund, + (4 * normal_zkevm_bytecode_words.len()).into() + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 359190fc478..6b3be989fb3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,11 +1,12 @@ use ethabi::Token; use zksync_contracts::l1_messenger_contract; use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_test_account::Account; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, + Execute, ExecuteTransactionCommon, K256PrivateKey, U256, }; use zksync_utils::u256_to_h256; @@ -154,7 +155,9 @@ fn test_l1_tx_execution_high_gas_limit() { .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) + .with_rich_accounts(vec![Account::new( + K256PrivateKey::from_bytes([0xad; 32].into()).unwrap(), + )]) .build(); let account = &mut vm.rich_accounts[0]; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index 72d2271f715..52dbd6efb33 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -1,9 +1,12 @@ +use ethabi::Token; +use zksync_types::{Address, Execute, U256}; + use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, vm_latest::{ tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, + utils::{read_expensive_contract, read_test_contract}, }, types::internals::TransactionData, HistoryEnabled, @@ -164,3 +167,62 @@ fn test_predetermined_refunded_gas() { current_state_without_predefined_refunds.used_contract_hashes ); } + +#[test] +fn negative_pubdata_for_transaction() { + let expensive_contract_address = Address::random(); + let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); + let expensive_function = expensive_contract.function("expensive").unwrap(); + let cleanup_function = expensive_contract.function("cleanUp").unwrap(); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .with_custom_contracts(vec![( + expensive_contract_bytecode, + expensive_contract_address, + false, + )]) + .build(); + + let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: expensive_contract_address, + calldata: expensive_function + .encode_input(&[Token::Uint(10.into())]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(expensive_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
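Illustrative sketch (not part of the diff) of the pubdata accounting that makes a negative pubdata impact possible; the 32-, 5- and 33-byte figures come from the `cost_of_writing_storage` comment earlier in this diff and are worst-case assumptions:

    // Writing a slot back to its batch-start value publishes nothing, which is how `cleanUp`
    // ends up with negative net pubdata and an operator-suggested refund.
    fn pubdata_bytes_for_write(
        first_ever_write: bool,
        batch_start_value: u64,
        new_value: u64,
        compressed_value_len: u32,
    ) -> u32 {
        if batch_start_value == new_value {
            return 0; // no state diff to publish
        }
        let key_bytes = if first_ever_write { 32 } else { 5 }; // full derived key vs. enumeration index
        key_bytes + compressed_value_len // worst case: 65 bytes (initial write) or 38 bytes (repeated write)
    }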
+ let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: expensive_contract_address, + calldata: cleanup_function.encode_input(&[]).unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(clean_up_tx); + let result = vm.vm.execute(VmExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + assert!(result.refunds.operator_suggested_refund > 0); + assert_eq!( + result.refunds.gas_refunded, + result.refunds.operator_suggested_refund + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs index 8a55a3fc6a5..28d85348648 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs @@ -14,7 +14,8 @@ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use crate::{ interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + VmInterface, }, vm_latest::{ constants::BATCH_COMPUTATIONAL_GAS_LIMIT, @@ -82,7 +83,7 @@ impl VmTester { let mut l1_batch = self.vm.batch_env.clone(); if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { + let last_l2_block = load_last_l2_block(&self.storage).unwrap_or(L2Block { number: 0, timestamp: 0, hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index 37bdd0cef8e..2482df0d0e8 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -131,3 +131,9 @@ pub(crate) fn get_complex_upgrade_abi() -> Contract { "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" ) } + +pub(crate) fn read_expensive_contract() -> (Vec, Contract) { + const PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; + (read_bytecode(PATH), load_contract(PATH)) +} diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index 9a3e70f8dff..b9ac0bfad22 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -63,7 +63,7 @@ pub(crate) fn new_vm_state( system_env: &SystemEnv, l1_batch_env: &L1BatchEnv, ) -> (ZkSyncVmState, BootloaderState) { - let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(storage.clone()) { + let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(&storage) { last_l2_block } else { // This is the scenario of either the first L2 block ever or diff --git a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs index ec30a86013b..d3253ffd7fb 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs @@ -52,7 +52,7 @@ pub(crate) fn l2_block_hash( } /// Get last saved block from storage -pub fn load_last_l2_block(storage: StoragePtr) -> Option { +pub fn load_last_l2_block(storage: &StoragePtr) -> Option { // Get 
block number and timestamp let current_l2_block_info_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index a6cd884c738..f11431f0154 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -3,7 +3,8 @@ use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ event::extract_l2tol1logs_from_l1_messenger, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - Transaction, VmVersion, + vm::VmVersion, + Transaction, }; use zksync_utils::bytecode::CompressedBytecodeInfo; @@ -11,8 +12,8 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::{ bootloader_state::BootloaderState, @@ -72,19 +73,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let vm_version: VmVersion = system_env.version.into(); - Self::new_with_subversion( - batch_env, - system_env, - storage, - vm_version.try_into().expect("Incorrect 1.5.0 VmVersion"), - ) - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true); @@ -117,7 +108,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -128,13 +119,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| SystemL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); let deduped_storage_log_queries = @@ -152,12 +136,6 @@ impl VmInterface for Vm { .map(|log| UserL2ToL1Log(log.into())) .collect(), system_logs, - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_io_refunds.inner().clone(), pubdata_costs: self.state.storage.returned_pubdata_costs.inner().clone(), } @@ -219,6 +197,18 @@ impl VmInterface for Vm { } } +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let vm_version: VmVersion = system_env.version.into(); + Self::new_with_subversion( + batch_env, + system_env, + storage, + vm_version.try_into().expect("Incorrect 1.5.0 VmVersion"), + ) + } +} + impl Vm { pub(crate) fn new_with_subversion( batch_env: L1BatchEnv, @@ -240,14 +230,11 @@ impl Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.make_snapshot_inner() } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -256,10 +243,7 @@ impl VmInterfaceHistoryEnabled for Vm { self.rollback_to_snapshot(snapshot); } - /// Pop the latest snapshot from the memory and destroy it fn pop_snapshot_no_rollback(&mut self) { - self.snapshots - .pop() - .expect("Snapshot should be created before rolling it back"); + self.snapshots.pop(); } } diff --git a/core/lib/multivm/src/versions/vm_m5/event_sink.rs b/core/lib/multivm/src/versions/vm_m5/event_sink.rs index 782aa1d662f..83b01cb5c59 100644 --- a/core/lib/multivm/src/versions/vm_m5/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_m5/event_sink.rs @@ -33,7 +33,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.inner().len(), 1, @@ -45,9 +45,7 @@ impl InMemoryEventSink { forward, rollbacks: _, } = full_history; - let history = forward.clone(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(forward); - (history, events, l1_messages) + Self::events_and_l1_messages_from_history(forward) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 67bfec9b970..53189dbcfef 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -4,7 +4,8 @@ use 
zk_evm_1_3_1::aux_structures::LogQuery; use zksync_state::StoragePtr; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Transaction, VmVersion, + vm::VmVersion, + Transaction, }; use zksync_utils::{bytecode::CompressedBytecodeInfo, h256_to_u256, u256_to_h256}; @@ -13,7 +14,8 @@ use crate::{ interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_m5::{ events::merge_events, @@ -64,20 +66,10 @@ impl Vm { } } -impl VmInterface for Vm { +impl VmInterface for Vm { /// Tracers are not supported for here we use `()` as a placeholder type TracerDispatcher = (); - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let vm_version: VmVersion = system_env.version.into(); - let vm_sub_version = match vm_version { - VmVersion::M5WithoutRefunds => MultiVMSubversion::V1, - VmVersion::M5WithRefunds => MultiVMSubversion::V2, - _ => panic!("Unsupported protocol version for vm_m5: {:?}", vm_version), - }; - Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) - } - fn push_transaction(&mut self, tx: Transaction) { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, @@ -119,7 +111,7 @@ impl VmInterface for Vm { } fn get_current_execution_state(&self) -> CurrentExecutionState { - let (_full_history, raw_events, l1_messages) = self.vm.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); let events = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -137,14 +129,6 @@ impl VmInterface for Vm { }) }) .collect(); - let total_log_queries = self.vm.state.event_sink.get_log_queries() - + self - .vm - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.vm.get_final_log_queries().len(); let used_contract_hashes = self .vm @@ -182,10 +166,7 @@ impl VmInterface for Vm { used_contract_hashes, system_logs: vec![], user_l2_to_l1_logs: l2_to_l1_logs, - total_log_queries, - cycles_used: self.vm.state.local_state.monotonic_cycle_counter, - // It's not applicable for `vm5` - deduplicated_events_logs: vec![], + // Fields below are not produced by `vm5` storage_refunds: vec![], pubdata_costs: vec![], } @@ -234,7 +215,19 @@ impl VmInterface for Vm { } } -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let vm_version: VmVersion = system_env.version.into(); + let vm_sub_version = match vm_version { + VmVersion::M5WithoutRefunds => MultiVMSubversion::V1, + VmVersion::M5WithRefunds => MultiVMSubversion::V2, + _ => panic!("Unsupported protocol version for vm_m5: {:?}", vm_version), + }; + Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.vm.save_current_vm_as_snapshot() } diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index 085c219f43c..3f708f3470f 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -748,7 +748,7 @@ impl VmInstance { // Collecting `block_tip_result` needs logs with 
timestamp, so we drain events for the `full_result` // after because draining will drop timestamps. - let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); full_result.events = merge_events(raw_events) .into_iter() .map(|e| { diff --git a/core/lib/multivm/src/versions/vm_m6/event_sink.rs b/core/lib/multivm/src/versions/vm_m6/event_sink.rs index 56fe8dcb11e..bf39b5962d9 100644 --- a/core/lib/multivm/src/versions/vm_m6/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_m6/event_sink.rs @@ -30,7 +30,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { ); // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.to_vec(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index fe2deb4181a..634867697a9 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -6,7 +6,8 @@ use zk_evm_1_3_1::aux_structures::LogQuery; use zksync_state::StoragePtr; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Transaction, VmVersion, + vm::VmVersion, + Transaction, }; use zksync_utils::{ bytecode::{hash_bytecode, CompressedBytecodeInfo}, @@ -18,7 +19,8 @@ use crate::{ interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::old_tracers::TracerDispatcher, vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, @@ -64,19 +66,9 @@ impl Vm { } } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let vm_version: VmVersion = system_env.version.into(); - let vm_sub_version = match vm_version { - VmVersion::M6Initial => MultiVMSubversion::V1, - VmVersion::M6BugWithCompressionFixed => MultiVMSubversion::V2, - _ => panic!("Unsupported protocol version for vm_m6: {:?}", vm_version), - }; - Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) - } - fn push_transaction(&mut self, tx: Transaction) { crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, @@ -135,7 +127,7 @@ impl VmInterface for Vm { } fn get_current_execution_state(&self) -> CurrentExecutionState { - let (_full_history, raw_events, l1_messages) = self.vm.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); let events = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -153,14 +145,6 @@ impl VmInterface for Vm { }) }) .collect(); - let total_log_queries = self.vm.state.event_sink.get_log_queries() - + self - .vm - .state - 
.precompiles_processor - .get_timestamp_history() - .len() - + self.vm.get_final_log_queries().len(); let used_contract_hashes = self .vm @@ -196,13 +180,10 @@ impl VmInterface for Vm { .map(GlueInto::glue_into) .collect(), used_contract_hashes, + user_l2_to_l1_logs: l2_to_l1_logs, + // Fields below are not produced by `vm6` system_logs: vec![], - total_log_queries, - cycles_used: self.vm.state.local_state.monotonic_cycle_counter, - // It's not applicable for `vm6` - deduplicated_events_logs: vec![], storage_refunds: vec![], - user_l2_to_l1_logs: l2_to_l1_logs, pubdata_costs: vec![], } } @@ -323,7 +304,19 @@ impl VmInterface for Vm { } } -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let vm_version: VmVersion = system_env.version.into(); + let vm_sub_version = match vm_version { + VmVersion::M6Initial => MultiVMSubversion::V1, + VmVersion::M6BugWithCompressionFixed => MultiVMSubversion::V2, + _ => panic!("Unsupported protocol version for vm_m6: {:?}", vm_version), + }; + Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.vm.save_current_vm_as_snapshot() } @@ -333,6 +326,6 @@ impl VmInterfaceHistoryEnabled for Vm VmInstance { } /// Removes the latest snapshot without rolling back to it. - /// This function expects that there is at least one snapshot present. pub fn pop_snapshot_no_rollback(&mut self) { - self.snapshots.pop().unwrap(); + self.snapshots.pop(); } /// Returns the amount of gas remaining to the VM. @@ -781,7 +780,7 @@ impl VmInstance { // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` // after because draining will drop timestamps. 
- let (_full_history, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); full_result.events = merge_events(raw_events) .into_iter() .map(|e| { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs index 2af642d358d..b9e0f1b61b3 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs @@ -29,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.iter().map(|x| **x).collect(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 0bac1d7d47d..c580b84e202 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -7,7 +7,7 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, @@ -35,22 +35,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true) @@ -83,7 +70,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. 
fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -93,13 +80,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| UserL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -115,12 +95,6 @@ impl VmInterface for Vm { used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: self.state.storage.returned_refunds.inner().clone(), pubdata_costs: Vec::new(), } @@ -157,14 +131,27 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { - self.make_snapshot_inner() + self.make_snapshot_inner(); } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -173,10 +160,7 @@ impl VmInterfaceHistoryEnabled for Vm { self.rollback_to_snapshot(snapshot); } - /// Pop the latest snapshot from the memory and destroy it fn pop_snapshot_no_rollback(&mut self) { - self.snapshots - .pop() - .expect("Snapshot should be created before rolling it back"); + self.snapshots.pop(); } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs index eadfe70d0a7..0d1c8ee554c 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs @@ -29,7 +29,7 @@ impl OracleWithHistory for InMemoryEventSink { // otherwise we carry rollbacks to the parent's frames impl InMemoryEventSink { - pub fn flatten(&self) -> (Vec, Vec, Vec) { + pub fn flatten(&self) -> (Vec, Vec) { assert_eq!( self.frames_stack.len(), 1, @@ -38,8 +38,7 @@ impl InMemoryEventSink { // we forget rollbacks as we have finished the execution and can just apply them let history = self.frames_stack.forward().current_frame(); - let (events, l1_messages) = Self::events_and_l1_messages_from_history(history); - (history.iter().map(|x| **x).collect(), events, l1_messages) + Self::events_and_l1_messages_from_history(history) } pub fn get_log_queries(&self) -> usize { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index ec9b12e82ed..a7cef17591a 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -7,7 +7,7 @@ use crate::{ glue::GlueInto, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, @@ -35,22 +35,9 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { +impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { - let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); - Self { - bootloader_state, - state, - storage, - system_env, - batch_env, - snapshots: vec![], - _phantom: Default::default(), - } - } - /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: Transaction) { self.push_transaction_with_compression(tx, true) @@ -83,7 +70,7 @@ impl VmInterface for Vm { /// This method should be used only after the batch execution. /// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { - let (deduplicated_events_logs, raw_events, l1_messages) = self.state.event_sink.flatten(); + let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() .map(|e| e.into_vm_event(self.batch_env.number)) @@ -93,13 +80,6 @@ impl VmInterface for Vm { .into_iter() .map(|log| UserL2ToL1Log(log.glue_into())) .collect(); - let total_log_queries = self.state.event_sink.get_log_queries() - + self - .state - .precompiles_processor - .get_timestamp_history() - .len() - + self.state.storage.get_final_log_queries().len(); let storage_log_queries = self.state.storage.get_final_log_queries(); @@ -115,12 +95,6 @@ impl VmInterface for Vm { used_contract_hashes: self.get_used_contracts(), user_l2_to_l1_logs: l2_to_l1_logs, system_logs: vec![], - total_log_queries, - cycles_used: self.state.local_state.monotonic_cycle_counter, - deduplicated_events_logs: deduplicated_events_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), storage_refunds: Vec::new(), pubdata_costs: Vec::new(), } @@ -157,14 +131,26 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipulations -impl VmInterfaceHistoryEnabled for Vm { - /// Create snapshot of current vm state and push it into the memory +impl VmFactory for Vm { + fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); + Self { + bootloader_state, + state, + storage, + system_env, + batch_env, + snapshots: vec![], + _phantom: Default::default(), + } + } +} + +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { self.make_snapshot_inner() } - /// Rollback vm state to the latest snapshot and destroy the snapshot fn rollback_to_the_latest_snapshot(&mut self) { let snapshot = self .snapshots @@ -173,10 +159,7 @@ impl VmInterfaceHistoryEnabled for Vm { self.rollback_to_snapshot(snapshot); } - /// Pop the latest snapshot from the memory and destroy it fn pop_snapshot_no_rollback(&mut self) { - self.snapshots - .pop() - .expect("Snapshot should be created before rolling it back"); + self.snapshots.pop(); } } diff --git 
a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 74cb93c494b..c8a7ce83799 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,28 +1,33 @@ -use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::VmVersion; +use zksync_state::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}; +use zksync_types::vm::{FastVmMode, VmVersion}; use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::history_mode::HistoryMode, interface::{ BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::TracerDispatcher, + versions::shadow::ShadowVm, }; +pub type ShadowedFastVm = ShadowVm, H>>; + #[derive(Debug)] -pub enum VmInstance { - VmM5(crate::vm_m5::Vm), - VmM6(crate::vm_m6::Vm), - Vm1_3_2(crate::vm_1_3_2::Vm), - VmVirtualBlocks(crate::vm_virtual_blocks::Vm), - VmVirtualBlocksRefundsEnhancement(crate::vm_refunds_enhancement::Vm), - VmBoojumIntegration(crate::vm_boojum_integration::Vm), - Vm1_4_1(crate::vm_1_4_1::Vm), - Vm1_4_2(crate::vm_1_4_2::Vm), - Vm1_5_0(crate::vm_latest::Vm), +pub enum VmInstance { + VmM5(crate::vm_m5::Vm, H>), + VmM6(crate::vm_m6::Vm, H>), + Vm1_3_2(crate::vm_1_3_2::Vm, H>), + VmVirtualBlocks(crate::vm_virtual_blocks::Vm, H>), + VmVirtualBlocksRefundsEnhancement(crate::vm_refunds_enhancement::Vm, H>), + VmBoojumIntegration(crate::vm_boojum_integration::Vm, H>), + Vm1_4_1(crate::vm_1_4_1::Vm, H>), + Vm1_4_2(crate::vm_1_4_2::Vm, H>), + Vm1_5_0(crate::vm_latest::Vm, H>), + VmFast(crate::vm_fast::Vm>), + ShadowedVmFast(ShadowedFastVm), } macro_rules! dispatch_vm { @@ -37,18 +42,14 @@ macro_rules! 
dispatch_vm { VmInstance::Vm1_4_1(vm) => vm.$function($($params)*), VmInstance::Vm1_4_2(vm) => vm.$function($($params)*), VmInstance::Vm1_5_0(vm) => vm.$function($($params)*), + VmInstance::VmFast(vm) => vm.$function($($params)*), + VmInstance::ShadowedVmFast(vm) => vm.$function($($params)*), } }; } -impl VmInterface for VmInstance { - type TracerDispatcher = TracerDispatcher; - - fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage_view: StoragePtr) -> Self { - let protocol_version = system_env.version; - let vm_version: VmVersion = protocol_version.into(); - Self::new_with_specific_version(batch_env, system_env, storage_view, vm_version) - } +impl VmInterface for VmInstance { + type TracerDispatcher = TracerDispatcher, H>; /// Push tx into memory for the future execution fn push_transaction(&mut self, tx: zksync_types::Transaction) { @@ -130,9 +131,19 @@ impl VmInterface for VmInstance { } } -impl VmInterfaceHistoryEnabled - for VmInstance -{ +impl VmFactory> for VmInstance { + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage_view: StoragePtr>, + ) -> Self { + let protocol_version = system_env.version; + let vm_version: VmVersion = protocol_version.into(); + Self::new_with_specific_version(batch_env, system_env, storage_view, vm_version) + } +} + +impl VmInterfaceHistoryEnabled for VmInstance { fn make_snapshot(&mut self) { dispatch_vm!(self.make_snapshot()) } @@ -146,11 +157,11 @@ impl VmInterfaceHistoryEnabled } } -impl VmInstance { +impl VmInstance { pub fn new_with_specific_version( l1_batch_env: L1BatchEnv, system_env: SystemEnv, - storage_view: StoragePtr, + storage_view: StoragePtr>, vm_version: VmVersion, ) -> Self { match vm_version { @@ -236,4 +247,27 @@ impl VmInstance { } } } + + /// Creates a VM that may use the fast VM depending on the protocol version in `system_env` and `mode`. 
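A hedged usage sketch (not part of the diff) for the constructor added below; building `l1_batch_env`, `system_env` and `storage_view` is assumed to happen elsewhere, and `FastVmMode::Shadow` presumably runs the legacy and fast VMs side by side so divergences surface:

    use zksync_multivm::{vm_latest::HistoryEnabled, VmInstance};
    use zksync_types::vm::FastVmMode;

    // `Old` keeps the legacy VM, `New` switches to the fast VM, `Shadow` wraps both in `ShadowVm`.
    let vm: VmInstance<_, HistoryEnabled> =
        VmInstance::maybe_fast(l1_batch_env, system_env, storage_view, FastVmMode::Shadow);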
+ pub fn maybe_fast( + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + storage_view: StoragePtr>, + mode: FastVmMode, + ) -> Self { + let vm_version = system_env.version.into(); + match vm_version { + VmVersion::Vm1_5_0IncreasedBootloaderMemory => match mode { + FastVmMode::Old => Self::new(l1_batch_env, system_env, storage_view), + FastVmMode::New => { + let storage = ImmutableStorageView::new(storage_view); + Self::VmFast(crate::vm_fast::Vm::new(l1_batch_env, system_env, storage)) + } + FastVmMode::Shadow => { + Self::ShadowedVmFast(ShadowVm::new(l1_batch_env, system_env, storage_view)) + } + }, + _ => Self::new(l1_batch_env, system_env, storage_view), + } + } } diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 8d92f3ef87a..cb959e22904 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -1,10 +1,11 @@ use std::num::NonZeroU32; use anyhow::Context as _; +use zksync_basic_types::{vm::FastVmMode, L1BatchNumber}; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; -use crate::proto::experimental as proto; +use crate::{proto::experimental as proto, read_optional_repr}; impl ProtoRepr for proto::Db { type Type = configs::ExperimentalDBConfig; @@ -49,3 +50,76 @@ impl ProtoRepr for proto::Db { } } } + +impl proto::FastVmMode { + fn new(source: FastVmMode) -> Self { + match source { + FastVmMode::Old => Self::Old, + FastVmMode::New => Self::New, + FastVmMode::Shadow => Self::Shadow, + } + } + + fn parse(&self) -> FastVmMode { + match self { + Self::Old => FastVmMode::Old, + Self::New => FastVmMode::New, + Self::Shadow => FastVmMode::Shadow, + } + } +} + +impl ProtoRepr for proto::VmPlayground { + type Type = configs::ExperimentalVmPlaygroundConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + fast_vm_mode: self + .fast_vm_mode + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? + .map_or_else(FastVmMode::default, |mode| mode.parse()), + db_path: self + .db_path + .clone() + .unwrap_or_else(Self::Type::default_db_path), + first_processed_batch: L1BatchNumber(self.first_processed_batch.unwrap_or(0)), + reset: self.reset.unwrap_or(false), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + fast_vm_mode: Some(proto::FastVmMode::new(this.fast_vm_mode).into()), + db_path: Some(this.db_path.clone()), + first_processed_batch: Some(this.first_processed_batch.0), + reset: Some(this.reset), + } + } +} + +impl ProtoRepr for proto::Vm { + type Type = configs::ExperimentalVmConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + playground: read_optional_repr(&self.playground).unwrap_or_default(), + state_keeper_fast_vm_mode: self + .state_keeper_fast_vm_mode + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? 
+ .map_or_else(FastVmMode::default, |mode| mode.parse()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + playground: Some(ProtoRepr::build(&this.playground)), + state_keeper_fast_vm_mode: Some( + proto::FastVmMode::new(this.state_keeper_fast_vm_mode).into(), + ), + } + } +} diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index 88da1899760..af6f690dfc8 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -43,6 +43,7 @@ impl ProtoRepr for proto::GeneralConfig { external_proof_integration_api_config: read_optional_repr( &self.external_proof_integration_api, ), + experimental_vm_config: read_optional_repr(&self.experimental_vm), }) } @@ -97,6 +98,7 @@ impl ProtoRepr for proto::GeneralConfig { .external_proof_integration_api_config .as_ref() .map(ProtoRepr::build), + experimental_vm: this.experimental_vm_config.as_ref().map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 1336c4719d2..1682b2c9a83 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -18,3 +18,22 @@ message SnapshotRecovery { optional uint64 tree_recovery_parallel_persistence_buffer = 1; optional bool drop_storage_key_preimages = 2; // optional; false by default } + +enum FastVmMode { + OLD = 0; + NEW = 1; + SHADOW = 2; +} + +// Experimental VM configuration +message VmPlayground { + optional FastVmMode fast_vm_mode = 1; // optional; if not set, fast VM is not used + optional string db_path = 2; // optional; defaults to `./db/vm_playground` + optional uint32 first_processed_batch = 3; // optional; defaults to 0 + optional bool reset = 4; // optional; defaults to false +} + +message Vm { + optional VmPlayground playground = 1; // optional + optional FastVmMode state_keeper_fast_vm_mode = 2; // optional; if not set, fast VM is not used +} diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index cb4629f2d85..373559e7351 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -9,6 +9,7 @@ import "zksync/config/contract_verifier.proto"; import "zksync/config/database.proto"; import "zksync/config/circuit_breaker.proto"; import "zksync/config/eth_sender.proto"; +import "zksync/config/experimental.proto"; import "zksync/config/house_keeper.proto"; import "zksync/config/observability.proto"; import "zksync/config/snapshots_creator.proto"; @@ -56,4 +57,5 @@ message GeneralConfig { optional external_price_api_client.ExternalPriceApiClient external_price_api_client = 41; optional core.consensus.Config consensus = 42; optional external_proof_integration_api.ExternalProofIntegrationApi external_proof_integration_api = 43; + optional experimental.Vm experimental_vm = 44; } diff --git a/core/lib/protobuf_config/src/proto/config/vm_runner.proto b/core/lib/protobuf_config/src/proto/config/vm_runner.proto index 93521a5fd89..d6537c109e6 100644 --- a/core/lib/protobuf_config/src/proto/config/vm_runner.proto +++ b/core/lib/protobuf_config/src/proto/config/vm_runner.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package zksync.config.vm_runner; +import "zksync/config/experimental.proto"; + message ProtectiveReadsWriter { optional string db_path = 1; // required; fs path optional uint64 
window_size = 2; // required diff --git a/core/lib/protobuf_config/src/vm_runner.rs b/core/lib/protobuf_config/src/vm_runner.rs index cc0d53ad519..134cc20952f 100644 --- a/core/lib/protobuf_config/src/vm_runner.rs +++ b/core/lib/protobuf_config/src/vm_runner.rs @@ -1,6 +1,6 @@ use anyhow::Context; use zksync_basic_types::L1BatchNumber; -use zksync_config::configs::{self}; +use zksync_config::configs; use zksync_protobuf::{required, ProtoRepr}; use crate::proto::vm_runner as proto; diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index 5044490c46d..7041b9bc2a6 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -32,7 +32,7 @@ pub use self::{ BatchDiff, OwnedPostgresStorage, OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbWithMemory, }, - storage_view::{StorageView, StorageViewCache, StorageViewMetrics}, + storage_view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewMetrics}, witness::WitnessStorage, }; @@ -92,3 +92,21 @@ pub trait WriteStorage: ReadStorage { /// Smart pointer to [`WriteStorage`]. pub type StoragePtr = Rc>; + +impl ReadStorage for StoragePtr { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + self.borrow_mut().read_value(key) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.borrow_mut().is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.borrow_mut().load_factory_dep(hash) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.borrow_mut().get_enumeration_index(key) + } +} diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 7dcfda2ba40..b01f423f078 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -8,7 +8,7 @@ use std::{ use zksync_types::{StorageKey, StorageValue, H256}; -use crate::{ReadStorage, WriteStorage}; +use crate::{ReadStorage, StoragePtr, WriteStorage}; /// Metrics for [`StorageView`]. #[derive(Debug, Default, Clone, Copy)] @@ -224,6 +224,46 @@ impl WriteStorage for StorageView { } } +/// Immutable wrapper around [`StorageView`] that reads directly from the underlying storage ignoring any +/// modifications in the [`StorageView`]. Used by the fast VM, which has its own internal management of writes. +#[derive(Debug)] +pub struct ImmutableStorageView(StoragePtr>); + +impl ImmutableStorageView { + /// Creates a new view based on the provided storage pointer. + pub fn new(ptr: StoragePtr>) -> Self { + Self(ptr) + } +} + +// All methods other than `read_value()` do not read back modified storage slots, so we proxy them as-is. 
+impl ReadStorage for ImmutableStorageView { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let started_at = Instant::now(); + let mut this = self.0.borrow_mut(); + let cached_value = this.read_storage_keys().get(key); + cached_value.copied().unwrap_or_else(|| { + let value = this.storage_handle.read_value(key); + this.cache.read_storage_keys.insert(*key, value); + this.metrics.time_spent_on_storage_missed += started_at.elapsed(); + this.metrics.storage_invocations_missed += 1; + value + }) + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.0.borrow_mut().is_write_initial(key) + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.0.borrow_mut().load_factory_dep(hash) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.0.borrow_mut().get_enumeration_index(key) + } +} + #[cfg(test)] mod test { use zksync_types::{AccountTreeId, Address, H256}; @@ -272,4 +312,23 @@ mod test { assert_eq!(metrics.get_value_storage_invocations, 3); assert_eq!(metrics.set_value_storage_invocations, 2); } + + #[test] + fn immutable_storage_view() { + let account: AccountTreeId = AccountTreeId::new(Address::from([0xfe; 20])); + let key = H256::from_low_u64_be(61); + let value = H256::from_low_u64_be(73); + let key = StorageKey::new(account, key); + + let mut raw_storage = InMemoryStorage::default(); + raw_storage.set_value(key, value); + let storage_view = StorageView::new(raw_storage).to_rc_ptr(); + let mut immutable_view = ImmutableStorageView::new(storage_view.clone()); + + let new_value = H256::repeat_byte(0x11); + let prev_value = storage_view.borrow_mut().set_value(key, new_value); + assert_eq!(prev_value, value); + + assert_eq!(immutable_view.read_value(&key), value); + } } diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index b69b295130d..32443b60c8c 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -12,14 +12,14 @@ use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ - interface::{FinishedL1Batch, L2BlockEnv, VmInterface}, + interface::{FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface}, vm_latest::HistoryEnabled, VmInstance, }; use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_state::{InMemoryStorage, StorageView, WriteStorage}; +use zksync_state::{InMemoryStorage, ReadStorage, StorageView}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, H256}; use zksync_utils::bytecode::hash_bytecode; use zksync_vm_utils::execute_tx; @@ -154,7 +154,7 @@ fn get_bowp_and_set_initial_values( } /// Executes the VM and returns `FinishedL1Batch` on success. 
-fn execute_vm( +fn execute_vm( l2_blocks_execution_data: Vec, mut vm: VmInstance, ) -> anyhow::Result { diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 105d43aa6c6..a55f6b5753d 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -16,7 +16,7 @@ pub use protocol_upgrade::{ProtocolUpgrade, ProtocolVersion}; use serde::{Deserialize, Serialize}; pub use storage::*; pub use tx::Execute; -pub use zksync_basic_types::{protocol_version::ProtocolVersionId, vm_version::VmVersion, *}; +pub use zksync_basic_types::{protocol_version::ProtocolVersionId, vm, *}; pub use zksync_crypto_primitives::*; use zksync_utils::{ address_to_u256, bytecode::hash_bytecode, h256_to_u256, u256_to_account_address, diff --git a/core/lib/types/src/utils.rs b/core/lib/types/src/utils.rs index 2bbbc34e8f7..bf086d6cdcd 100644 --- a/core/lib/types/src/utils.rs +++ b/core/lib/types/src/utils.rs @@ -50,7 +50,7 @@ pub fn nonces_to_full_nonce(tx_nonce: U256, deploy_nonce: U256) -> U256 { DEPLOYMENT_NONCE_INCREMENT * deploy_nonce + tx_nonce } -fn key_for_eth_balance(address: &Address) -> H256 { +pub fn key_for_eth_balance(address: &Address) -> H256 { let address_h256 = address_to_h256(address); let bytes = [address_h256.as_bytes(), &[0; 32]].concat(); diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs index 9cec0e13be8..a3ec715851a 100644 --- a/core/lib/vm_utils/src/lib.rs +++ b/core/lib/vm_utils/src/lib.rs @@ -1,20 +1,20 @@ -pub mod storage; - use anyhow::{anyhow, Context}; use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::{ - interface::{VmInterface, VmInterfaceHistoryEnabled}, + interface::{VmFactory, VmInterface, VmInterfaceHistoryEnabled}, vm_latest::HistoryEnabled, VmInstance, }; -use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; +use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; use crate::storage::L1BatchParamsProvider; +pub mod storage; + pub type VmAndStorage<'a> = ( - VmInstance>, HistoryEnabled>, + VmInstance, HistoryEnabled>, StoragePtr>>, ); @@ -58,7 +58,7 @@ pub fn create_vm( Ok((vm, storage_view)) } -pub fn execute_tx( +pub fn execute_tx( tx: &Transaction, vm: &mut VmInstance, ) -> anyhow::Result<()> { diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index c443fb9e8e0..9d399bdd0af 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -64,6 +64,8 @@ pub enum Component { VmRunnerBwip, /// External prover API that is used to retrieve data for proving and verifies final proofs against ones, generated by us ExternalProofIntegrationApi, + /// VM runner-based component that allows to test experimental VM features. Doesn't save any data to Postgres. 
+ VmPlayground, } #[derive(Debug)] @@ -108,6 +110,7 @@ impl FromStr for Components { Ok(Components(vec![Component::BaseTokenRatioPersister])) } "vm_runner_bwip" => Ok(Components(vec![Component::VmRunnerBwip])), + "vm_playground" => Ok(Components(vec![Component::VmPlayground])), "external_proof_integration_api" => { Ok(Components(vec![Component::ExternalProofIntegrationApi])) } diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index 6d2a06605be..d25c46bda08 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -12,11 +12,11 @@ use zksync_config::{ house_keeper::HouseKeeperConfig, vm_runner::BasicWitnessInputProducerConfig, wallets::{AddressWallet, EthSender, StateKeeper, Wallet, Wallets}, - CommitmentGeneratorConfig, DatabaseSecrets, ExternalPriceApiClientConfig, - FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, - FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, - ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, - PruningConfig, SnapshotRecoveryConfig, + CommitmentGeneratorConfig, DatabaseSecrets, ExperimentalVmConfig, + ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, + FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, + GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, + ProtectiveReadsWriterConfig, PruningConfig, SnapshotRecoveryConfig, }, ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, @@ -78,6 +78,7 @@ pub struct TempConfigStore { pub snapshot_recovery: Option, pub external_price_api_client_config: Option, pub external_proof_integration_api_config: Option, + pub experimental_vm_config: Option, } impl TempConfigStore { @@ -116,6 +117,7 @@ impl TempConfigStore { external_proof_integration_api_config: self .external_proof_integration_api_config .clone(), + experimental_vm_config: self.experimental_vm_config.clone(), } } @@ -188,10 +190,11 @@ fn load_env_config() -> anyhow::Result { snapshot_recovery: None, external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), + experimental_vm_config: ExperimentalVmConfig::from_env().ok(), }) } -pub fn load_general_config(path: Option) -> anyhow::Result { +pub fn load_general_config(path: Option) -> anyhow::Result { match path { Some(path) => { let yaml = std::fs::read_to_string(path).context("Failed to read general config")?; @@ -203,7 +206,7 @@ pub fn load_general_config(path: Option) -> anyhow::Result) -> anyhow::Result { +pub fn load_database_secrets(path: Option) -> anyhow::Result { match path { Some(path) => { let yaml = std::fs::read_to_string(path).context("Failed to read secrets")?; diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index a65538e2502..99664697b14 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -40,7 +40,7 @@ use super::{ }; type VmStorageView<'a> = StorageView>>; -type BoxedVm<'a> = Box, HistoryDisabled>>; +type BoxedVm<'a> = Box>, HistoryDisabled>>; #[derive(Debug)] struct Sandbox<'a> { @@ -301,7 +301,7 @@ 
pub(super) fn apply_vm_in_sandbox( block_args: BlockArgs, // Block arguments for the transaction. state_override: Option, apply: impl FnOnce( - &mut VmInstance, HistoryDisabled>, + &mut VmInstance>, HistoryDisabled>, Transaction, ProtocolVersionId, ) -> T, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 38939937fcd..826200b5537 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -31,9 +31,9 @@ use zksync_types::{ l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, + vm::VmVersion, AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, - ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_L2_TX_GAS_LIMIT, - MAX_NEW_FACTORY_DEPS, U256, + ProtocolVersionId, Transaction, H160, H256, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index b4e6bc542e9..59f8753859a 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -15,7 +15,7 @@ use zk_evm_1_5_0::{ zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0, }; use zksync_multivm::utils::get_used_bootloader_memory_bytes; -use zksync_types::{zk_evm_types::LogQuery, ProtocolVersionId, VmVersion, H256, U256}; +use zksync_types::{vm::VmVersion, zk_evm_types::LogQuery, ProtocolVersionId, H256, U256}; use zksync_utils::expand_memory_contents; /// Encapsulates computations of commitment components. diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index 33d3b5676aa..3288b68bdeb 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -1,9 +1,9 @@ use zksync_state_keeper::MainBatchExecutor; +use zksync_types::vm::FastVmMode; use crate::{ implementations::resources::state_keeper::BatchExecutorResource, wiring_layer::{WiringError, WiringLayer}, - IntoContext, }; /// Wiring layer for `MainBatchExecutor`, part of the state keeper responsible for running the VM. 
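The wiring-layer hunk below threads a `fast_vm_mode` setting through `MainBatchExecutorLayer` via a consuming `with_fast_vm_mode` builder method that defaults to the old VM. A minimal self-contained sketch of that configuration pattern; the enum variants mirror `FastVmMode::{Old, New, Shadow}` used elsewhere in this diff, while the layer struct is a simplified stand-in, not the real type:

```rust
#[derive(Debug, Clone, Copy, Default, PartialEq)]
enum FastVmMode {
    /// Run only the old (battle-tested) VM.
    #[default]
    Old,
    /// Run only the new fast VM.
    New,
    /// Run both VMs and compare their outputs.
    Shadow,
}

#[derive(Debug)]
struct BatchExecutorLayer {
    save_call_traces: bool,
    optional_bytecode_compression: bool,
    fast_vm_mode: FastVmMode,
}

impl BatchExecutorLayer {
    fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self {
        Self {
            save_call_traces,
            optional_bytecode_compression,
            fast_vm_mode: FastVmMode::default(),
        }
    }

    /// Consuming setter, so the mode can be chained onto the constructor.
    fn with_fast_vm_mode(mut self, mode: FastVmMode) -> Self {
        self.fast_vm_mode = mode;
        self
    }
}

fn main() {
    let layer = BatchExecutorLayer::new(true, false).with_fast_vm_mode(FastVmMode::Shadow);
    assert_eq!(layer.fast_vm_mode, FastVmMode::Shadow);
    println!(
        "traces: {}, optional compression: {}, mode: {:?}",
        layer.save_call_traces, layer.optional_bytecode_compression, layer.fast_vm_mode
    );
}
```

The consuming setter keeps the default path untouched: callers that never ask for the fast VM keep getting `FastVmMode::Old`.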
@@ -11,12 +11,7 @@ use crate::{ pub struct MainBatchExecutorLayer { save_call_traces: bool, optional_bytecode_compression: bool, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub batch_executor: BatchExecutorResource, + fast_vm_mode: FastVmMode, } impl MainBatchExecutorLayer { @@ -24,25 +19,29 @@ impl MainBatchExecutorLayer { Self { save_call_traces, optional_bytecode_compression, + fast_vm_mode: FastVmMode::default(), } } + + pub fn with_fast_vm_mode(mut self, mode: FastVmMode) -> Self { + self.fast_vm_mode = mode; + self + } } #[async_trait::async_trait] impl WiringLayer for MainBatchExecutorLayer { type Input = (); - type Output = Output; + type Output = BatchExecutorResource; fn layer_name(&self) -> &'static str { "main_batch_executor_layer" } - async fn wire(self, _input: Self::Input) -> Result { - let builder = + async fn wire(self, (): Self::Input) -> Result { + let mut executor = MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression); - - Ok(Output { - batch_executor: builder.into(), - }) + executor.set_fast_vm_mode(self.fast_vm_mode); + Ok(executor.into()) } } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index 74b4b5e3207..ee2fb84416e 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -1,8 +1,9 @@ use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig; +use zksync_state_keeper::MainBatchExecutor; use zksync_types::L2ChainId; use zksync_vm_runner::{ - BasicWitnessInputProducer, BasicWitnessInputProducerIo, ConcurrentOutputHandlerFactoryTask, - StorageSyncTask, + impls::{BasicWitnessInputProducer, BasicWitnessInputProducerIo}, + ConcurrentOutputHandlerFactoryTask, StorageSyncTask, }; use crate::{ @@ -18,17 +19,14 @@ use crate::{ #[derive(Debug)] pub struct BasicWitnessInputProducerLayer { - basic_witness_input_producer_config: BasicWitnessInputProducerConfig, + config: BasicWitnessInputProducerConfig, zksync_network_id: L2ChainId, } impl BasicWitnessInputProducerLayer { - pub fn new( - basic_witness_input_producer_config: BasicWitnessInputProducerConfig, - zksync_network_id: L2ChainId, - ) -> Self { + pub fn new(config: BasicWitnessInputProducerConfig, zksync_network_id: L2ChainId) -> Self { Self { - basic_witness_input_producer_config, + config, zksync_network_id, } } @@ -68,25 +66,26 @@ impl WiringLayer for BasicWitnessInputProducerLayer { object_store, } = input; + // - 1 connection for `StorageSyncTask` which can hold a long-term connection in case it needs to + // catch up cache. + // - 1 connection for `ConcurrentOutputHandlerFactoryTask` / `VmRunner` as they need occasional access + // to DB for querying last processed batch and last ready to be loaded batch. + // - `window_size` connections for `BasicWitnessInputProducer` + // as there can be multiple output handlers holding multi-second connections to process + // BWIP data. + let connection_pool = master_pool.get_custom(self.config.window_size + 2).await?; + + // We don't get the executor from the context because it would contain state keeper-specific settings. + let batch_executor = Box::new(MainBatchExecutor::new(false, false)); + let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( - // One for `StorageSyncTask` which can hold a long-term connection in case it needs to - // catch up cache. 
- // - // One for `ConcurrentOutputHandlerFactoryTask`/`VmRunner` as they need occasional access - // to DB for querying last processed batch and last ready to be loaded batch. - // - // `window_size` connections for `BasicWitnessInputProducer` - // as there can be multiple output handlers holding multi-second connections to process - // BWIP data. - master_pool - .get_custom(self.basic_witness_input_producer_config.window_size + 2) - .await?, + connection_pool, object_store.0, - self.basic_witness_input_producer_config.db_path, + batch_executor, + self.config.db_path, self.zksync_network_id, - self.basic_witness_input_producer_config - .first_processed_batch, - self.basic_witness_input_producer_config.window_size, + self.config.first_processed_batch, + self.config.window_size, ) .await?; diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs index 91e92ffcd1b..85b7028bc79 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/mod.rs @@ -6,6 +6,7 @@ use crate::{ }; pub mod bwip; +pub mod playground; pub mod protective_reads; #[async_trait::async_trait] diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs new file mode 100644 index 00000000000..810d538ba97 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs @@ -0,0 +1,117 @@ +use async_trait::async_trait; +use zksync_config::configs::ExperimentalVmPlaygroundConfig; +use zksync_node_framework_derive::{FromContext, IntoContext}; +use zksync_types::L2ChainId; +use zksync_vm_runner::{ + impls::{VmPlayground, VmPlaygroundIo, VmPlaygroundLoaderTask}, + ConcurrentOutputHandlerFactoryTask, +}; + +use crate::{ + implementations::resources::{ + healthcheck::AppHealthCheckResource, + pools::{MasterPool, PoolResource}, + }, + StopReceiver, Task, TaskId, WiringError, WiringLayer, +}; + +#[derive(Debug)] +pub struct VmPlaygroundLayer { + config: ExperimentalVmPlaygroundConfig, + zksync_network_id: L2ChainId, +} + +impl VmPlaygroundLayer { + pub fn new(config: ExperimentalVmPlaygroundConfig, zksync_network_id: L2ChainId) -> Self { + Self { + config, + zksync_network_id, + } + } +} + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, + #[context(default)] + pub app_health: AppHealthCheckResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, + #[context(task)] + pub loader_task: VmPlaygroundLoaderTask, + #[context(task)] + pub playground: VmPlayground, +} + +#[async_trait] +impl WiringLayer for VmPlaygroundLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "vm_runner_playground" + } + + async fn wire(self, input: Self::Input) -> Result { + let Input { + master_pool, + app_health, + } = input; + + // - 1 connection for `StorageSyncTask` which can hold a long-term connection in case it needs to + // catch up cache. + // - 1 connection for `ConcurrentOutputHandlerFactoryTask` / `VmRunner` as they need occasional access + // to DB for querying last processed batch and last ready to be loaded batch. + // - 1 connection for the only running VM instance. 
+ let connection_pool = master_pool.get_custom(3).await?; + + let (playground, tasks) = VmPlayground::new( + connection_pool, + self.config.fast_vm_mode, + self.config.db_path, + self.zksync_network_id, + self.config.first_processed_batch, + self.config.reset, + ) + .await?; + + app_health + .0 + .insert_component(playground.health_check()) + .map_err(WiringError::internal)?; + + Ok(Output { + output_handler_factory_task: tasks.output_handler_factory_task, + loader_task: tasks.loader_task, + playground, + }) + } +} + +#[async_trait] +impl Task for VmPlaygroundLoaderTask { + fn id(&self) -> TaskId { + "vm_runner/playground/storage_sync".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} + +#[async_trait] +impl Task for VmPlayground { + fn id(&self) -> TaskId { + "vm_runner/playground".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(&stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs index 3b07d0cea13..a0b0d18a4d9 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/protective_reads.rs @@ -2,7 +2,8 @@ use zksync_config::configs::vm_runner::ProtectiveReadsWriterConfig; use zksync_node_framework_derive::FromContext; use zksync_types::L2ChainId; use zksync_vm_runner::{ - ConcurrentOutputHandlerFactoryTask, ProtectiveReadsIo, ProtectiveReadsWriter, StorageSyncTask, + impls::{ProtectiveReadsIo, ProtectiveReadsWriter}, + ConcurrentOutputHandlerFactoryTask, StorageSyncTask, }; use crate::{ diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index 4c85fc5bb1f..d3595323a9a 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -13,8 +13,8 @@ use zksync_multivm::{ MultiVMTracer, VmInstance, }; use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS}; -use zksync_state::{OwnedStorage, ReadStorage, StorageView, WriteStorage}; -use zksync_types::{vm_trace::Call, Transaction}; +use zksync_state::{OwnedStorage, ReadStorage, StorageView}; +use zksync_types::{vm::FastVmMode, vm_trace::Call, Transaction}; use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}; @@ -35,6 +35,7 @@ pub struct MainBatchExecutor { /// that in cases where the node is expected to process any transactions processed by the sequencer /// regardless of its configuration, this flag should be set to `true`. 
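The `Task` implementations above hand a stop receiver straight to the playground tasks; the loader task defined later in this diff additionally waits for its inner task over a `oneshot` channel and bails out early if the stop signal fires first. A self-contained sketch of that select pattern, assuming `tokio` (with the `macros`, `rt` and `sync` features) and `anyhow` as dependencies; the names here are illustrative, not the real types:

```rust
use tokio::sync::{oneshot, watch};

// Either receive the payload over the oneshot channel or exit early when the stop signal flips.
async fn run_loader(
    mut stop_receiver: watch::Receiver<bool>,
    payload: oneshot::Receiver<&'static str>,
) -> anyhow::Result<()> {
    let task = tokio::select! {
        biased;
        _ = stop_receiver.changed() => return Ok(()),
        res = payload => match res {
            Ok(task) => task,
            Err(_) => anyhow::bail!("sender dropped before spawning the task"),
        }
    };
    println!("running {task}");
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let (_stop_sender, stop_receiver) = watch::channel(false);
    let (payload_sender, payload_receiver) = oneshot::channel();
    payload_sender.send("storage sync").ok();
    run_loader(stop_receiver, payload_receiver).await
}
```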
optional_bytecode_compression: bool, + fast_vm_mode: FastVmMode, } impl MainBatchExecutor { @@ -42,8 +43,18 @@ impl MainBatchExecutor { Self { save_call_traces, optional_bytecode_compression, + fast_vm_mode: FastVmMode::Old, } } + + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + if !matches!(fast_vm_mode, FastVmMode::Old) { + tracing::warn!( + "Running new VM with mode {fast_vm_mode:?}; this can lead to incorrect node behavior" + ); + } + self.fast_vm_mode = fast_vm_mode; + } } impl BatchExecutor for MainBatchExecutor { @@ -59,6 +70,7 @@ impl BatchExecutor for MainBatchExecutor { let executor = CommandReceiver { save_call_traces: self.save_call_traces, optional_bytecode_compression: self.optional_bytecode_compression, + fast_vm_mode: self.fast_vm_mode, commands: commands_receiver, }; @@ -86,6 +98,7 @@ impl BatchExecutor for MainBatchExecutor { struct CommandReceiver { save_call_traces: bool, optional_bytecode_compression: bool, + fast_vm_mode: FastVmMode, commands: mpsc::Receiver, } @@ -99,8 +112,12 @@ impl CommandReceiver { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); let storage_view = StorageView::new(secondary_storage).to_rc_ptr(); - - let mut vm = VmInstance::new(l1_batch_params, system_env, storage_view.clone()); + let mut vm = VmInstance::maybe_fast( + l1_batch_params, + system_env, + storage_view.clone(), + self.fast_vm_mode, + ); while let Some(cmd) = self.commands.blocking_recv() { match cmd { @@ -152,12 +169,15 @@ impl CommandReceiver { tracing::info!("State keeper exited with an unfinished L1 batch"); } - fn execute_tx( + fn execute_tx( &self, tx: &Transaction, vm: &mut VmInstance, ) -> TxExecutionResult { - // Save pre-`execute_next_tx` VM snapshot. + // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot + // was already removed), or that we build on top of it (in which case, it can be removed now). + vm.pop_snapshot_no_rollback(); + // Save pre-execution VM snapshot. vm.make_snapshot(); // Execute the transaction. @@ -191,13 +211,13 @@ impl CommandReceiver { } } - fn rollback_last_tx(&self, vm: &mut VmInstance) { + fn rollback_last_tx(&self, vm: &mut VmInstance) { let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::TxRollback].start(); vm.rollback_to_the_latest_snapshot(); latency.observe(); } - fn start_next_l2_block( + fn start_next_l2_block( &self, l2_block_env: L2BlockEnv, vm: &mut VmInstance, @@ -205,7 +225,7 @@ impl CommandReceiver { vm.start_new_l2_block(l2_block_env); } - fn finish_batch( + fn finish_batch( &self, vm: &mut VmInstance, ) -> FinishedL1Batch { @@ -225,7 +245,7 @@ impl CommandReceiver { /// Attempts to execute transaction with or without bytecode compression. /// If compression fails, the transaction will be re-executed without compression. - fn execute_tx_in_vm_with_optional_compression( + fn execute_tx_in_vm_with_optional_compression( &self, tx: &Transaction, vm: &mut VmInstance, @@ -243,9 +263,6 @@ impl CommandReceiver { // it means that there is no sense in polluting the space of compressed bytecodes, // and so we re-execute the transaction, but without compression. 
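The executor changes above rework the snapshot discipline: `execute_tx` now drops the previous transaction's snapshot before taking a fresh one, and the compression fallback below rolls back and immediately re-snapshots instead of managing a second snapshot of its own. A toy, self-contained model of that invariant (a plausible motivation is that a simpler VM may support only one live rollback point; the `ToyVm` below is purely illustrative and not the real `VmInstance` API):

```rust
#[derive(Debug, Default)]
struct ToyVm {
    state: u64,
    snapshot: Option<u64>,
}

impl ToyVm {
    fn make_snapshot(&mut self) {
        assert!(self.snapshot.is_none(), "at most one snapshot may be alive");
        self.snapshot = Some(self.state);
    }

    fn pop_snapshot_no_rollback(&mut self) {
        // Dropping a stale snapshot is allowed even if none exists (first tx in a batch).
        self.snapshot = None;
    }

    fn rollback_to_the_latest_snapshot(&mut self) {
        self.state = self.snapshot.take().expect("no snapshot to roll back to");
    }

    fn execute_tx(&mut self, tx_effect: u64) {
        // A previous tx was either rolled back (snapshot already gone) or built upon
        // (its snapshot can be dropped now).
        self.pop_snapshot_no_rollback();
        self.make_snapshot();
        self.state += tx_effect;
    }

    fn rollback_last_tx(&mut self) {
        self.rollback_to_the_latest_snapshot();
    }
}

fn main() {
    let mut vm = ToyVm::default();
    vm.execute_tx(10);
    vm.execute_tx(5); // builds on top of tx #1, dropping its snapshot
    vm.rollback_last_tx(); // undoes tx #2 only
    assert_eq!(vm.state, 10);
}
```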
- // Saving the snapshot before executing - vm.make_snapshot(); - let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] @@ -257,7 +274,6 @@ impl CommandReceiver { vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true) { let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); - vm.pop_snapshot_no_rollback(); let trace = Arc::try_unwrap(call_tracer_result) .unwrap() @@ -265,7 +281,11 @@ impl CommandReceiver { .unwrap_or_default(); return (result, compressed_bytecodes, trace); } + + // Roll back to the snapshot just before the transaction execution taken in `Self::execute_tx()` + // and create a snapshot at the same VM state again. vm.rollback_to_the_latest_snapshot(); + vm.make_snapshot(); let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { @@ -292,7 +312,7 @@ impl CommandReceiver { /// Attempts to execute transaction with mandatory bytecode compression. /// If bytecode compression fails, the transaction will be rejected. - fn execute_tx_in_vm( + fn execute_tx_in_vm( &self, tx: &Transaction, vm: &mut VmInstance, diff --git a/core/node/state_keeper/src/batch_executor/tests/mod.rs b/core/node/state_keeper/src/batch_executor/tests/mod.rs index 4b36965895f..ab9115991de 100644 --- a/core/node/state_keeper/src/batch_executor/tests/mod.rs +++ b/core/node/state_keeper/src/batch_executor/tests/mod.rs @@ -2,7 +2,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core}; use zksync_test_account::Account; -use zksync_types::{get_nonce_key, utils::storage_key_for_eth_balance, PriorityOpId}; +use zksync_types::{ + get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, PriorityOpId, +}; use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; use super::TxExecutionResult; @@ -41,13 +43,15 @@ impl StorageType { const ALL: [Self; 3] = [Self::AsyncRocksdbCache, Self::Rocksdb, Self::Postgres]; } +const FAST_VM_MODES: [FastVmMode; 3] = [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow]; + /// Checks that we can successfully execute a single L2 tx in batch executor on all storage types. -#[test_casing(3, StorageType::ALL)] +#[test_casing(9, Product((StorageType::ALL, FAST_VM_MODES)))] #[tokio::test] -async fn execute_l2_tx(storage_type: StorageType) { +async fn execute_l2_tx(storage_type: StorageType, vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester.create_batch_executor(storage_type).await; @@ -82,14 +86,9 @@ impl SnapshotRecoveryMutation { } } -const EXECUTE_L2_TX_AFTER_SNAPSHOT_RECOVERY_CASES: Product<( - [Option; 3], - [StorageType; 3], -)> = Product((SnapshotRecoveryMutation::ALL, StorageType::ALL)); - /// Tests that we can continue executing account transactions after emulating snapshot recovery. /// Test cases with a set `mutation` ensure that the VM executor correctly detects missing data (e.g., dropped account nonce). 
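The batch-executor tests in this file pair every storage type with every `FastVmMode` via `test_casing::Product`. A standalone example of the same pattern, assuming `test_casing` and `tokio` (with `macros` and `rt`) as dev-dependencies; the enums here are placeholders:

```rust
use test_casing::{test_casing, Product};

#[derive(Debug, Clone, Copy)]
enum Storage {
    Rocksdb,
    Postgres,
}

#[derive(Debug, Clone, Copy)]
enum Mode {
    Old,
    New,
    Shadow,
}

const STORAGES: [Storage; 2] = [Storage::Rocksdb, Storage::Postgres];
const MODES: [Mode; 3] = [Mode::Old, Mode::New, Mode::Shadow];

// 2 storages x 3 modes = 6 generated test cases; the first argument must match that count.
#[test_casing(6, Product((STORAGES, MODES)))]
#[tokio::test]
async fn runs_for_every_combination(storage: Storage, mode: Mode) {
    println!("storage = {storage:?}, mode = {mode:?}");
}
```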
-#[test_casing(9, EXECUTE_L2_TX_AFTER_SNAPSHOT_RECOVERY_CASES)] +#[test_casing(9, Product((SnapshotRecoveryMutation::ALL, StorageType::ALL)))] #[tokio::test] async fn execute_l2_tx_after_snapshot_recovery( mutation: Option, @@ -106,7 +105,7 @@ async fn execute_l2_tx_after_snapshot_recovery( } let snapshot = storage_snapshot.recover(&connection_pool).await; - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, FastVmMode::Old); let mut executor = tester .recover_batch_executor_custom(&storage_type, &snapshot) .await; @@ -120,12 +119,13 @@ async fn execute_l2_tx_after_snapshot_recovery( } /// Checks that we can successfully execute a single L1 tx in batch executor. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_l1_tx() { +async fn execute_l1_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -142,12 +142,13 @@ async fn execute_l1_tx() { } /// Checks that we can successfully execute a single L2 tx and a single L1 tx in batch executor. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_l2_and_l1_txs() { +async fn execute_l2_and_l1_txs(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -167,12 +168,13 @@ async fn execute_l2_and_l1_txs() { } /// Checks that we can successfully rollback the transaction and execute it once again. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn rollback() { +async fn rollback(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -213,12 +215,13 @@ async fn rollback() { } /// Checks that incorrect transactions are marked as rejected. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn reject_tx() { +async fn reject_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; let mut executor = tester @@ -232,12 +235,13 @@ async fn reject_tx() { /// Checks that tx with too big gas limit is correctly processed. /// When processed in the bootloader, no more than 80M gas can be used within the execution context. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn too_big_gas_limit() { +async fn too_big_gas_limit(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -252,12 +256,13 @@ async fn too_big_gas_limit() { } /// Checks that we can't execute the same transaction twice. 
+#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn tx_cant_be_reexecuted() { +async fn tx_cant_be_reexecuted(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -274,12 +279,13 @@ async fn tx_cant_be_reexecuted() { } /// Checks that we can deploy and call the loadnext contract. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn deploy_and_call_loadtest() { +async fn deploy_and_call_loadtest(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; let mut executor = tester @@ -304,12 +310,13 @@ async fn deploy_and_call_loadtest() { } /// Checks that a tx that is reverted by the VM still can be included into a batch. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_reverted_tx() { +async fn execute_reverted_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -334,13 +341,14 @@ async fn execute_reverted_tx() { /// Runs the batch executor through a semi-realistic basic scenario: /// a batch with different operations, both successful and not. +#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn execute_realistic_scenario() { +async fn execute_realistic_scenario(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let mut bob = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, vm_mode); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -395,8 +403,9 @@ async fn execute_realistic_scenario() { } /// Checks that we handle the bootloader out of gas error on execution phase. 
+#[test_casing(3, FAST_VM_MODES)] #[tokio::test] -async fn bootloader_out_of_gas_for_any_tx() { +async fn bootloader_out_of_gas_for_any_tx(vm_mode: FastVmMode) { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); @@ -406,6 +415,7 @@ async fn bootloader_out_of_gas_for_any_tx() { save_call_traces: false, vm_gas_limit: Some(10), validation_computational_gas_limit: u32::MAX, + fast_vm_mode: vm_mode, }, ); @@ -426,7 +436,7 @@ async fn bootloader_tip_out_of_gas() { let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, FastVmMode::Old); tester.genesis().await; tester.fund(&[alice.address()]).await; @@ -451,6 +461,7 @@ async fn bootloader_tip_out_of_gas() { - 10, ), validation_computational_gas_limit: u32::MAX, + fast_vm_mode: FastVmMode::Old, }); let mut second_executor = tester @@ -467,7 +478,7 @@ async fn catchup_rocksdb_cache() { let mut alice = Account::random(); let mut bob = Account::random(); - let mut tester = Tester::new(connection_pool); + let mut tester = Tester::new(connection_pool, FastVmMode::Old); tester.genesis().await; tester.fund(&[alice.address(), bob.address()]).await; diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/batch_executor/tests/tester.rs index 961ccf9db16..6730d427c67 100644 --- a/core/node/state_keeper/src/batch_executor/tests/tester.rs +++ b/core/node/state_keeper/src/batch_executor/tests/tester.rs @@ -24,6 +24,7 @@ use zksync_types::{ storage_writes_deduplicator::StorageWritesDeduplicator, system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, + vm::FastVmMode, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256, }; @@ -45,16 +46,18 @@ pub(super) struct TestConfig { pub(super) save_call_traces: bool, pub(super) vm_gas_limit: Option, pub(super) validation_computational_gas_limit: u32, + pub(super) fast_vm_mode: FastVmMode, } impl TestConfig { - pub(super) fn new() -> Self { + pub(super) fn new(fast_vm_mode: FastVmMode) -> Self { let config = StateKeeperConfig::for_tests(); Self { vm_gas_limit: None, save_call_traces: false, validation_computational_gas_limit: config.validation_computational_gas_limit, + fast_vm_mode, } } } @@ -71,8 +74,8 @@ pub(super) struct Tester { } impl Tester { - pub(super) fn new(pool: ConnectionPool) -> Self { - Self::with_config(pool, TestConfig::new()) + pub(super) fn new(pool: ConnectionPool, fast_vm_mode: FastVmMode) -> Self { + Self::with_config(pool, TestConfig::new(fast_vm_mode)) } pub(super) fn with_config(pool: ConnectionPool, config: TestConfig) -> Self { @@ -141,6 +144,8 @@ impl Tester { system_env: SystemEnv, ) -> BatchExecutorHandle { let mut batch_executor = MainBatchExecutor::new(self.config.save_call_traces, false); + batch_executor.set_fast_vm_mode(self.config.fast_vm_mode); + let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory .access_storage(&stop_receiver, l1_batch_env.number - 1) @@ -440,7 +445,7 @@ impl StorageSnapshot { alice: &mut Account, transaction_count: u32, ) -> Self { - let mut tester = Tester::new(connection_pool.clone()); + let mut tester = Tester::new(connection_pool.clone(), FastVmMode::Old); tester.genesis().await; tester.fund(&[alice.address()]).await; diff --git 
a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index d79d9ebb34a..5003d75b669 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -11,7 +11,7 @@ use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; use zksync_node_fee_model::BatchFeeModelInputProvider; #[cfg(test)] use zksync_types::H256; -use zksync_types::{get_nonce_key, Address, Nonce, Transaction, VmVersion}; +use zksync_types::{get_nonce_key, vm::VmVersion, Address, Nonce, Transaction}; use super::{metrics::KEEPER_METRICS, types::MempoolGuard}; diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 465042a602d..e47e1182699 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -45,9 +45,6 @@ pub(super) fn default_vm_batch_result() -> FinishedL1Batch { used_contract_hashes: vec![], user_l2_to_l1_logs: vec![], system_logs: vec![], - total_log_queries: 0, - cycles_used: 0, - deduplicated_events_logs: vec![], storage_refunds: Vec::new(), pubdata_costs: Vec::new(), }, diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index 52a8e467643..cc6313fa572 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -22,7 +22,9 @@ zksync_utils.workspace = true zksync_prover_interface.workspace = true zksync_object_store.workspace = true zksync_vm_utils.workspace = true +zksync_health_check.workspace = true +serde.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 48f243cd9bc..f7f8c099609 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -6,7 +6,7 @@ use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; -use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_state_keeper::{BatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{ block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, H256, @@ -30,6 +30,7 @@ impl BasicWitnessInputProducer { pub async fn new( pool: ConnectionPool, object_store: Arc, + batch_executor: Box, rocksdb_path: String, chain_id: L2ChainId, first_processed_batch: L1BatchNumber, @@ -47,13 +48,12 @@ impl BasicWitnessInputProducer { }; let (output_handler_factory, output_handler_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); - let batch_processor = MainBatchExecutor::new(false, false); let vm_runner = VmRunner::new( pool, Box::new(io), Arc::new(loader), Box::new(output_handler_factory), - Box::new(batch_processor), + batch_executor, ); Ok(( Self { vm_runner }, @@ -75,8 +75,7 @@ impl BasicWitnessInputProducer { } } -/// A collections of tasks that need to be run in order for BWIP to work as -/// intended. +/// Collection of tasks that need to be run in order for BWIP to work as intended. #[derive(Debug)] pub struct BasicWitnessInputProducerTasks { /// Task that synchronizes storage with new available batches. 
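In the BWIP wiring above, the batch executor is now injected by the caller instead of being constructed inside the component, and the connection pool is sized as `window_size + 2`. A self-contained sketch of that dependency-injection shape; trait and struct names are simplified stand-ins for the real zksync types:

```rust
trait BatchExecutor: Send {
    fn describe(&self) -> String;
}

#[derive(Debug)]
struct MainExecutor {
    save_call_traces: bool,
}

impl BatchExecutor for MainExecutor {
    fn describe(&self) -> String {
        format!("main executor, call traces: {}", self.save_call_traces)
    }
}

struct WitnessInputProducer {
    executor: Box<dyn BatchExecutor>,
    window_size: u32,
}

impl WitnessInputProducer {
    fn new(executor: Box<dyn BatchExecutor>, window_size: u32) -> Self {
        Self { executor, window_size }
    }

    fn connection_pool_size(&self) -> u32 {
        // 1 connection for storage sync + 1 for the output handler / runner,
        // plus `window_size` connections for concurrently processed batches.
        self.window_size + 2
    }
}

fn main() {
    // The caller decides how the executor is configured (here: no call traces),
    // keeping state keeper-specific settings out of the witness producer.
    let producer =
        WitnessInputProducer::new(Box::new(MainExecutor { save_call_traces: false }), 3);
    println!("{}", producer.executor.describe());
    assert_eq!(producer.connection_pool_size(), 5);
}
```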
diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs index 2d982730498..7f9869531c6 100644 --- a/core/node/vm_runner/src/impls/mod.rs +++ b/core/node/vm_runner/src/impls/mod.rs @@ -1,7 +1,13 @@ +//! Components powered by a VM runner. + mod bwip; +mod playground; mod protective_reads; -pub use bwip::{ - BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, +pub use self::{ + bwip::{ + BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, + }, + playground::{VmPlayground, VmPlaygroundIo, VmPlaygroundLoaderTask, VmPlaygroundTasks}, + protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}, }; -pub use protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}; diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs new file mode 100644 index 00000000000..4fb140431df --- /dev/null +++ b/core/node/vm_runner/src/impls/playground.rs @@ -0,0 +1,333 @@ +use std::{ + io, + path::{Path, PathBuf}, + sync::Arc, +}; + +use anyhow::Context as _; +use async_trait::async_trait; +use serde::Serialize; +use tokio::{ + fs, + sync::{oneshot, watch}, +}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_state::RocksdbStorage; +use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_types::{vm::FastVmMode, L1BatchNumber, L2ChainId}; + +use crate::{ + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, + StorageSyncTask, VmRunner, VmRunnerIo, VmRunnerStorage, +}; + +#[derive(Debug, Serialize)] +struct VmPlaygroundHealth { + vm_mode: FastVmMode, + last_processed_batch: L1BatchNumber, +} + +impl From for Health { + fn from(health: VmPlaygroundHealth) -> Self { + Health::from(HealthStatus::Ready).with_details(health) + } +} + +/// Virtual machine playground. Does not persist anything in Postgres; instead, keeps an L1 batch cursor as a plain text file in the RocksDB directory +/// (so that the playground doesn't repeatedly process same batches after a restart). +#[derive(Debug)] +pub struct VmPlayground { + pool: ConnectionPool, + batch_executor: MainBatchExecutor, + rocksdb_path: String, + chain_id: L2ChainId, + io: VmPlaygroundIo, + loader_task_sender: oneshot::Sender>, + output_handler_factory: + ConcurrentOutputHandlerFactory, + reset_to_batch: Option, +} + +impl VmPlayground { + /// Creates a new playground. 
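`VmPlaygroundHealth` above is serialized into the `details` of the component's health entry; the integration test later in this diff asserts `vm_mode == "shadow"` and checks `last_processed_batch`. A stand-in sketch of that payload, assuming `serde`, `serde_json`, and lowercase serialization of the mode (inferred from the test, not confirmed elsewhere):

```rust
use serde::Serialize;

#[derive(Debug, Clone, Copy, Serialize)]
#[serde(rename_all = "snake_case")]
enum FastVmMode {
    Old,
    New,
    Shadow,
}

#[derive(Debug, Serialize)]
struct VmPlaygroundHealth {
    vm_mode: FastVmMode,
    last_processed_batch: u32,
}

fn main() -> serde_json::Result<()> {
    let health = VmPlaygroundHealth {
        vm_mode: FastVmMode::Shadow,
        last_processed_batch: 1,
    };
    // Roughly the `details` object exposed under the `vm_playground` component
    // of the node's health endpoint.
    println!("{}", serde_json::to_string_pretty(&health)?);
    Ok(())
}
```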
+ pub async fn new( + pool: ConnectionPool, + vm_mode: FastVmMode, + rocksdb_path: String, + chain_id: L2ChainId, + first_processed_batch: L1BatchNumber, + reset_state: bool, + ) -> anyhow::Result<(Self, VmPlaygroundTasks)> { + tracing::info!( + "Starting VM playground with mode {vm_mode:?}, first processed batch is #{first_processed_batch} \ + (reset processing: {reset_state:?})" + ); + + let cursor_file_path = Path::new(&rocksdb_path).join("__vm_playground_cursor"); + let latest_processed_batch = VmPlaygroundIo::read_cursor(&cursor_file_path).await?; + tracing::info!("Latest processed batch: {latest_processed_batch:?}"); + let latest_processed_batch = if reset_state { + first_processed_batch + } else { + latest_processed_batch.unwrap_or(first_processed_batch) + }; + + let mut batch_executor = MainBatchExecutor::new(false, false); + batch_executor.set_fast_vm_mode(vm_mode); + + let io = VmPlaygroundIo { + cursor_file_path, + vm_mode, + latest_processed_batch: Arc::new(watch::channel(latest_processed_batch).0), + health_updater: Arc::new(ReactiveHealthCheck::new("vm_playground").1), + }; + let (output_handler_factory, output_handler_factory_task) = + ConcurrentOutputHandlerFactory::new( + pool.clone(), + io.clone(), + VmPlaygroundOutputHandler, + ); + let (loader_task_sender, loader_task_receiver) = oneshot::channel(); + + let this = Self { + pool, + batch_executor, + rocksdb_path, + chain_id, + io, + loader_task_sender, + output_handler_factory, + reset_to_batch: reset_state.then_some(first_processed_batch), + }; + Ok(( + this, + VmPlaygroundTasks { + loader_task: VmPlaygroundLoaderTask { + inner: loader_task_receiver, + }, + output_handler_factory_task, + }, + )) + } + + /// Returns a health check for this component. + pub fn health_check(&self) -> ReactiveHealthCheck { + self.io.health_updater.subscribe() + } + + #[cfg(test)] + pub(crate) fn io(&self) -> &VmPlaygroundIo { + &self.io + } + + #[tracing::instrument(skip(self), err)] + async fn reset_rocksdb_cache(&self, last_retained_batch: L1BatchNumber) -> anyhow::Result<()> { + let builder = RocksdbStorage::builder(self.rocksdb_path.as_ref()).await?; + let current_l1_batch = builder.l1_batch_number().await; + if current_l1_batch <= Some(last_retained_batch) { + tracing::info!("Resetting RocksDB cache is not required: its current batch #{current_l1_batch:?} is lower than the target"); + return Ok(()); + } + + tracing::info!("Resetting RocksDB cache from batch #{current_l1_batch:?}"); + let mut conn = self.pool.connection_tagged("vm_playground").await?; + builder.roll_back(&mut conn, last_retained_batch).await + } + + /// Continuously loads new available batches and writes the corresponding data + /// produced by that batch. + /// + /// # Errors + /// + /// Propagates RocksDB and Postgres errors. 
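The constructor above resolves the starting batch from the plain-text cursor file and the `reset_state` flag. A blocking, self-contained sketch of that policy; the real code uses `tokio::fs` and the `L1BatchNumber` newtype, while this stand-in uses plain `u32`, `std::fs`, and an assumed `anyhow` dependency:

```rust
use std::{fs, io, path::Path};

fn read_cursor(path: &Path) -> anyhow::Result<Option<u32>> {
    match fs::read_to_string(path) {
        Ok(buffer) => Ok(Some(buffer.trim().parse()?)),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
        Err(err) => Err(err.into()),
    }
}

/// Decides which batch to start from: a requested reset wins, otherwise the stored cursor,
/// otherwise the configured first batch.
fn starting_batch(stored: Option<u32>, first_processed_batch: u32, reset: bool) -> u32 {
    if reset {
        first_processed_batch
    } else {
        stored.unwrap_or(first_processed_batch)
    }
}

fn main() -> anyhow::Result<()> {
    let cursor_path = std::env::temp_dir().join("__vm_playground_cursor_demo");
    fs::write(&cursor_path, "42")?;

    let stored = read_cursor(&cursor_path)?;
    assert_eq!(starting_batch(stored, 0, false), 42); // resume from the cursor
    assert_eq!(starting_batch(stored, 0, true), 0); // reset re-processes from the first batch
    fs::remove_file(&cursor_path)?;
    Ok(())
}
```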
+ pub async fn run(self, stop_receiver: &watch::Receiver) -> anyhow::Result<()> { + fs::create_dir_all(&self.rocksdb_path) + .await + .with_context(|| format!("cannot create dir `{}`", self.rocksdb_path))?; + + if let Some(reset_to_batch) = self.reset_to_batch { + self.io.health_updater.update(HealthStatus::Affected.into()); + + self.reset_rocksdb_cache(reset_to_batch).await?; + self.io + .write_cursor(reset_to_batch) + .await + .context("failed resetting VM playground state")?; + tracing::info!("Finished resetting playground state"); + } + + self.io.update_health(); + + let (loader, loader_task) = VmRunnerStorage::new( + self.pool.clone(), + self.rocksdb_path, + self.io.clone(), + self.chain_id, + ) + .await?; + self.loader_task_sender.send(loader_task).ok(); + let vm_runner = VmRunner::new( + self.pool, + Box::new(self.io), + Arc::new(loader), + Box::new(self.output_handler_factory), + Box::new(self.batch_executor), + ); + vm_runner.run(stop_receiver).await + } +} + +/// Loader task for the VM playground. +#[derive(Debug)] +pub struct VmPlaygroundLoaderTask { + inner: oneshot::Receiver>, +} + +impl VmPlaygroundLoaderTask { + /// Runs a task until a stop signal is received. + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let task = tokio::select! { + biased; + _ = stop_receiver.changed() => return Ok(()), + res = self.inner => match res { + Ok(task) => task, + Err(_) => anyhow::bail!("VM playground stopped before spawning loader task"), + } + }; + task.run(stop_receiver).await + } +} + +/// Collection of tasks that need to be run in order for the VM playground to work as intended. +#[derive(Debug)] +pub struct VmPlaygroundTasks { + /// Task that synchronizes storage with new available batches. + pub loader_task: VmPlaygroundLoaderTask, + /// Task that handles output from processed batches. + pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, +} + +/// I/O powering [`VmPlayground`]. +#[derive(Debug, Clone)] +pub struct VmPlaygroundIo { + cursor_file_path: PathBuf, + vm_mode: FastVmMode, + // We don't read this value from the cursor file in the `VmRunnerIo` implementation because reads / writes + // aren't guaranteed to be atomic. 
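The latest processed batch is tracked in memory via a `tokio::sync::watch` channel (created in the constructor above; the field follows right below): the sender is bumped once a batch is fully processed, and subscribers such as the health updater and the tests' `wait_for` calls observe it. A self-contained sketch of that bookkeeping, assuming `tokio` with the `sync`, `macros`, and `rt` features:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (latest_processed_batch, _) = watch::channel(0u32);

    let mut subscriber = latest_processed_batch.subscribe();
    let waiter = tokio::spawn(async move {
        // Mirrors `subscribe_to_completed_batches()` + `wait_for()` in the playground tests.
        subscriber.wait_for(|&batch| batch == 2).await.unwrap();
        println!("batch #2 processed");
    });

    for batch in 1..=2u32 {
        // In the real component this happens only after the cursor file write succeeded.
        latest_processed_batch.send_replace(batch);
    }
    waiter.await.unwrap();
}
```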
+ latest_processed_batch: Arc>, + health_updater: Arc, +} + +impl VmPlaygroundIo { + async fn read_cursor(cursor_file_path: &Path) -> anyhow::Result> { + match fs::read_to_string(cursor_file_path).await { + Ok(buffer) => { + let cursor = buffer + .parse::() + .with_context(|| format!("invalid cursor value: {buffer}"))?; + Ok(Some(L1BatchNumber(cursor))) + } + Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None), + Err(err) => Err(anyhow::Error::new(err).context(format!( + "failed reading VM playground cursor from `{}`", + cursor_file_path.display() + ))), + } + } + + async fn write_cursor(&self, cursor: L1BatchNumber) -> anyhow::Result<()> { + let buffer = cursor.to_string(); + fs::write(&self.cursor_file_path, buffer) + .await + .with_context(|| { + format!( + "failed writing VM playground cursor to `{}`", + self.cursor_file_path.display() + ) + }) + } + + fn update_health(&self) { + let health = VmPlaygroundHealth { + vm_mode: self.vm_mode, + last_processed_batch: *self.latest_processed_batch.borrow(), + }; + self.health_updater.update(health.into()); + } + + #[cfg(test)] + pub(crate) fn subscribe_to_completed_batches(&self) -> watch::Receiver { + self.latest_processed_batch.subscribe() + } +} + +#[async_trait] +impl VmRunnerIo for VmPlaygroundIo { + fn name(&self) -> &'static str { + "vm_playground" + } + + async fn latest_processed_batch( + &self, + _conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + Ok(*self.latest_processed_batch.borrow()) + } + + async fn last_ready_to_be_loaded_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let sealed_l1_batch = conn + .blocks_dal() + .get_sealed_l1_batch_number() + .await? + .context("no L1 batches in Postgres")?; + let last_processed_l1_batch = self.latest_processed_batch(conn).await?; + Ok(sealed_l1_batch.min(last_processed_l1_batch + 1)) + } + + async fn mark_l1_batch_as_processing( + &self, + _conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + tracing::info!("Started processing L1 batch #{l1_batch_number}"); + Ok(()) + } + + async fn mark_l1_batch_as_completed( + &self, + _conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + tracing::info!("Finished processing L1 batch #{l1_batch_number}"); + self.write_cursor(l1_batch_number).await?; + // We should only update the in-memory value after the write to the cursor file succeeded. 
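The `last_ready_to_be_loaded_batch` implementation above caps the loadable batch at one past the last processed batch, but never beyond the last batch sealed in Postgres. The same rule as a pure function, exercised with the values from the playground test below:

```rust
/// The playground may load at most one batch past the last processed one,
/// but never past the last batch sealed in Postgres.
fn last_ready_to_be_loaded_batch(sealed: u32, last_processed: u32) -> u32 {
    sealed.min(last_processed + 1)
}

fn main() {
    // Right after genesis in the test below: batch #0 processed, batch #1 sealed.
    assert_eq!(last_ready_to_be_loaded_batch(1, 0), 1);
    // Once batch #1 is processed there is no batch #2 in storage yet, so the value stays at 1.
    assert_eq!(last_ready_to_be_loaded_batch(1, 1), 1);
}
```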
+ self.latest_processed_batch.send_replace(l1_batch_number); + self.update_health(); + Ok(()) + } +} + +#[derive(Debug)] +struct VmPlaygroundOutputHandler; + +#[async_trait] +impl StateKeeperOutputHandler for VmPlaygroundOutputHandler { + async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { + tracing::trace!("Processed L2 block #{}", updates_manager.l2_block.number); + Ok(()) + } +} + +#[async_trait] +impl OutputHandlerFactory for VmPlaygroundOutputHandler { + async fn create_handler( + &mut self, + _l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + Ok(Box::new(Self)) + } +} diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index b252eebcbb1..03e3f43baed 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -3,7 +3,7 @@ #![warn(missing_debug_implementations, missing_docs)] -mod impls; +pub mod impls; mod io; mod output_handler; mod process; @@ -13,13 +13,11 @@ mod metrics; #[cfg(test)] mod tests; -pub use impls::{ - BasicWitnessInputProducer, BasicWitnessInputProducerIo, BasicWitnessInputProducerTasks, - ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks, +pub use self::{ + io::VmRunnerIo, + output_handler::{ + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, + }, + process::VmRunner, + storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}, }; -pub use io::VmRunnerIo; -pub use output_handler::{ - ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, -}; -pub use process::VmRunner; -pub use storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}; diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index e9dbebfa24d..4cb2d26f6bd 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -28,6 +28,7 @@ use super::{BatchExecuteData, OutputHandlerFactory, VmRunnerIo}; use crate::storage::{load_batch_execute_data, StorageLoader}; mod output_handler; +mod playground; mod process; mod storage; mod storage_writer; @@ -306,11 +307,12 @@ async fn store_l1_batches( digest.push_tx_hash(tx.hash()); new_l2_block.hash = digest.finalize(ProtocolVersionId::latest()); - l2_block_number += 1; new_l2_block.base_system_contracts_hashes = contract_hashes; new_l2_block.l2_tx_count = 1; conn.blocks_dal().insert_l2_block(&new_l2_block).await?; last_l2_block_hash = new_l2_block.hash; + l2_block_number += 1; + let tx_result = execute_l2_transaction(tx.clone()); conn.transactions_dal() .mark_txs_as_executed_in_l2_block( @@ -330,9 +332,9 @@ async fn store_l1_batches( last_l2_block_hash, ); fictive_l2_block.hash = digest.finalize(ProtocolVersionId::latest()); - l2_block_number += 1; conn.blocks_dal().insert_l2_block(&fictive_l2_block).await?; last_l2_block_hash = fictive_l2_block.hash; + l2_block_number += 1; let header = L1BatchHeader::new( l1_batch_number, diff --git a/core/node/vm_runner/src/tests/playground.rs b/core/node/vm_runner/src/tests/playground.rs new file mode 100644 index 00000000000..c4111f73741 --- /dev/null +++ b/core/node/vm_runner/src/tests/playground.rs @@ -0,0 +1,139 @@ +use test_casing::test_casing; +use tokio::sync::watch; +use zksync_health_check::HealthStatus; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_state::RocksdbStorage; +use zksync_types::vm::FastVmMode; + +use super::*; +use crate::impls::VmPlayground; + +async fn run_playground( + pool: ConnectionPool, + rocksdb_dir: 
&tempfile::TempDir, + reset_state: bool, +) { + let mut conn = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + if conn.blocks_dal().is_genesis_needed().await.unwrap() { + insert_genesis_batch(&mut conn, &genesis_params) + .await + .unwrap(); + + // Generate some batches and persist them in Postgres + let mut accounts = [Account::random()]; + fund(&mut conn, &accounts).await; + store_l1_batches( + &mut conn, + 1..=1, // TODO: test on >1 batch + genesis_params.base_system_contracts().hashes(), + &mut accounts, + ) + .await + .unwrap(); + } + + let (playground, playground_tasks) = VmPlayground::new( + pool.clone(), + FastVmMode::Shadow, + rocksdb_dir.path().to_str().unwrap().to_owned(), + genesis_params.config().l2_chain_id, + L1BatchNumber(0), + reset_state, + ) + .await + .unwrap(); + + let (stop_sender, stop_receiver) = watch::channel(false); + let playground_io = playground.io().clone(); + assert_eq!( + playground_io + .latest_processed_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(0) + ); + assert_eq!( + playground_io + .last_ready_to_be_loaded_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(1) + ); + let mut health_check = playground.health_check(); + + let mut completed_batches = playground_io.subscribe_to_completed_batches(); + let task_handles = [ + tokio::spawn(playground_tasks.loader_task.run(stop_receiver.clone())), + tokio::spawn( + playground_tasks + .output_handler_factory_task + .run(stop_receiver.clone()), + ), + tokio::spawn(async move { playground.run(&stop_receiver).await }), + ]; + // Wait until all batches are processed. + completed_batches + .wait_for(|&number| number == L1BatchNumber(1)) + .await + .unwrap(); + health_check + .wait_for(|health| { + if !matches!(health.status(), HealthStatus::Ready) { + return false; + } + let health_details = health.details().unwrap(); + assert_eq!(health_details["vm_mode"], "shadow"); + health_details["last_processed_batch"] == 1_u64 + }) + .await; + + // Check that playground I/O works correctly. + assert_eq!( + playground_io + .latest_processed_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(1) + ); + // There's no batch #2 in storage + assert_eq!( + playground_io + .last_ready_to_be_loaded_batch(&mut conn) + .await + .unwrap(), + L1BatchNumber(1) + ); + + stop_sender.send_replace(true); + for task_handle in task_handles { + task_handle.await.unwrap().unwrap(); + } +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn vm_playground_basics(reset_state: bool) { + let pool = ConnectionPool::test_pool().await; + let rocksdb_dir = tempfile::TempDir::new().unwrap(); + run_playground(pool, &rocksdb_dir, reset_state).await; +} + +#[tokio::test] +async fn resetting_playground_state() { + let pool = ConnectionPool::test_pool().await; + let rocksdb_dir = tempfile::TempDir::new().unwrap(); + run_playground(pool.clone(), &rocksdb_dir, false).await; + + // Manually catch up RocksDB to Postgres to ensure that resetting it is not trivial. 
+ let (_stop_sender, stop_receiver) = watch::channel(false); + let mut conn = pool.connection().await.unwrap(); + RocksdbStorage::builder(rocksdb_dir.path()) + .await + .unwrap() + .synchronize(&mut conn, &stop_receiver, None) + .await + .unwrap(); + + run_playground(pool.clone(), &rocksdb_dir, true).await; +} diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index 634e8c950a6..6cc2bed0a8d 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -2,7 +2,7 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { BigNumberish } from 'ethers'; -import { TestContext, TestEnvironment, TestWallets } from './types'; +import { NodeMode, TestContext, TestEnvironment, TestWallets } from './types'; import { lookupPrerequisites } from './prerequisites'; import { Reporter } from './reporter'; import { scaledGasPrice } from './helpers'; @@ -541,6 +541,64 @@ export class TestContextOwner { this.reporter.finishAction(); } + /** + * Waits until the VM playground processes all L1 batches. If the playground runs the new VM in the shadow mode, this means + * that there are no divergence in old and new VM execution. Outputs a warning if the VM playground isn't run or runs not in the shadow mode. + */ + private async waitForVmPlayground() { + while (true) { + const lastProcessedBatch = await this.lastPlaygroundBatch(); + if (lastProcessedBatch === undefined) { + this.reporter.warn('The node does not run VM playground; run to check old / new VM divergence'); + break; + } + const lastNodeBatch = await this.l2Provider.getL1BatchNumber(); + + this.reporter.debug(`VM playground progress: L1 batch #${lastProcessedBatch} / ${lastNodeBatch}`); + if (lastProcessedBatch >= lastNodeBatch) { + break; + } + await zksync.utils.sleep(500); + } + } + + /** + * Returns the number of the last L1 batch processed by the VM playground, taking it from the node health endpoint. + * Returns `undefined` if the VM playground isn't run or doesn't have the shadow mode. + */ + private async lastPlaygroundBatch() { + interface VmPlaygroundHealth { + readonly status: string; + readonly details?: { + vm_mode?: string; + last_processed_batch?: number; + }; + } + + interface NodeHealth { + readonly components: { + vm_playground?: VmPlaygroundHealth; + }; + } + + const healthcheckPort = process.env.API_HEALTHCHECK_PORT ?? '3071'; + const nodeHealth = (await (await fetch(`http://127.0.0.1:${healthcheckPort}/health`)).json()) as NodeHealth; + const playgroundHealth = nodeHealth.components.vm_playground; + if (playgroundHealth === undefined) { + return undefined; + } + if (playgroundHealth.status !== 'ready') { + throw new Error(`Unexpected VM playground health status: ${playgroundHealth.status}`); + } + if (playgroundHealth.details?.vm_mode !== 'shadow') { + this.reporter.warn( + `VM playground mode is '${playgroundHealth.details?.vm_mode}'; should be set to 'shadow' to check VM divergence` + ); + return undefined; + } + return playgroundHealth.details?.last_processed_batch ?? 0; + } + /** * Performs context deinitialization. */ @@ -548,10 +606,16 @@ export class TestContextOwner { // Reset the reporter context. this.reporter = new Reporter(); try { + if (this.env.nodeMode == NodeMode.Main && this.env.network === 'localhost') { + // Check that the VM execution hasn't diverged using the VM playground. 
The component and thus the main node + // will crash on divergence, so we just need to make sure that the test doesn't exit before the VM playground + // processes all batches on the node. + this.reporter.startAction('Waiting for VM playground to catch up'); + await this.waitForVmPlayground(); + this.reporter.finishAction(); + } this.reporter.startAction(`Tearing down the context`); - await this.collectFunds(); - this.reporter.finishAction(); } catch (error: any) { // Report the issue to the console and mark the last action as failed. diff --git a/core/tests/ts-integration/src/reporter.ts b/core/tests/ts-integration/src/reporter.ts index 114ff2a7f5c..903ff3101ef 100644 --- a/core/tests/ts-integration/src/reporter.ts +++ b/core/tests/ts-integration/src/reporter.ts @@ -88,8 +88,8 @@ export class Reporter { /** * Prints an error message to the console. */ - error(message: string) { - console.log(this.indent(`${errorPrefix('Error:')}: ${fail(message)}`)); + error(message: string, ...args: any[]) { + console.log(this.indent(`${errorPrefix('Error:')}: ${fail(message)}`), ...args); } /** diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index f5f85b3b4d2..efbc08a957a 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -27,6 +27,10 @@ harness = false name = "iai" harness = false +[[bench]] +name = "fill_bootloader" +harness = false + [[bin]] name = "iai_results_to_prometheus" path = "src/iai_results_to_prometheus.rs" diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/fill_bootloader.rs new file mode 100644 index 00000000000..fac422c8237 --- /dev/null +++ b/core/tests/vm-benchmark/benches/fill_bootloader.rs @@ -0,0 +1,23 @@ +use std::time::Instant; + +use criterion::black_box; +use zksync_vm_benchmark_harness::{ + cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, BenchmarkingVm, +}; + +fn main() { + let test_contract = + std::fs::read("deployment_benchmarks/event_spam").expect("failed to read file"); + + let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let tx = get_deploy_tx_with_gas_limit(code, 1000); + + let start = Instant::now(); + + let mut vm = BenchmarkingVm::new(); + for _ in 0..1000 { + vm.run_transaction(black_box(&tx)); + } + + println!("{:?}", start.elapsed()); +} diff --git a/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write b/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write new file mode 100644 index 00000000000..914a2338685 Binary files /dev/null and b/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write differ diff --git a/core/tests/vm-benchmark/deployment_benchmarks_sources/heap_read_write.sol b/core/tests/vm-benchmark/deployment_benchmarks_sources/heap_read_write.sol new file mode 100644 index 00000000000..d5a503eb708 --- /dev/null +++ b/core/tests/vm-benchmark/deployment_benchmarks_sources/heap_read_write.sol @@ -0,0 +1,25 @@ +pragma solidity ^0.8.0; + +contract HeapBenchmark { + constructor() { + uint256 v1 = 0; + uint256 v2 = 0; + uint256 n = 16000; + uint256[] memory array = new uint256[](1); + + assembly { + mstore(add(array, sub(n, 1)), 4242) + + let j := 0 + for {} lt(j, n) {} { + v1 := mload(add(array, mod(mul(j, j), n))) + v2 := mload(add(array, j)) + mstore(add(array, j), add(add(v1, v2), 42)) + j := add(j, 1) + if gt(j, sub(n, 1)) { + j := 0 + } + } + } + } +} diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs 
b/core/tests/vm-benchmark/harness/src/instruction_counter.rs index 0d80658c720..017b13da44c 100644 --- a/core/tests/vm-benchmark/harness/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/harness/src/instruction_counter.rs @@ -13,6 +13,7 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM. impl InstructionCounter { + #[allow(dead_code)] // FIXME pub fn new(output: Rc>) -> Self { Self { count: 0, output } } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index 35e7530e9aa..a30221cfa0b 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -7,9 +7,10 @@ use zksync_multivm::{ L2BlockEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, }, utils::get_max_gas_per_pubdata_byte, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled, TracerDispatcher, Vm}, + vm_fast::Vm, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; -use zksync_state::{InMemoryStorage, StorageView}; +use zksync_state::InMemoryStorage; use zksync_types::{ block::L2BlockHasher, ethabi::{encode, Token}, @@ -61,7 +62,7 @@ static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { static PRIVATE_KEY: Lazy = Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); -pub struct BenchmarkingVm(Vm, HistoryEnabled>); +pub struct BenchmarkingVm(Vm<&'static InMemoryStorage>); impl BenchmarkingVm { #[allow(clippy::new_without_default)] @@ -95,7 +96,7 @@ impl BenchmarkingVm { default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, - Rc::new(RefCell::new(StorageView::new(&*STORAGE))), + &*STORAGE, )) } @@ -109,18 +110,17 @@ impl BenchmarkingVm { let count = Rc::new(RefCell::new(0)); - self.0.inspect( - TracerDispatcher::new(vec![Box::new( - instruction_counter::InstructionCounter::new(count.clone()), - )]), - VmExecutionMode::OneTx, - ); + self.0.inspect((), VmExecutionMode::OneTx); count.take() } } pub fn get_deploy_tx(code: &[u8]) -> Transaction { + get_deploy_tx_with_gas_limit(code, 30_000_000) +} + +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction { let params = [ Token::FixedBytes(vec![0u8; 32]), Token::FixedBytes(hash_bytecode(code).0.to_vec()), @@ -137,7 +137,7 @@ pub fn get_deploy_tx(code: &[u8]) -> Transaction { calldata, Nonce(0), Fee { - gas_limit: U256::from(30000000u32), + gas_limit: U256::from(gas_limit), max_fee_per_gas: U256::from(250_000_000), max_priority_fee_per_gas: U256::from(0), gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( diff --git a/etc/contracts-test-data/contracts/expensive/expensive.sol b/etc/contracts-test-data/contracts/expensive/expensive.sol index c3b99df4892..27e18b6eb6c 100644 --- a/etc/contracts-test-data/contracts/expensive/expensive.sol +++ b/etc/contracts-test-data/contracts/expensive/expensive.sol @@ -12,4 +12,10 @@ contract Expensive { } return keccak256(abi.encodePacked(array)); } + + function cleanUp() public { + for (uint i = 0; i < array.length; i++) { + array[i] = 0; + } + } } diff --git a/etc/env/base/vm_runner.toml b/etc/env/base/vm_runner.toml index dd8e9915280..8e6171d7936 100644 --- a/etc/env/base/vm_runner.toml +++ b/etc/env/base/vm_runner.toml @@ -1,6 +1,4 @@ -# Configuration for the VM runner crate - -[vm_runner] +# Configuration for the VM runner instances and experimental VM [vm_runner.protective_reads] # Path to the directory that contains 
RocksDB with protective reads writer cache. @@ -17,3 +15,13 @@ db_path = "./db/main/basic_witness_input_producer" window_size = 3 # All batches before this one (inclusive) are always considered to be processed. first_processed_batch = 0 + +[experimental_vm] +# Mode in which to run the new fast VM in the state keeper. Don't set to "new" / "shadow" in production yet! +state_keeper_fast_vm_mode = "old" # default value + +[experimental_vm.playground] +# Path to the directory that contains RocksDB with protective reads writer cache. +db_path = "./db/main/vm_playground" +# Mode in which to run the new fast VM +fast_vm_mode = "shadow" diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 97d29c45b0f..670bfc1cc77 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -323,6 +323,12 @@ basic_witness_input_producer: window_size: 3 first_processed_batch: 0 +experimental_vm: + state_keeper_fast_vm_mode: OLD + playground: + db_path: "./db/main/vm_playground" + fast_vm_mode: SHADOW + snapshot_recovery: enabled: false object_store: diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 4284ee99894..e6ef7fd95f8 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -1669,6 +1669,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.9.0" @@ -1832,6 +1838,18 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "env_filter" version = "0.1.0" @@ -4219,6 +4237,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pretty_assertions" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "prettyplease" version = "0.2.20" @@ -6888,6 +6916,17 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "vm2" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.0", + "zkevm_opcode_defs 0.150.0", +] + [[package]] name = "wait-timeout" version = "0.2.0" @@ -7275,6 +7314,12 @@ dependencies = [ "tap", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zerocopy" version = "0.7.34" @@ -8027,10 +8072,12 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", + "pretty_assertions", "serde", "thiserror", "tracing", "vise", + "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0",
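For reference, the configuration surface introduced by this diff (`state_keeper_fast_vm_mode` plus the `playground` section) can be sketched with stand-in serde structs as below. Field names come from the TOML/YAML above and from the wiring code (`first_processed_batch`, `reset`); the real `ExperimentalVmConfig` / `ExperimentalVmPlaygroundConfig` types live in `zksync_config`, and their exact defaults and case handling (the YAML uses `OLD` / `SHADOW`, the TOML lowercase) are not shown here, so this is an assumption-laden sketch using `serde`, `serde_yaml`, and `anyhow`:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
enum FastVmMode {
    #[default]
    Old,
    New,
    Shadow,
}

#[derive(Debug, Serialize, Deserialize)]
struct PlaygroundConfig {
    db_path: String,
    fast_vm_mode: FastVmMode,
    first_processed_batch: u32,
    reset: bool,
}

#[derive(Debug, Serialize, Deserialize)]
struct ExperimentalVm {
    state_keeper_fast_vm_mode: FastVmMode,
    playground: PlaygroundConfig,
}

fn main() -> anyhow::Result<()> {
    // Mirrors the defaults suggested by the config files above: the state keeper stays on the
    // old VM, while the playground shadows the new VM against it.
    let config = ExperimentalVm {
        state_keeper_fast_vm_mode: FastVmMode::Old,
        playground: PlaygroundConfig {
            db_path: "./db/main/vm_playground".to_owned(),
            fast_vm_mode: FastVmMode::Shadow,
            first_processed_batch: 0,
            reset: false,
        },
    };
    println!("{}", serde_yaml::to_string(&config)?);
    Ok(())
}
```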