From 1f1deee25bf8d4f5e5b7aef48ccca1a797dc9261 Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Wed, 18 Sep 2024 18:20:31 +0530 Subject: [PATCH 01/53] test: Added check for EOA transfer E2E Test (#75) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * tests: PoC bridge testing with bats * Run bats in e2e * Remove unused make lines * tests: wip * test: better * Let's see * test: fix test * test: use cdk image * test: bats path * test: fix * test: deposit on 1 * test: wait for claim * test: timeout * test: timeout * test: increase timeout * test: apply feedback * ci: lint action * test: do not prepare if already present * test: Send EOA and deploy contract E2E tests using Bats (#69) * feat: add helper functions for contract deployment and sending transactions using cast commands * feat: send EOA transaction test basic * feat: add CDK_ERIGON_NODE_NAME var * feat: invoke _common_setup function * feat: deploy ERC20Mock contract E2E test * feat: more strict assertions and use run instead of $ * feat: tweaks * fix: change the way transaction hash is extracted * fix: change the way transactionHash gets fetched from the output * fix: cast call helper function and invocation of balanceOf function * test: use RAW_PRIVATE_KEY env variable for sender private key * fix: address feedback from @vcastellm * fix: Linters warning fixes (#74) * feat: use the latest golangci-lint version and fix config warnings * fix: linter warnings * fix: linter warnings (part 2) * fix: propagate the error from aggregator.Start * fix: format golangci config file * fix: suppress gosec overflow issues * fix: exclude G115 gosec linter rule * fix: use crypto/rand number generator * test: apply feedback * test: lint * ci: increase lint timeout * test: balance check for ether transafers * test: wip * fix: apply feedback * fix: cast commands * test: add edge case * fix: tests * fix: resolve conflicts * fix: tests * fix: tests * fix: rpc * fix: typo * fix: checkTransactionSuccess * fix: send eoa transaction test * refactor: feedback * refactor: feedback * fix: simplifications * fix: even more simplifications --------- Co-authored-by: Victor Castell <0x@vcastellm.xyz> Co-authored-by: Stefan Negovanović <93934272+Stefan-Ethernal@users.noreply.github.com> Co-authored-by: Stefan Negovanović --- test/basic-e2e.bats | 15 ++++ test/helpers/common.bash | 181 +++++++++++++++++++++++++++++---------- 2 files changed, 150 insertions(+), 46 deletions(-) diff --git a/test/basic-e2e.bats b/test/basic-e2e.bats index cbd845f5..7124dcc2 100644 --- a/test/basic-e2e.bats +++ b/test/basic-e2e.bats @@ -11,11 +11,26 @@ setup() { } @test "Send EOA transaction" { + local sender_addr=$(cast wallet address --private-key "$private_key") + local initial_nonce=$(cast nonce "$sender_addr" --rpc-url "$rpc_url") || return 1 local value="10ether" + # case 1: Transaction successful sender has sufficient balance run sendTx "$private_key" "$receiver" "$value" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # case 2: Transaction rejected as sender attempts to transfer more than it has in its wallet. 
+ # Transaction will fail pre-validation check on the node and will be dropped subsequently from the pool + # without recording it on the chain and hence nonce will not change + local sender_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + local excessive_value=$(echo "$sender_balance + 1" | bc)"ether" + run sendTx "$private_key" "$receiver" "$excessive_value" + assert_failure + + # Check whether the sender's nonce was updated correctly + local final_nonce=$(cast nonce "$sender_addr" --rpc-url "$rpc_url") || return 1 + assert_equal "$final_nonce" "$(echo "$initial_nonce + 1" | bc)" } @test "Deploy ERC20Mock contract" { diff --git a/test/helpers/common.bash b/test/helpers/common.bash index 15057d17..aabae0b6 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -16,13 +16,13 @@ function deployContract() { fi # Get the sender address - local senderAddr=$(cast wallet address "$private_key") + local sender_addr=$(cast wallet address "$private_key") if [[ $? -ne 0 ]]; then echo "Error: Failed to retrieve sender address." return 1 fi - echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $senderAddr)" >&3 + echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $sender_addr)" >&3 # Get bytecode from the contract artifact local bytecode=$(jq -r .bytecode "$contract_artifact") @@ -77,77 +77,111 @@ function sendTx() { fi local private_key="$1" # Sender private key - local account_addr="$2" # Receiver address + local receiver_addr="$2" # Receiver address local value_or_function_sig="$3" # Value or function signature # Error handling: Ensure the receiver is a valid Ethereum address - if [[ ! "$account_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then - echo "Error: Invalid receiver address '$account_addr'." + if [[ ! "$receiver_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$receiver_addr'." return 1 fi - shift 3 # Shift the first 3 arguments (private_key, account_addr, value_or_function_sig) + shift 3 # Shift the first 3 arguments (private_key, receiver_addr, value_or_function_sig) + local params=("$@") # Collect all remaining arguments as function parameters - local senderAddr - senderAddr=$(cast wallet address "$private_key") - if [[ $? -ne 0 ]]; then - echo "Error: Failed to extract the sender address for $private_key" + # Get sender address from private key + local sender_addr + sender_addr=$(cast wallet address "$private_key") || { + echo "Error: Failed to extract the sender address." 
return 1 - fi + } - # Check if the first remaining argument is a numeric value (Ether to be transferred) - if [[ "$value_or_function_sig" =~ ^[0-9]+(ether)?$ ]]; then - # Case: EOA transaction (Ether transfer) - echo "Sending EOA transaction (RPC URL: $rpc_url, sender: $senderAddr) to: $account_addr " \ - "with value: $value_or_function_sig" >&3 + # Get initial ether balances of sender and receiver + local sender_initial_balance receiver_initial_balance + sender_initial_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + receiver_initial_balance=$(cast balance "$receiver_addr" --ether --rpc-url "$rpc_url") || return 1 - cast_output=$(cast send --rpc-url "$rpc_url" \ - --private-key "$private_key" \ - "$account_addr" --value "$value_or_function_sig" \ - --legacy \ - 2>&1) + # Check if the value_or_function_sig is a numeric value (Ether to be transferred) + if [[ "$value_or_function_sig" =~ ^[0-9]+(\.[0-9]+)?(ether)?$ ]]; then + # Case: Ether transfer (EOA transaction) + send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "$sender_initial_balance" "$receiver_initial_balance" else - # Case: Smart contract transaction (contract interaction with function signature and parameters) - local params=("$@") # Collect all remaining arguments as function parameters + # Case: Smart contract interaction (contract interaction with function signature and parameters) + send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "${params[@]}" + fi +} + +function send_eoa_transaction() { + local private_key="$1" + local receiver_addr="$2" + local value="$3" + local sender_addr="$4" + local sender_initial_balance="$5" + local receiver_initial_balance="$6" - echo "Function signature: '$value_or_function_sig'" >&3 + echo "Sending EOA transaction to: $receiver_addr with value: $value" >&3 - # Verify if the function signature starts with "function" - if [[ ! "$value_or_function_sig" =~ ^function\ .+\(.+\)$ ]]; then - echo "Error: Invalid function signature format '$value_or_function_sig'." - return 1 - fi + # Send transaction via cast + local cast_output tx_hash + cast_output=$(cast send --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" --value "$value" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi - echo "Sending smart contract transaction (RPC URL: $rpc_url, sender: $senderAddr) to $account_addr" \ - "with function signature: '$value_or_function_sig' and params: ${params[*]}" >&3 + tx_hash=$(extract_tx_hash "$cast_output") + [[ -z "$tx_hash" ]] && { + echo "Error: Failed to extract transaction hash." + return 1 + } - # Send the smart contract interaction using cast - cast_output=$(cast send --rpc-url "$rpc_url" \ - --private-key "$private_key" \ - "$account_addr" "$value_or_function_sig" "${params[@]}" \ - --legacy \ - 2>&1) + checkBalances "$sender_addr" "$receiver_addr" "$value" "$tx_hash" "$sender_initial_balance" "$receiver_initial_balance" + if [[ $? -ne 0 ]]; then + echo "Error: Balance not updated correctly." + return 1 fi - # Check if the transaction was successful + echo "Transaction successful (transaction hash: $tx_hash)" +} + +function send_smart_contract_transaction() { + local private_key="$1" + local receiver_addr="$2" + local function_sig="$3" + local sender_addr="$4" + shift 4 + local params=("$@") + + # Verify if the function signature starts with "function" + if [[ ! 
"$function_sig" =~ ^function\ .+\(.+\)$ ]]; then + echo "Error: Invalid function signature format '$function_sig'." + return 1 + fi + + echo "Sending smart contract transaction to $receiver_addr with function signature: '$function_sig' and params: ${params[*]}" >&3 + + # Send the smart contract interaction using cast + local cast_output tx_hash + cast_output=$(cast send --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" "$function_sig" "${params[@]}" --legacy 2>&1) if [[ $? -ne 0 ]]; then - echo "Error: Failed to send transaction. The cast send output:" + echo "Error: Failed to send transaction. Output:" echo "$cast_output" return 1 fi - # Extract the transaction hash from the output - local tx_hash=$(echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1) - echo "Tx hash: $tx_hash" - - if [[ -z "$tx_hash" ]]; then + tx_hash=$(extract_tx_hash "$cast_output") + [[ -z "$tx_hash" ]] && { echo "Error: Failed to extract transaction hash." return 1 - fi + } echo "Transaction successful (transaction hash: $tx_hash)" +} - return 0 +function extract_tx_hash() { + local cast_output="$1" + echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1 } function queryContract() { @@ -186,3 +220,58 @@ function queryContract() { return 0 } + +function checkBalances() { + local sender="$1" + local receiver="$2" + local amount="$3" + local tx_hash="$4" + local sender_initial_balance="$5" + local receiver_initial_balance="$6" + + # Ethereum address regex: 0x followed by 40 hexadecimal characters + if [[ ! "$sender" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid sender address '$sender'." + return 1 + fi + + if [[ ! "$receiver" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$receiver'." + return 1 + fi + + # Transaction hash regex: 0x followed by 64 hexadecimal characters + if [[ ! "$tx_hash" =~ ^0x[a-fA-F0-9]{64}$ ]]; then + echo "Error: Invalid transaction hash: $tx_hash". + return 1 + fi + + local sender_final_balance=$(cast balance "$sender" --ether --rpc-url "$rpc_url") || return 1 + local gas_used=$(cast tx "$tx_hash" --rpc-url "$rpc_url" | grep '^gas ' | awk '{print $2}') + local gas_price=$(cast tx "$tx_hash" --rpc-url "$rpc_url" | grep '^gasPrice' | awk '{print $2}') + local gas_fee=$(echo "$gas_used * $gas_price" | bc) + local gas_fee_in_ether=$(cast to-unit "$gas_fee" ether) + + local sender_balance_change=$(echo "$sender_initial_balance - $sender_final_balance" | bc) + echo "Sender balance changed by: '$sender_balance_change' wei" + echo "Gas fee paid: '$gas_fee_in_ether' ether" + + local receiver_final_balance=$(cast balance "$receiver" --ether --rpc-url "$rpc_url") || return 1 + local receiver_balance_change=$(echo "$receiver_final_balance - $receiver_initial_balance" | bc) + echo "Receiver balance changed by: '$receiver_balance_change' wei" + + # Trim 'ether' suffix from amount to get the numeric part + local value_in_ether=$(echo "$amount" | sed 's/ether$//') + + if ! echo "$receiver_balance_change == $value_in_ether" | bc -l; then + echo "Error: receiver balance updated incorrectly. Expected: $value_in_ether, Actual: $receiver_balance_change" + return 1 + fi + + # Calculate expected sender balance change + local expected_sender_change=$(echo "$value_in_ether + $gas_fee_in_ether" | bc) + if ! echo "$sender_balance_change == $expected_sender_change" | bc -l; then + echo "Error: sender balance updated incorrectly. 
Expected: $expected_sender_change, Actual: $sender_balance_change" + return 1 + fi +} From faf88bbf25a63a5909935f6efabc95baf0df4118 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 18 Sep 2024 14:57:58 +0200 Subject: [PATCH 02/53] fix: Aggregator: use sequenceBatch maxTimestamp as TimestampLimit (#84) - Update to zkevm-synchronizer-l1 v1.0.1 - Check unexpected/deprecateds fields on config file --- config/config.go | 68 ++++++++++++++++++++++++++++++++++++++----- config/config_test.go | 51 ++++++++++++++++++++++++++++++++ config/default.go | 27 +++++++++++------ go.mod | 28 +++++++++--------- go.sum | 56 +++++++++++++++++------------------ 5 files changed, 172 insertions(+), 58 deletions(-) create mode 100644 config/config_test.go diff --git a/config/config.go b/config/config.go index cb899df8..6f5456b9 100644 --- a/config/config.go +++ b/config/config.go @@ -3,7 +3,6 @@ package config import ( "bytes" "errors" - "fmt" "path/filepath" "strings" @@ -52,6 +51,22 @@ const ( FlagOutputFile = "output" // FlagMaxAmount is the flag to avoid to use the flag FlagAmount FlagMaxAmount = "max-amount" + + deprecatedFieldSyncDB = "Aggregator.Synchronizer.DB is deprecated use Aggregator.Synchronizer.SQLDB instead" +) + +type ForbiddenField struct { + FieldName string + Reason string +} + +var ( + forbiddenFieldsOnConfig = []ForbiddenField{ + { + FieldName: "aggregator.synchronizer.db.", + Reason: deprecatedFieldSyncDB, + }, + } ) /* @@ -128,15 +143,18 @@ func Default() (*Config, error) { return &cfg, nil } +func Load(ctx *cli.Context) (*Config, error) { + configFilePath := ctx.String(FlagCfg) + return LoadFile(configFilePath) +} // Load loads the configuration -func Load(ctx *cli.Context) (*Config, error) { +func LoadFile(configFilePath string) (*Config, error) { cfg, err := Default() if err != nil { return nil, err } - - configFilePath := ctx.String(FlagCfg) + expectedKeys := viper.AllKeys() if configFilePath != "" { dirName, fileName := filepath.Split(configFilePath) @@ -160,7 +178,6 @@ func Load(ctx *cli.Context) (*Config, error) { log.Error("config file not found") } else { log.Errorf("error reading config file: ", err) - return nil, err } } @@ -179,8 +196,45 @@ func Load(ctx *cli.Context) (*Config, error) { if err != nil { return nil, err } + if expectedKeys != nil { + configKeys := viper.AllKeys() + unexpectedFields := getUnexpectedFields(configKeys, expectedKeys) + for _, field := range unexpectedFields { + forbbidenInfo := getForbiddenField(field) + if forbbidenInfo != nil { + log.Warnf("forbidden field %s in config file: %s", field, forbbidenInfo.Reason) + } else { + log.Debugf("field %s in config file doesnt have a default value", field) + } + } + } + return cfg, nil +} - fmt.Println("cfg", cfg.NetworkConfig.L1Config) +func getForbiddenField(fieldName string) *ForbiddenField { + for _, forbiddenField := range forbiddenFieldsOnConfig { + if forbiddenField.FieldName == fieldName || strings.HasPrefix(fieldName, forbiddenField.FieldName) { + return &forbiddenField + } + } + return nil +} - return cfg, nil +func getUnexpectedFields(keysOnFile, expectedConfigKeys []string) []string { + wrongFields := make([]string, 0) + for _, key := range keysOnFile { + if !contains(expectedConfigKeys, key) { + wrongFields = append(wrongFields, key) + } + } + return wrongFields +} + +func contains(keys []string, key string) bool { + for _, k := range keys { + if k == key { + return true + } + } + return false } diff --git a/config/config_test.go 
b/config/config_test.go new file mode 100644 index 00000000..1aaa24e0 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,51 @@ +package config + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadDeafaultConfig(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(DefaultValues)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +const configWithUnexpectedFields = ` +[UnknownField] +Field = "value" +` + +func TestLoadConfigWithUnexpectedFields(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(configWithUnexpectedFields)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +const configWithForbiddenFields = ` +[aggregator.synchronizer.db] +name = "value" +` + +func TestLoadConfigWithForbiddenFields(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(configWithForbiddenFields)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} diff --git a/config/default.go b/config/default.go index ce76abc4..6bfb7495 100644 --- a/config/default.go +++ b/config/default.go @@ -106,14 +106,13 @@ SequencerPrivateKey = {} L1ChainID = 11155111 HTTPHeaders = [] [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" - Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 + [Aggregator.Synchronizer.Log] + Environment = "development" # "production" or "development" + Level = "info" + Outputs = ["stderr"] + [Aggregator.Synchronizer.SQLDB] + DriverName = "sqlite3" + DataSourceName = "file:/tmp/aggregator_sync_db.sqlite" [Aggregator.Synchronizer.Synchronizer] SyncInterval = "10s" SyncChunkSize = 1000 @@ -122,9 +121,19 @@ SequencerPrivateKey = {} BlockFinality = "finalized" OverrideStorageCheck = false [Aggregator.Synchronizer.Etherman] + L1URL = "http://localhost:8545" + ForkIDChunkSize = 100 + L1ChainID = 0 [Aggregator.Synchronizer.Etherman.Validium] Enabled = false - + TrustedSequencerURL = "" + RetryOnDACErrorInterval = "1m" + DataSourcePriority = ["trusted", "external"] + [Aggregator.Synchronizer.Etherman.Validium.Translator] + FullMatchRules = [] + [Aggregator.Synchronizer.Etherman.Validium.RateLimit] + NumRequests = 900 + Interval = "1s" [ReorgDetectorL1] DBPath = "/tmp/reorgdetectorl1" diff --git a/go.mod b/go.mod index a2ca38f4..aeaae312 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,8 @@ require ( github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 - github.com/ethereum/go-ethereum v1.14.5 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 + github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 github.com/iden3/go-iden3-crypto v0.0.16 @@ -19,7 +19,7 @@ require ( github.com/ledgerwatch/erigon-lib v1.0.0 github.com/mattn/go-sqlite3 v1.14.23 
github.com/mitchellh/mapstructure v1.5.0 - github.com/rubenv/sql-migrate v1.6.1 + github.com/rubenv/sql-migrate v1.7.0 github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 @@ -27,9 +27,9 @@ require ( go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.24.0 - golang.org/x/net v0.26.0 - golang.org/x/sync v0.7.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 + golang.org/x/sync v0.8.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 modernc.org/sqlite v1.32.0 @@ -44,13 +44,14 @@ require ( github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.0 // indirect + github.com/cockroachdb/pebble v1.1.1 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect @@ -66,10 +67,9 @@ require ( github.com/erigontech/mdbx-go v0.27.14 // indirect github.com/ethereum/c-kzg-4844 v1.0.0 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect - github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -87,7 +87,7 @@ require ( github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 // indirect github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.4 // indirect + github.com/holiman/uint256 v1.3.1 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect @@ -149,8 +149,8 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 818a9b5d..e3544380 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUx github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= github.com/0xPolygonHermez/zkevm-ethtx-manager 
v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234/go.mod h1:zBZWxwOHKlw+ghd9roQLgIkDZWA7e7qO3EsfQQT/+oQ= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 h1:h/B5AzWSZTxb1HouulXeE9nbHD1d4/nc67ZQc0khAQA= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0/go.mod h1:+tQwkDf+5AL3dgL6G1t0qmwct0NJDlGlzqycOM5jn5g= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 h1:8GbJBNsYO4zrqiBX++et8eQrJDEWEZuo3Ch3M416YnI= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -33,8 +33,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= @@ -53,12 +53,14 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= -github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= -github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= +github.com/cockroachdb/pebble v1.1.1/go.mod 
h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= @@ -95,12 +97,10 @@ github.com/erigontech/mdbx-go v0.27.14 h1:IVVeQVCAjZRpAR8bThlP2ISxrOwdV35NZdGwAg github.com/erigontech/mdbx-go v0.27.14/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.14.5 h1:szuFzO1MhJmweXjoM5nSAeDvjNUH3vIQoMzzQnfvjpw= -github.com/ethereum/go-ethereum v1.14.5/go.mod h1:VEDGGhSxY7IEjn98hJRFXl/uFvpRgbIIf2PpXiyGGgc= +github.com/ethereum/go-ethereum v1.14.8 h1:NgOWvXS+lauK+zFukEvi85UmmsS/OkV0N23UZ1VTIig= +github.com/ethereum/go-ethereum v1.14.8/go.mod h1:TJhyuDq0JDppAkFXgqjwpdlQApywnu/m10kFPxh8vvs= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -110,8 +110,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= -github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= @@ -188,8 +188,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= +github.com/holiman/uint256 v1.3.1/go.mod 
h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -370,8 +370,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubenv/sql-migrate v1.6.1 h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTGRos= -github.com/rubenv/sql-migrate v1.6.1/go.mod h1:tPzespupJS0jacLfhbwto/UjSX+8h2FdWB7ar+QlHa0= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/meddler v1.0.1 h1:JLR7Z4M4iGm1nr7DIURBq18UW8cTrm+qArUFgOhELo8= @@ -476,8 +476,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -499,15 +499,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -539,8 +539,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -550,8 +550,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= From f477acaa60d72f17d8868c7218149ed5f4bb2346 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Wed, 18 Sep 2024 17:58:37 +0200 Subject: [PATCH 03/53] First L1 Info tree in which bridge is included (#70) * wip * wip * WIP * decoding direct and indeirecr assets and messages works * connect everything * fix building contract scripts * fix building contract scripts * wip * WIP * tree migrated to SQLite * wip * wip * bridgesync working with sqlite * pass tests * minor cleaning * add GetBlockByLER func * handle err not found * merge develop * use memory for sqlite on the tests * increase timestamp to pass UT * review * finished implementation * replace l1bridge2infosync for rpc logic * ut wip * unit tests for new rpc funcs * add UTs for new methods in the l1infotreesync processor * fix UTs * pass linter * add PR requests * add missing processor calls * fixx linter * rephrase binnary search formula * rephrase binnary search formula --- 
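Note: the change set below replaces the l1bridge2infoindexsync component with RPC-side logic that locates the first L1 info tree leaf whose mainnet exit root already contains a given bridge deposit ("First L1 Info tree in which bridge is included", "rephrase binary search formula" above). The following is a minimal, hypothetical Go sketch of that binary-search idea only; the names, signatures and predicate are illustrative and are not the repository's actual API, which lives in rpc/bridge.go and l1infotreesync/processor.go in the diff that follows.

    package main

    import "fmt"

    // includedAt reports whether the L1 info tree leaf at index i already
    // contains the bridge deposit (a stand-in for a real lookup, e.g. comparing
    // the leaf's mainnet exit root against the local exit tree by deposit count).
    type includedAt func(i uint32) bool

    // firstIndexIncluding returns the smallest index in [0, count) for which
    // included(i) is true, or an error if no such index exists. It assumes the
    // predicate is monotone: once a leaf includes the deposit, all later leaves do.
    func firstIndexIncluding(count uint32, included includedAt) (uint32, error) {
        lo, hi := uint32(0), count // search over the half-open range [lo, hi)
        for lo < hi {
            mid := lo + (hi-lo)/2
            if included(mid) {
                hi = mid // mid satisfies the predicate; answer is at mid or earlier
            } else {
                lo = mid + 1 // mid does not; answer must be strictly later
            }
        }
        if lo == count {
            return 0, fmt.Errorf("bridge not included in any of the %d leaves", count)
        }
        return lo, nil
    }

    func main() {
        // Example: 10 leaves, the deposit first appears at leaf 6.
        idx, err := firstIndexIncluding(10, func(i uint32) bool { return i >= 6 })
        fmt.Println(idx, err) // prints: 6 <nil>
    }

The search only works because the inclusion predicate is monotone over leaf indices (exit roots are append-only), which is why the processor can answer the query without the dedicated index the deleted package used to maintain.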
aggoracle/oracle.go | 3 +- bridgesync/bridgesync.go | 15 +- bridgesync/claimcalldata_test.go | 5 +- bridgesync/downloader.go | 3 +- bridgesync/processor.go | 3 +- claimsponsor/claimsponsor.go | 19 +- cmd/run.go | 36 - config/config.go | 5 - config/default.go | 6 - db/meddler.go | 36 + db/sqlite.go | 12 + l1bridge2infoindexsync/config.go | 15 - l1bridge2infoindexsync/downloader.go | 70 -- l1bridge2infoindexsync/driver.go | 221 ------- l1bridge2infoindexsync/e2e_test.go | 232 ------- .../l1bridge2infoindexsync.go | 62 -- l1bridge2infoindexsync/processor.go | 206 ------ l1bridge2infoindexsync/processor_test.go | 22 - l1infotreesync/e2e_test.go | 29 +- l1infotreesync/l1infotreesync.go | 28 + .../migrations/l1infotreesync0001.sql | 12 + l1infotreesync/processor.go | 114 +++- l1infotreesync/processor_test.go | 167 ++++- lastgersync/evmdownloader.go | 6 +- lastgersync/processor.go | 8 +- rpc/bridge.go | 204 ++++-- rpc/bridge_interfaces.go | 40 ++ rpc/bridge_test.go | 443 +++++++++++++ rpc/{bridge_client.go => client/bridge.go} | 7 +- rpc/{ => client}/client.go | 0 rpc/mocks/bridge_client_interface.go | 319 +++++++++ rpc/mocks/bridger.go | 159 +++++ rpc/mocks/claim_sponsorer.go | 145 ++++ rpc/mocks/client_factory_interface.go | 83 +++ rpc/mocks/client_interface.go | 319 +++++++++ rpc/mocks/l1_info_treer.go | 626 ++++++++++++++++++ rpc/mocks/last_ge_rer.go | 104 +++ rpc/types/bridge.go | 12 + test/Makefile | 5 + tree/appendonlytree.go | 4 +- tree/tree.go | 25 +- tree/updatabletree.go | 14 +- 42 files changed, 2849 insertions(+), 995 deletions(-) delete mode 100644 l1bridge2infoindexsync/config.go delete mode 100644 l1bridge2infoindexsync/downloader.go delete mode 100644 l1bridge2infoindexsync/driver.go delete mode 100644 l1bridge2infoindexsync/e2e_test.go delete mode 100644 l1bridge2infoindexsync/l1bridge2infoindexsync.go delete mode 100644 l1bridge2infoindexsync/processor.go delete mode 100644 l1bridge2infoindexsync/processor_test.go create mode 100644 rpc/bridge_interfaces.go create mode 100644 rpc/bridge_test.go rename rpc/{bridge_client.go => client/bridge.go} (95%) rename rpc/{ => client}/client.go (100%) create mode 100644 rpc/mocks/bridge_client_interface.go create mode 100644 rpc/mocks/bridger.go create mode 100644 rpc/mocks/claim_sponsorer.go create mode 100644 rpc/mocks/client_factory_interface.go create mode 100644 rpc/mocks/client_interface.go create mode 100644 rpc/mocks/l1_info_treer.go create mode 100644 rpc/mocks/last_ge_rer.go create mode 100644 rpc/types/bridge.go diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index 1ba94d7a..874f7ada 100644 --- a/aggoracle/oracle.go +++ b/aggoracle/oracle.go @@ -6,6 +6,7 @@ import ( "math/big" "time" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" @@ -70,7 +71,7 @@ func (a *AggOracle) Start(ctx context.Context) { case errors.Is(err, l1infotreesync.ErrBlockNotProcessed): a.logger.Debugf("syncer is not ready for the block %d", blockNumToFetch) - case errors.Is(err, l1infotreesync.ErrNotFound): + case errors.Is(err, db.ErrNotFound): blockNumToFetch = 0 a.logger.Debugf("syncer has not found any GER until block %d", blockNumToFetch) diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index e79fba2e..e6a61c5e 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -160,7 +160,7 @@ func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) return s.processor.GetLastProcessedBlock(ctx) } -func (s 
*BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (tree.Root, error) { +func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (*tree.Root, error) { return s.processor.exitTree.GetRootByHash(ctx, root) } @@ -172,10 +172,7 @@ func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) return s.processor.GetBridges(ctx, fromBlock, toBlock) } -// GetProof retrieves the Merkle proof for the given deposit count and exit root. -func (s *BridgeSync) GetProof( - ctx context.Context, depositCount uint32, localExitRoot common.Hash, -) ([32]common.Hash, error) { +func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) { return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) } @@ -186,3 +183,11 @@ func (p *processor) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, } return root.BlockNum, nil } + +func (s *BridgeSync) GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) { + root, err := s.processor.exitTree.GetRootByHash(ctx, ler) + if err != nil { + return root, err + } + return root, nil +} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index 1319835b..2e574a4e 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -10,6 +10,7 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/claimmock" "github.com/0xPolygon/cdk/test/contracts/claimmockcaller" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -52,11 +53,11 @@ func TestClaimCalldata(t *testing.T) { require.NoError(t, err) proofLocal := [32][32]byte{} - proofLocalH := [32]common.Hash{} + proofLocalH := tree.Proof{} proofLocal[5] = common.HexToHash("beef") proofLocalH[5] = common.HexToHash("beef") proofRollup := [32][32]byte{} - proofRollupH := [32]common.Hash{} + proofRollupH := tree.Proof{} proofRollup[4] = common.HexToHash("a1fa") proofRollupH[4] = common.HexToHash("a1fa") expectedClaim := Claim{ diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index b34267ce..9be7a6bc 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" rpcTypes "github.com/0xPolygon/cdk-rpc/types" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/sync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum" @@ -181,7 +182,7 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H callStack.Push(c) } } - return ErrNotFound + return db.ErrNotFound } func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) { diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 47b26595..e4ba5423 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -23,7 +23,6 @@ import ( var ( // ErrBlockNotProcessed indicates that the given block(s) have not been processed yet. 
ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNotFound = errors.New("not found") ) // Bridge is the representation of a bridge event @@ -184,7 +183,7 @@ func (p *processor) queryBlockRange(tx db.Querier, fromBlock, toBlock uint64, ta `, table), fromBlock, toBlock) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return nil, ErrNotFound + return nil, db.ErrNotFound } return nil, err } diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index fbcdca73..c9df6561 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -9,8 +9,10 @@ import ( "time" dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" @@ -31,14 +33,13 @@ const ( var ( ErrInvalidClaim = errors.New("invalid claim") - ErrNotFound = errors.New("not found") ) // Claim representation of a claim event type Claim struct { LeafType uint8 - ProofLocalExitRoot [32]common.Hash - ProofRollupExitRoot [32]common.Hash + ProofLocalExitRoot tree.Proof + ProofRollupExitRoot tree.Proof GlobalIndex *big.Int MainnetExitRoot common.Hash RollupExitRoot common.Hash @@ -131,7 +132,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { c.logger.Debugf("queue is empty") err = nil time.Sleep(c.waitOnEmptyQueue) @@ -242,7 +243,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error } _, err = getClaim(tx, claim.GlobalIndex) - if !errors.Is(err, ErrNotFound) { + if !errors.Is(err, db.ErrNotFound) { if err != nil { tx.Rollback() @@ -264,7 +265,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error var queuePosition uint64 lastQueuePosition, _, err := getLastQueueIndex(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): queuePosition = 0 case err != nil: @@ -307,7 +308,7 @@ func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint return nil, err } if globalIndexBytes == nil { - return nil, ErrNotFound + return nil, db.ErrNotFound } return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes)) @@ -345,7 +346,7 @@ func getIndex(iter iter.KV) (uint64, *big.Int, error) { return 0, nil, err } if k == nil { - return 0, nil, ErrNotFound + return 0, nil, db.ErrNotFound } globalIndex := new(big.Int).SetBytes(v) @@ -368,7 +369,7 @@ func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { return nil, err } if claimBytes == nil { - return nil, ErrNotFound + return nil, db.ErrNotFound } claim := &Claim{} err = json.Unmarshal(claimBytes, claim) diff --git a/cmd/run.go b/cmd/run.go index 773c5e24..0b744243 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -25,7 +25,6 @@ import ( "github.com/0xPolygon/cdk/etherman" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" "github.com/0xPolygon/cdk/etherman/contracts" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" @@ -81,10 +80,6 @@ func start(cliCtx *cli.Context) error { claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, c.ClaimSponsor) l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, c.BridgeL1Sync, 
reorgDetectorL1, l1Client) l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, c.BridgeL2Sync, reorgDetectorL2, l2Client) - l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded( - cliCtx.Context, components, c.L1Bridge2InfoIndexSync, - l1BridgeSync, l1InfoTreeSync, l1Client, - ) lastGERSync := runLastGERSyncIfNeeded( cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, ) @@ -115,7 +110,6 @@ func start(cliCtx *cli.Context) error { c.Common.NetworkID, claimSponsor, l1InfoTreeSync, - l1Bridge2InfoIndexSync, lastGERSync, l1BridgeSync, l2BridgeSync, @@ -623,34 +617,6 @@ func runClaimSponsorIfNeeded( return cs } -func runL1Bridge2InfoIndexSyncIfNeeded( - ctx context.Context, - components []string, - cfg l1bridge2infoindexsync.Config, - l1BridgeSync *bridgesync.BridgeSync, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l1Client *ethclient.Client, -) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync { - if !isNeeded([]string{cdkcommon.RPC}, components) { - return nil - } - l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New( - cfg.DBPath, - l1BridgeSync, - l1InfoTreeSync, - l1Client, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - cfg.WaitForSyncersPeriod.Duration, - ) - if err != nil { - log.Fatalf("error creating l1Bridge2InfoIndexSync: %s", err) - } - go l1Bridge2InfoIndexSync.Start(ctx) - - return l1Bridge2InfoIndexSync -} - func runLastGERSyncIfNeeded( ctx context.Context, components []string, @@ -751,7 +717,6 @@ func createRPC( cdkNetworkID uint32, sponsor *claimsponsor.ClaimSponsor, l1InfoTree *l1infotreesync.L1InfoTreeSync, - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync, injectedGERs *lastgersync.LastGERSync, bridgeL1 *bridgesync.BridgeSync, bridgeL2 *bridgesync.BridgeSync, @@ -767,7 +732,6 @@ func createRPC( cdkNetworkID, sponsor, l1InfoTree, - l1Bridge2Index, injectedGERs, bridgeL1, bridgeL2, diff --git a/config/config.go b/config/config.go index 6f5456b9..431d0175 100644 --- a/config/config.go +++ b/config/config.go @@ -13,7 +13,6 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" @@ -111,10 +110,6 @@ type Config struct { // ClaimSponsor is the config for the claim sponsor ClaimSponsor claimsponsor.EVMClaimSponsorConfig - // L1Bridge2InfoIndexSync is the config for the synchronizers that maintains the relation of - // bridge from L1 --> L1 Info tree index. 
Needed for the bridge service (RPC) - L1Bridge2InfoIndexSync l1bridge2infoindexsync.Config - // BridgeL1Sync is the configuration for the synchronizer of the bridge of the L1 BridgeL1Sync bridgesync.Config diff --git a/config/default.go b/config/default.go index 6bfb7495..377e9033 100644 --- a/config/default.go +++ b/config/default.go @@ -221,12 +221,6 @@ GasOffset = 0 L1ChainID = 1337 HTTPHeaders = [] -[L1Bridge2InfoIndexSync] -DBPath = "/tmp/l1bridge2infoindexsync" -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForSyncersPeriod = "3s" - [BridgeL1Sync] DBPath = "/tmp/bridgel1sync" BlockFinality = "LatestBlock" diff --git a/db/meddler.go b/db/meddler.go index 90071916..e1f55086 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -19,6 +19,7 @@ func initMeddler() { meddler.Register("bigint", BigIntMeddler{}) meddler.Register("merkleproof", MerkleProofMeddler{}) meddler.Register("hash", HashMeddler{}) + meddler.Register("address", AddressMeddler{}) } func SQLiteErr(err error) (*sqlite.Error, bool) { @@ -176,3 +177,38 @@ func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err } return field.Hex(), nil } + +// AddressMeddler encodes or decodes the field value to or from JSON +type AddressMeddler struct{} + +// PreRead is called before a Scan operation for fields that have the ProofMeddler +func (b AddressMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { + // give a pointer to a byte buffer to grab the raw data + return new(string), nil +} + +// PostRead is called after a Scan operation for fields that have the ProofMeddler +func (b AddressMeddler) PostRead(fieldPtr, scanTarget interface{}) error { + ptr, ok := scanTarget.(*string) + if !ok { + return errors.New("scanTarget is not *string") + } + if ptr == nil { + return errors.New("AddressMeddler.PostRead: nil pointer") + } + field, ok := fieldPtr.(*common.Address) + if !ok { + return errors.New("fieldPtr is not common.Address") + } + *field = common.HexToAddress(*ptr) + return nil +} + +// PreWrite is called before an Insert or Update operation for fields that have the ProofMeddler +func (b AddressMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { + field, ok := fieldPtr.(common.Address) + if !ok { + return nil, errors.New("fieldPtr is not common.Address") + } + return field.Hex(), nil +} diff --git a/db/sqlite.go b/db/sqlite.go index e30e9e26..ba8faefb 100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -2,6 +2,7 @@ package db import ( "database/sql" + "errors" _ "github.com/mattn/go-sqlite3" ) @@ -10,6 +11,10 @@ const ( UniqueConstrain = 1555 ) +var ( + ErrNotFound = errors.New("not found") +) + // NewSQLiteDB creates a new SQLite DB func NewSQLiteDB(dbPath string) (*sql.DB, error) { initMeddler() @@ -25,3 +30,10 @@ func NewSQLiteDB(dbPath string) (*sql.DB, error) { `) return db, err } + +func ReturnErrNotFound(err error) error { + if errors.Is(err, sql.ErrNoRows) { + return ErrNotFound + } + return err +} diff --git a/l1bridge2infoindexsync/config.go b/l1bridge2infoindexsync/config.go deleted file mode 100644 index ef37f738..00000000 --- a/l1bridge2infoindexsync/config.go +++ /dev/null @@ -1,15 +0,0 @@ -package l1bridge2infoindexsync - -import "github.com/0xPolygon/cdk/config/types" - -type Config struct { - // DBPath path of the DB - DBPath string `mapstructure:"DBPath"` - // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry - RetryAfterErrorPeriod types.Duration 
`mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. - // Any number smaller than zero will be considered as unlimited retries - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` - // WaitForSyncersPeriod time that will be waited when the synchronizer has reached the latest state - WaitForSyncersPeriod types.Duration `mapstructure:"WaitForSyncersPeriod"` -} diff --git a/l1bridge2infoindexsync/downloader.go b/l1bridge2infoindexsync/downloader.go deleted file mode 100644 index f4db8422..00000000 --- a/l1bridge2infoindexsync/downloader.go +++ /dev/null @@ -1,70 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "math/big" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rpc" -) - -type downloader struct { - l1Bridge *bridgesync.BridgeSync - l1Info *l1infotreesync.L1InfoTreeSync - l1Client ethereum.ChainReader -} - -func newDownloader( - l1Bridge *bridgesync.BridgeSync, - l1Info *l1infotreesync.L1InfoTreeSync, - l1Client ethereum.ChainReader, -) *downloader { - return &downloader{ - l1Bridge: l1Bridge, - l1Info: l1Info, - l1Client: l1Client, - } -} - -func (d *downloader) getLastFinalizedL1Block(ctx context.Context) (uint64, error) { - b, err := d.l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return 0, err - } - - return b.NumberU64(), nil -} - -func (d *downloader) getLastProcessedBlockBridge(ctx context.Context) (uint64, error) { - return d.l1Bridge.GetLastProcessedBlock(ctx) -} - -func (d *downloader) getLastProcessedBlockL1InfoTree(ctx context.Context) (uint64, error) { - return d.l1Info.GetLastProcessedBlock(ctx) -} - -func (d *downloader) getLastL1InfoIndexUntilBlock(ctx context.Context, blockNum uint64) (uint32, error) { - info, err := d.l1Info.GetLatestInfoUntilBlock(ctx, blockNum) - if err != nil { - return 0, err - } - - return info.L1InfoTreeIndex, nil -} - -func (d *downloader) getMainnetExitRootAtL1InfoTreeIndex(ctx context.Context, index uint32) (common.Hash, error) { - leaf, err := d.l1Info.GetInfoByIndex(ctx, index) - if err != nil { - return common.Hash{}, err - } - - return leaf.MainnetExitRoot, nil -} - -func (d *downloader) getBridgeIndex(ctx context.Context, mainnetExitRoot common.Hash) (types.Root, error) { - return d.l1Bridge.GetBridgeRootByHash(ctx, mainnetExitRoot) -} diff --git a/l1bridge2infoindexsync/driver.go b/l1bridge2infoindexsync/driver.go deleted file mode 100644 index 921a0c41..00000000 --- a/l1bridge2infoindexsync/driver.go +++ /dev/null @@ -1,221 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "errors" - "time" - - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" -) - -type driver struct { - downloader *downloader - processor *processor - rh *sync.RetryHandler - waitForSyncersPeriod time.Duration -} - -func newDriver( - downloader *downloader, - processor *processor, - rh *sync.RetryHandler, - waitForSyncersPeriod time.Duration, -) *driver { - return &driver{ - downloader: downloader, - processor: processor, - rh: rh, - waitForSyncersPeriod: waitForSyncersPeriod, - } -} - -func (d *driver) sync(ctx context.Context) { - var ( - attempts int - lpbProcessor uint64 - lastProcessedL1InfoIndex uint32 - err error - ) - 
for { - lpbProcessor, lastProcessedL1InfoIndex, err = d.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) - if err != nil { - attempts++ - log.Errorf("error getting last processed block and index: %v", err) - d.rh.Handle("GetLastProcessedBlockAndL1InfoTreeIndex", attempts) - - continue - } - - break - } - for { - attempts = 0 - var ( - syncUntilBlock uint64 - shouldWait bool - ) - for { - syncUntilBlock, shouldWait, err = d.getTargetSynchronizationBlock(ctx, lpbProcessor) - if err != nil { - attempts++ - log.Errorf("error getting target sync block: %v", err) - d.rh.Handle("getTargetSynchronizationBlock", attempts) - - continue - } - - break - } - if shouldWait { - log.Debugf("waiting for syncers to catch up") - time.Sleep(d.waitForSyncersPeriod) - - continue - } - - attempts = 0 - var lastL1InfoTreeIndex uint32 - found := false - for { - lastL1InfoTreeIndex, err = d.downloader.getLastL1InfoIndexUntilBlock(ctx, syncUntilBlock) - if err != nil { - if errors.Is(err, l1infotreesync.ErrNotFound) || errors.Is(err, l1infotreesync.ErrBlockNotProcessed) { - log.Debugf("l1 info tree index not ready, querying until block %d: %s", syncUntilBlock, err) - - break - } - attempts++ - log.Errorf("error getting last l1 info tree index: %v", err) - d.rh.Handle("getLastL1InfoIndexUntilBlock", attempts) - - continue - } - found = true - - break - } - if !found { - time.Sleep(d.waitForSyncersPeriod) - - continue - } - - relations := []bridge2L1InfoRelation{} - var init uint32 - if lastProcessedL1InfoIndex > 0 { - init = lastProcessedL1InfoIndex + 1 - } - if init <= lastL1InfoTreeIndex { - log.Debugf("getting relations from index %d to %d", init, lastL1InfoTreeIndex) - } - for i := init; i <= lastL1InfoTreeIndex; i++ { - attempts = 0 - for { - relation, err := d.getRelation(ctx, i) - if err != nil { - attempts++ - log.Errorf("error getting relation: %v", err) - d.rh.Handle("getRelation", attempts) - - continue - } - relations = append(relations, relation) - - break - } - } - - attempts = 0 - log.Debugf("processing until block %d: %+v", syncUntilBlock, relations) - for { - if err := d.processor.processUntilBlock(ctx, syncUntilBlock, relations); err != nil { - attempts++ - log.Errorf("error processing block: %v", err) - d.rh.Handle("processUntilBlock", attempts) - - continue - } - - break - } - - lpbProcessor = syncUntilBlock - if len(relations) > 0 { - lastProcessedL1InfoIndex = relations[len(relations)-1].l1InfoTreeIndex - log.Debugf("last processed index %d", lastProcessedL1InfoIndex) - } - } -} - -func (d *driver) getTargetSynchronizationBlock( - ctx context.Context, lpbProcessor uint64, -) (syncUntilBlock uint64, shouldWait bool, err error) { - // NOTE: if this had configurable finality, it would be needed to deal with reorgs - lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) - if err != nil { - return - } - checkProcessedBlockFn := func(blockToCheck, lastProcessed uint64, blockType string) bool { - if blockToCheck >= lastProcessed { - log.Debugf( - "should wait because the last processed block (%d) is greater or equal than the %s (%d)", - blockToCheck, blockType, lastProcessed) - shouldWait = true - - return true - } - - return false - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last finalised") { - return - } - lpbInfo, err := d.downloader.getLastProcessedBlockL1InfoTree(ctx) - if err != nil { - return - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last block from L1 Info tree sync") { - return - } - lpbBridge, err := 
d.downloader.getLastProcessedBlockBridge(ctx) - if err != nil { - return - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last block from l1 bridge sync") { - return - } - - // Bridge, L1Info and L1 ahead of procesor. Pick the smallest block num as target - if lastFinalised <= lpbInfo { - log.Debugf("target sync block is the last finalised block (%d)", lastFinalised) - syncUntilBlock = lastFinalised - } else { - log.Debugf("target sync block is the last processed block from L1 info tree (%d)", lpbInfo) - syncUntilBlock = lpbInfo - } - if lpbBridge < syncUntilBlock { - log.Debugf("target sync block is the last processed block from bridge (%d)", lpbBridge) - syncUntilBlock = lpbBridge - } - - return -} - -func (d *driver) getRelation(ctx context.Context, l1InfoIndex uint32) (bridge2L1InfoRelation, error) { - mer, err := d.downloader.getMainnetExitRootAtL1InfoTreeIndex(ctx, l1InfoIndex) - if err != nil { - return bridge2L1InfoRelation{}, err - } - - bridgeRoot, err := d.downloader.getBridgeIndex(ctx, mer) - if err != nil { - return bridge2L1InfoRelation{}, err - } - - return bridge2L1InfoRelation{ - bridgeIndex: bridgeRoot.Index, - l1InfoTreeIndex: l1InfoIndex, - }, nil -} diff --git a/l1bridge2infoindexsync/e2e_test.go b/l1bridge2infoindexsync/e2e_test.go deleted file mode 100644 index e134c1ab..00000000 --- a/l1bridge2infoindexsync/e2e_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package l1bridge2infoindexsync_test - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmglobalexitrootv2" - "github.com/0xPolygon/cdk/bridgesync" - cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/ethereum/go-ethereum/rpc" - "github.com/stretchr/testify/require" -) - -func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - bridgeAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - genesisAlloc := map[common.Address]types.Account{ - authDeployer.From: { - Balance: balance, - }, - authCaller.From: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - 
precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - if bridgeABI == nil { - return nil, common.Address{}, common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - dataCallProxy, err := bridgeABI.Pack("initialize", - uint32(0), // networkIDMainnet - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - if precalculatedAddr != checkGERAddr { - err = errors.New("error deploying bridge") - return - } - - gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2( - authDeployer, client.Client(), authCaller.From, bridgeAddr, - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, common.Address{}, nil, nil, errors.New("error calculating addr") - } - - return client, gerAddr, bridgeAddr, gerContract, bridgeContract, nil -} - -func TestE2E(t *testing.T) { - ctx := context.Background() - dbPathBridgeSync := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathL1Sync := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathReorg := t.TempDir() - dbPathL12InfoSync := t.TempDir() - - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - privateKey, err = crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - require.NotEqual(t, authDeployer.From, auth.From) - client, gerAddr, bridgeAddr, gerSc, bridgeSc, err := newSimulatedClient(authDeployer, auth) - require.NoError(t, err) - rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Second)}) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - - testClient := helpers.TestClient{ClientRenamed: client.Client()} - bridgeSync, err := bridgesync.NewL1(ctx, dbPathBridgeSync, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) - require.NoError(t, err) - go bridgeSync.Start(ctx) - - l1Sync, err := l1infotreesync.New( - ctx, - dbPathL1Sync, - gerAddr, - common.Address{}, - 10, - etherman.SafeBlock, - rd, - client.Client(), - time.Millisecond, - 0, - time.Millisecond, - 3, - ) - require.NoError(t, err) - go l1Sync.Start(ctx) - - bridge2InfoSync, err := l1bridge2infoindexsync.New(dbPathL12InfoSync, bridgeSync, l1Sync, 
client.Client(), 0, 0, time.Millisecond) - require.NoError(t, err) - go bridge2InfoSync.Start(ctx) - - // Send bridge txs - expectedIndex := -1 - for i := 0; i < 10; i++ { - bridge := bridgesync.Bridge{ - Amount: big.NewInt(0), - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("f00"), - } - _, err := bridgeSc.BridgeAsset( - auth, - bridge.DestinationNetwork, - bridge.DestinationAddress, - bridge.Amount, - bridge.OriginAddress, - true, nil, - ) - require.NoError(t, err) - expectedIndex++ - client.Commit() - - // Wait for block to be finalised - updateAtBlock, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - for { - lastFinalisedBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) - if lastFinalisedBlock.NumberU64() >= updateAtBlock { - break - } - client.Commit() - time.Sleep(time.Microsecond) - } - - // Wait for syncer to catch up - syncerUpToDate := false - var errMsg string - lb, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) - for i := 0; i < 10; i++ { - lpb, err := bridge2InfoSync.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb.NumberU64() { - syncerUpToDate = true - - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb.NumberU64(), lpb) - } - require.True(t, syncerUpToDate, errMsg) - - actualIndex, err := bridge2InfoSync.GetL1InfoTreeIndexByDepositCount(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, uint32(expectedIndex), actualIndex) - - if i%2 == 1 { - // Update L1 info tree without a bridge on L1 - _, err = gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - expectedIndex++ - client.Commit() - } - } -} diff --git a/l1bridge2infoindexsync/l1bridge2infoindexsync.go b/l1bridge2infoindexsync/l1bridge2infoindexsync.go deleted file mode 100644 index c24bebba..00000000 --- a/l1bridge2infoindexsync/l1bridge2infoindexsync.go +++ /dev/null @@ -1,62 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "time" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum" -) - -type L1Bridge2InfoIndexSync struct { - processor *processor - driver *driver -} - -func New( - dbPath string, - l1Bridge *bridgesync.BridgeSync, - l1Info *l1infotreesync.L1InfoTreeSync, - l1Client ethereum.ChainReader, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - waitForSyncersPeriod time.Duration, -) (*L1Bridge2InfoIndexSync, error) { - dwn := newDownloader(l1Bridge, l1Info, l1Client) - - prc, err := newProcessor(dbPath) - if err != nil { - return nil, err - } - - rh := &sync.RetryHandler{ - RetryAfterErrorPeriod: retryAfterErrorPeriod, - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - } - drv := newDriver(dwn, prc, rh, waitForSyncersPeriod) - - return &L1Bridge2InfoIndexSync{ - driver: drv, - processor: prc, - }, nil -} - -func (s *L1Bridge2InfoIndexSync) Start(ctx context.Context) { - s.driver.sync(ctx) -} - -// GetLastProcessedBlock retrieves the last processed block number by the processor. 
-func (s *L1Bridge2InfoIndexSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - lpb, _, err := s.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) - - return lpb, err -} - -// GetL1InfoTreeIndexByDepositCount retrieves the L1 Info Tree index for a given deposit count. -func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount( - ctx context.Context, depositCount uint32, -) (uint32, error) { - return s.processor.getL1InfoTreeIndexByBridgeIndex(ctx, depositCount) -} diff --git a/l1bridge2infoindexsync/processor.go b/l1bridge2infoindexsync/processor.go deleted file mode 100644 index bfe9f3a6..00000000 --- a/l1bridge2infoindexsync/processor.go +++ /dev/null @@ -1,206 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -const ( - lastProcessedTable = "l1bridge2infoindexsync-lastProcessed" - relationTable = "l1bridge2infoindexsync-relation" -) - -var ( - lastProcessedKey = []byte("lp") - ErrNotFound = errors.New("not found") -) - -type processor struct { - db kv.RwDB -} - -type bridge2L1InfoRelation struct { - bridgeIndex uint32 - l1InfoTreeIndex uint32 -} - -type lastProcessed struct { - block uint64 - index uint32 -} - -func (lp *lastProcessed) MarshalBinary() ([]byte, error) { - return append(common.Uint64ToBytes(lp.block), common.Uint32ToBytes(lp.index)...), nil -} - -func (lp *lastProcessed) UnmarshalBinary(data []byte) error { - const expectedDataLength = 12 - if len(data) != expectedDataLength { - return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) - } - lp.block = common.BytesToUint64(data[:8]) - lp.index = common.BytesToUint32(data[8:]) - - return nil -} - -func newProcessor(dbPath string) (*processor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - return kv.TableCfg{ - lastProcessedTable: {}, - relationTable: {}, - } - } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). 
- Open() - if err != nil { - return nil, err - } - - return &processor{ - db: db, - }, nil -} - -// GetLastProcessedBlockAndL1InfoTreeIndex returns the last processed block oby the processor, including blocks -// that don't have events -func (p *processor) GetLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context) (uint64, uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, 0, err - } - defer tx.Rollback() - - return p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) -} - -func (p *processor) getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.Tx) (uint64, uint32, error) { - if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil { - return 0, 0, err - } else if lastProcessedBytes == nil { - return 0, 0, nil - } else { - lp := &lastProcessed{} - if err := lp.UnmarshalBinary(lastProcessedBytes); err != nil { - return 0, 0, err - } - - return lp.block, lp.index, nil - } -} - -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex( - ctx context.Context, blockNum uint64, index uint32, -) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx, blockNum, index); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() -} - -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.RwTx, blockNum uint64, index uint32) error { - lp := &lastProcessed{ - block: blockNum, - index: index, - } - value, err := lp.MarshalBinary() - if err != nil { - return err - } - - return tx.Put(lastProcessedTable, lastProcessedKey, value) -} - -func (p *processor) processUntilBlock( - ctx context.Context, lastProcessedBlock uint64, relations []bridge2L1InfoRelation, -) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - - if len(relations) == 0 { - _, lastIndex, err := p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) - if err != nil { - tx.Rollback() - - return err - } - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( - tx, - lastProcessedBlock, - lastIndex, - ); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() - } - - for _, relation := range relations { - if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); !errors.Is(err, ErrNotFound) { - // Note that indexes could be repeated as the L1 Info tree update can be produced by a rollup and not mainnet. 
- // Hence if the index already exist, do not update as it's better to have the lowest index possible for the relation - continue - } - if err := tx.Put( - relationTable, - common.Uint32ToBytes(relation.bridgeIndex), - common.Uint32ToBytes(relation.l1InfoTreeIndex), - ); err != nil { - tx.Rollback() - - return err - } - } - - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( - tx, - lastProcessedBlock, - relations[len(relations)-1].l1InfoTreeIndex, - ); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() -} - -func (p *processor) getL1InfoTreeIndexByBridgeIndex(ctx context.Context, depositCount uint32) (uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, depositCount) -} - -func (p *processor) getL1InfoTreeIndexByBridgeIndexWithTx(tx kv.Tx, depositCount uint32) (uint32, error) { - indexBytes, err := tx.GetOne(relationTable, common.Uint32ToBytes(depositCount)) - if err != nil { - return 0, err - } - if indexBytes == nil { - return 0, ErrNotFound - } - - return common.BytesToUint32(indexBytes), nil -} diff --git a/l1bridge2infoindexsync/processor_test.go b/l1bridge2infoindexsync/processor_test.go deleted file mode 100644 index 9305dd9b..00000000 --- a/l1bridge2infoindexsync/processor_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDuplicatedKey(t *testing.T) { - dbPath := t.TempDir() - p, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - err = p.processUntilBlock(ctx, 5, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 2}}) - require.NoError(t, err) - err = p.processUntilBlock(ctx, 7, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 3}}) - require.NoError(t, err) - l1InfoTreeIndex, err := p.getL1InfoTreeIndexByBridgeIndex(ctx, 2) - require.NoError(t, err) - require.Equal(t, uint32(2), l1InfoTreeIndex) -} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 90f7f091..146c1924 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -119,18 +119,43 @@ func TestE2E(t *testing.T) { tx, err := verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, i%2 != 0) require.NoError(t, err) client.Commit() - // Let the processor catch up - time.Sleep(time.Millisecond * 100) receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash()) require.NoError(t, err) require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) require.True(t, len(receipt.Logs) == 1+i%2+i%2) + // Let the processor catch + processorUpdated := false + for i := 0; i < 30; i++ { + lpb, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + if receipt.BlockNumber.Uint64() == lpb { + processorUpdated = true + break + } + time.Sleep(time.Millisecond * 10) + } + require.True(t, processorUpdated) + + // Assert rollup exit root expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash, fmt.Sprintf("rollupID: %d, i: %d", rollupID, i)) + + // Assert verify batches + expectedVerify := l1infotreesync.VerifyBatches{ + BlockNumber: receipt.BlockNumber.Uint64(), + BlockPosition: uint64(i%2 + i%2), + RollupID: rollupID, + ExitRoot: 
newLocalExitRoot, + Aggregator: auth.From, + RollupExitRoot: expectedRollupExitRoot, + } + actualVerify, err := syncer.GetLastVerifiedBatches(rollupID) + require.NoError(t, err) + require.Equal(t, expectedVerify, *actualVerify) } } } diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 546a8ead..c414e42b 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -151,3 +151,31 @@ func (s *L1InfoTreeSync) GetLocalExitRoot( return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot) } + +func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + return s.processor.GetLastVerifiedBatches(rollupID) +} + +func (s *L1InfoTreeSync) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + return s.processor.GetFirstVerifiedBatches(rollupID) +} + +func (s *L1InfoTreeSync) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + return s.processor.GetFirstVerifiedBatchesAfterBlock(rollupID, blockNum) +} + +func (s *L1InfoTreeSync) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstL1InfoWithRollupExitRoot(rollupExitRoot) +} + +func (s *L1InfoTreeSync) GetLastInfo() (*L1InfoTreeLeaf, error) { + return s.processor.GetLastInfo() +} + +func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstInfo() +} + +func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstInfoAfterBlock(blockNum) +} diff --git a/l1infotreesync/migrations/l1infotreesync0001.sql b/l1infotreesync/migrations/l1infotreesync0001.sql index 39a45dd4..f22408cd 100644 --- a/l1infotreesync/migrations/l1infotreesync0001.sql +++ b/l1infotreesync/migrations/l1infotreesync0001.sql @@ -20,3 +20,15 @@ CREATE TABLE l1info_leaf ( hash VARCHAR NOT NULL, PRIMARY KEY (block_num, block_pos) ); + +CREATE TABLE verify_batches ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + rollup_id INTEGER NOT NULL, + batch_num INTEGER NOT NULL, + state_root VARCHAR NOT NULL, + exit_root VARCHAR NOT NULL, + aggregator VARCHAR NOT NULL, + rollup_exit_root VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) +); diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c76d7aac..7b762b55 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -21,7 +21,6 @@ import ( var ( ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNotFound = errors.New("not found") ErrNoBlock0 = errors.New("blockNum must be greater than 0") ) @@ -42,12 +41,16 @@ type UpdateL1InfoTree struct { // VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events type VerifyBatches struct { - BlockPosition uint64 - RollupID uint32 - NumBatch uint64 - StateRoot ethCommon.Hash - ExitRoot ethCommon.Hash - Aggregator ethCommon.Address + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + RollupID uint32 `meddler:"rollup_id"` + NumBatch uint64 `meddler:"batch_num"` + StateRoot ethCommon.Hash `meddler:"state_root,hash"` + ExitRoot ethCommon.Hash `meddler:"exit_root,hash"` + Aggregator ethCommon.Address `meddler:"aggregator,address"` + + // Not provided by downloader + RollupExitRoot ethCommon.Hash `meddler:"rollup_exit_root,hash"` } type InitL1InfoRootMap struct { @@ -76,7 +79,7 @@ type 
L1InfoTreeLeaf struct { // Hash as expected by the tree func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { - var res [32]byte + var res [treeTypes.DefaultHeight]byte t := make([]byte, 8) //nolint:mnd binary.BigEndian.PutUint64(t, l.Timestamp) copy(res[:], keccak256.Hash(l.globalExitRoot().Bytes(), l.PreviousBlockHash.Bytes(), t)) @@ -85,7 +88,7 @@ func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { // GlobalExitRoot returns the GER func (l *L1InfoTreeLeaf) globalExitRoot() ethCommon.Hash { - var gerBytes [32]byte + var gerBytes [treeTypes.DefaultHeight]byte hasher := sha3.NewLegacyKeccak256() hasher.Write(l.MainnetExitRoot[:]) hasher.Write(l.RollupExitRoot[:]) @@ -153,7 +156,7 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64 ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return nil, ErrNotFound + return nil, db.ErrNotFound } return nil, err } @@ -219,7 +222,6 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { if err := tx.Commit(); err != nil { return err } - return nil } @@ -247,7 +249,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { lastIndex, err := p.getLastIndex(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): initialL1InfoIndex = 0 err = nil case err != nil: @@ -274,8 +276,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } info.GlobalExitRoot = info.globalExitRoot() info.Hash = info.hash() - err = meddler.Insert(tx, "l1info_leaf", info) - if err != nil { + if err = meddler.Insert(tx, "l1info_leaf", info); err != nil { return fmt.Errorf("err: %w", err) } err = p.l1InfoTree.AddLeaf(tx, info.BlockNumber, info.BlockPosition, treeTypes.Leaf{ @@ -289,13 +290,19 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } if event.VerifyBatches != nil { - err = p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ + newRoot, err := p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ Index: event.VerifyBatches.RollupID - 1, Hash: event.VerifyBatches.ExitRoot, }) if err != nil { return fmt.Errorf("err: %w", err) } + verifyBatches := event.VerifyBatches + verifyBatches.BlockNumber = b.Num + verifyBatches.RollupExitRoot = newRoot + if err = meddler.Insert(tx, "verify_batches", verifyBatches); err != nil { + return fmt.Errorf("err: %w", err) + } } if event.InitL1InfoRootMap != nil { @@ -317,7 +324,82 @@ func (p *processor) getLastIndex(tx db.Querier) (uint32, error) { row := tx.QueryRow("SELECT position FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;") err := row.Scan(&lastProcessedIndex) if errors.Is(err, sql.ErrNoRows) { - return 0, ErrNotFound + return 0, db.ErrNotFound } return lastProcessedIndex, err } + +func (p *processor) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) 
GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 AND block_num >= $2 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID, blockNum) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot ethCommon.Hash) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE rollup_exit_root = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupExitRoot.Hex()) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetLastInfo() (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstInfo() (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE block_num >= $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, blockNum) + return info, db.ReturnErrNotFound(err) +} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 01550f31..5853e90e 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -1,3 +1,168 @@ package l1infotreesync -// TODO: add unit test +import ( + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestGetVerifiedBatches(t *testing.T) { + dbPath := "file:TestGetVerifiedBatches?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatchesAfterBlock(0, 0) + require.Equal(t, db.ErrNotFound, err) + + // First insert + expected1 := &VerifyBatches{ + RollupID: 420, + NumBatch: 69, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.HexToHash("b455"), + Aggregator: common.HexToAddress("beef"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{VerifyBatches: expected1}, + }, + }) + require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err := p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + + // Second insert + expected2 := &VerifyBatches{ + RollupID: 420, + NumBatch: 690, + StateRoot: common.HexToHash("5ca1e3"), + ExitRoot: common.HexToHash("ba55"), + Aggregator: common.HexToAddress("beef3"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{VerifyBatches: expected2}, + }, + }) + 
require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err = p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected2, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatchesAfterBlock(420, 2) + require.NoError(t, err) + require.Equal(t, expected2, actual) +} + +func TestGetInfo(t *testing.T) { + dbPath := "file:TestGetInfo?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetFirstL1InfoWithRollupExitRoot(common.Hash{}) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetLastInfo() + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstInfo() + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstInfoAfterBlock(0) + require.Equal(t, db.ErrNotFound, err) + + // First insert + info1 := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + expected1 := L1InfoTreeLeaf{ + BlockNumber: 1, + L1InfoTreeIndex: 0, + PreviousBlockHash: info1.ParentHash, + Timestamp: info1.Timestamp, + MainnetExitRoot: info1.MainnetExitRoot, + RollupExitRoot: info1.RollupExitRoot, + } + expected1.GlobalExitRoot = expected1.globalExitRoot() + expected1.Hash = expected1.hash() + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info1}, + }, + }) + require.NoError(t, err) + actual, err := p.GetFirstL1InfoWithRollupExitRoot(info1.RollupExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetLastInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfoAfterBlock(0) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + + // Second insert + info2 := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + expected2 := L1InfoTreeLeaf{ + BlockNumber: 2, + L1InfoTreeIndex: 1, + PreviousBlockHash: info2.ParentHash, + Timestamp: info2.Timestamp, + MainnetExitRoot: info2.MainnetExitRoot, + RollupExitRoot: info2.RollupExitRoot, + } + expected2.GlobalExitRoot = expected2.globalExitRoot() + expected2.Hash = expected2.hash() + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{UpdateL1InfoTree: info2}, + }, + }) + require.NoError(t, err) + actual, err = p.GetFirstL1InfoWithRollupExitRoot(info2.RollupExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetLastInfo() + require.NoError(t, err) + require.Equal(t, expected2, *actual) + actual, err = p.GetFirstInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfoAfterBlock(2) + require.NoError(t, err) + require.Equal(t, expected2, *actual) +} diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go index 91e05c7a..97235c28 100644 --- a/lastgersync/evmdownloader.go +++ b/lastgersync/evmdownloader.go @@ -8,10 +8,10 @@ import ( "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot" + 
"github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -67,7 +67,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC ) for { lastIndex, err = d.processor.getLastIndex(ctx) - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { lastIndex = 0 } else if err != nil { log.Errorf("error getting last indes: %v", err) @@ -129,7 +129,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC func (d *downloader) getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) { lastRoot, err := d.l1InfoTreesync.GetLastL1InfoTreeRoot(ctx) - if errors.Is(err, tree.ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { return nil, nil } if err != nil { diff --git a/lastgersync/processor.go b/lastgersync/processor.go index 628ea04a..45104f09 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -7,6 +7,7 @@ import ( "math" "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" ethCommon "github.com/ethereum/go-ethereum/common" @@ -22,7 +23,6 @@ const ( var ( lastProcessedKey = []byte("lp") - ErrNotFound = errors.New("not found") ) type Event struct { @@ -111,7 +111,7 @@ func (p *processor) getLastIndexWithTx(tx kv.Tx) (uint32, error) { return 0, err } if k == nil { - return 0, ErrNotFound + return 0, db.ErrNotFound } return common.BytesToUint32(k), nil @@ -142,7 +142,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if lenEvents > 0 { li, err := p.getLastIndexWithTx(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): lastIndex = -1 case err != nil: @@ -286,7 +286,7 @@ func (p *processor) GetFirstGERAfterL1InfoTreeIndex( return 0, ethCommon.Hash{}, err } if l1InfoIndexBytes == nil { - return 0, ethCommon.Hash{}, ErrNotFound + return 0, ethCommon.Hash{}, db.ErrNotFound } return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil diff --git a/rpc/bridge.go b/rpc/bridge.go index 23c67409..c769158e 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -2,18 +2,16 @@ package rpc import ( "context" + "errors" "fmt" "math/big" "time" "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" + "github.com/0xPolygon/cdk/rpc/types" + tree "github.com/0xPolygon/cdk/tree/types" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" ) @@ -23,22 +21,26 @@ const ( BRIDGE = "bridge" meterName = "github.com/0xPolygon/cdk/rpc" - zeroHex = "0x0" + zeroHex = "0x0" + binnarySearchDivider = 2 +) + +var ( + ErrNotOnL1Info = errors.New("this bridge has not been included on the L1 Info Tree yet") ) // BridgeEndpoints contains implementations for the "bridge" RPC endpoints type BridgeEndpoints struct { - logger *log.Logger - meter metric.Meter - readTimeout time.Duration - writeTimeout time.Duration - networkID uint32 - sponsor *claimsponsor.ClaimSponsor - l1InfoTree *l1infotreesync.L1InfoTreeSync - l1Bridge2Index 
*l1bridge2infoindexsync.L1Bridge2InfoIndexSync - injectedGERs *lastgersync.LastGERSync - bridgeL1 *bridgesync.BridgeSync - bridgeL2 *bridgesync.BridgeSync + logger *log.Logger + meter metric.Meter + readTimeout time.Duration + writeTimeout time.Duration + networkID uint32 + sponsor ClaimSponsorer + l1InfoTree L1InfoTreer + injectedGERs LastGERer + bridgeL1 Bridger + bridgeL2 Bridger } // NewBridgeEndpoints returns InteropEndpoints @@ -47,26 +49,24 @@ func NewBridgeEndpoints( writeTimeout time.Duration, readTimeout time.Duration, networkID uint32, - sponsor *claimsponsor.ClaimSponsor, - l1InfoTree *l1infotreesync.L1InfoTreeSync, - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync, - injectedGERs *lastgersync.LastGERSync, - bridgeL1 *bridgesync.BridgeSync, - bridgeL2 *bridgesync.BridgeSync, + sponsor ClaimSponsorer, + l1InfoTree L1InfoTreer, + injectedGERs LastGERer, + bridgeL1 Bridger, + bridgeL2 Bridger, ) *BridgeEndpoints { meter := otel.Meter(meterName) return &BridgeEndpoints{ - logger: logger, - meter: meter, - readTimeout: readTimeout, - writeTimeout: writeTimeout, - networkID: networkID, - sponsor: sponsor, - l1InfoTree: l1InfoTree, - l1Bridge2Index: l1Bridge2Index, - injectedGERs: injectedGERs, - bridgeL1: bridgeL1, - bridgeL2: bridgeL2, + logger: logger, + meter: meter, + readTimeout: readTimeout, + writeTimeout: writeTimeout, + networkID: networkID, + sponsor: sponsor, + l1InfoTree: l1InfoTree, + injectedGERs: injectedGERs, + bridgeL1: bridgeL1, + bridgeL2: bridgeL2, } } @@ -84,21 +84,26 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun c.Add(ctx, 1) if networkID == 0 { - l1InfoTreeIndex, err := b.l1Bridge2Index.GetL1InfoTreeIndexByDepositCount(ctx, depositCount) + l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL1Bridge(ctx, depositCount) // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err)) + return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( + "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), + ) } return l1InfoTreeIndex, nil } if networkID == b.networkID { + l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL2Bridge(ctx, depositCount) // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - "TODO: batchsync / certificatesync missing implementation", - ) + if err != nil { + return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( + "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), + ) + } + return l1InfoTreeIndex, nil } return zeroHex, rpc.NewRPCError( rpc.DefaultErrorCode, @@ -143,12 +148,6 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd ) } -type ClaimProof struct { - ProofLocalExitRoot [32]common.Hash - ProofRollupExitRoot [32]common.Hash - L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf -} - // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. 
// This call needs to be done to a client of the same network were the bridge tx was sent @@ -172,7 +171,7 @@ func (b *BridgeEndpoints) ClaimProof( if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err)) } - var proofLocalExitRoot [32]common.Hash + var proofLocalExitRoot tree.Proof switch { case networkID == 0: proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot) @@ -202,8 +201,7 @@ func (b *BridgeEndpoints) ClaimProof( fmt.Sprintf("this client does not support network %d", networkID), ) } - - return ClaimProof{ + return types.ClaimProof{ ProofLocalExitRoot: proofLocalExitRoot, ProofRollupExitRoot: proofRollupExitRoot, L1InfoTreeLeaf: *info, @@ -258,3 +256,111 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa } return claim.Status, nil } + +func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL1Bridge(ctx context.Context, depositCount uint32) (uint32, error) { + lastInfo, err := b.l1InfoTree.GetLastInfo() + if err != nil { + return 0, err + } + + root, err := b.bridgeL1.GetRootByLER(ctx, lastInfo.MainnetExitRoot) + if err != nil { + return 0, err + } + if root.Index < depositCount { + return 0, ErrNotOnL1Info + } + + firstInfo, err := b.l1InfoTree.GetFirstInfo() + if err != nil { + return 0, err + } + + // Binary search between the first and last blcoks where L1 info tree was updated. + // Find the smallest l1 info tree index that is greater than depositCount and matches with + // a MER that is included on the l1 info tree + bestResult := lastInfo + lowerLimit := firstInfo.BlockNumber + upperLimit := lastInfo.BlockNumber + for lowerLimit <= upperLimit { + targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) + targetInfo, err := b.l1InfoTree.GetFirstInfoAfterBlock(targetBlock) + if err != nil { + return 0, err + } + root, err := b.bridgeL1.GetRootByLER(ctx, targetInfo.MainnetExitRoot) + if err != nil { + return 0, err + } + //nolint:gocritic // switch statement doesn't make sense here, I couldn't break + if root.Index < depositCount { + lowerLimit = targetBlock + 1 + } else if root.Index == depositCount { + bestResult = targetInfo + break + } else { + bestResult = targetInfo + upperLimit = targetBlock - 1 + } + } + + return bestResult.L1InfoTreeIndex, nil +} + +func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL2Bridge(ctx context.Context, depositCount uint32) (uint32, error) { + // NOTE: this code assumes that all the rollup exit roots + // (produced by the smart contract call verifyBatches / verifyBatchesTrustedAggregator) + // are included in the L1 info tree. As per the current implementation (smart contracts) of the protocol + // this is true. This could change in the future + lastVerified, err := b.l1InfoTree.GetLastVerifiedBatches(b.networkID - 1) + if err != nil { + return 0, err + } + + root, err := b.bridgeL2.GetRootByLER(ctx, lastVerified.ExitRoot) + if err != nil { + return 0, err + } + if root.Index < depositCount { + return 0, ErrNotOnL1Info + } + + firstVerified, err := b.l1InfoTree.GetFirstVerifiedBatches(b.networkID - 1) + if err != nil { + return 0, err + } + + // Binary search between the first and last blcoks where batches were verified. 
+ // Find the smallest deposit count that is greater than depositCount and matches with + // a LER that is verified + bestResult := lastVerified + lowerLimit := firstVerified.BlockNumber + upperLimit := lastVerified.BlockNumber + for lowerLimit <= upperLimit { + targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) + targetVerified, err := b.l1InfoTree.GetFirstVerifiedBatchesAfterBlock(b.networkID-1, targetBlock) + if err != nil { + return 0, err + } + root, err = b.bridgeL2.GetRootByLER(ctx, targetVerified.ExitRoot) + if err != nil { + return 0, err + } + //nolint:gocritic // switch statement doesn't make sense here, I couldn't break + if root.Index < depositCount { + lowerLimit = targetBlock + 1 + } else if root.Index == depositCount { + bestResult = targetVerified + break + } else { + bestResult = targetVerified + upperLimit = targetBlock - 1 + } + } + + info, err := b.l1InfoTree.GetFirstL1InfoWithRollupExitRoot(bestResult.RollupExitRoot) + if err != nil { + return 0, err + } + return info.L1InfoTreeIndex, nil +} diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go new file mode 100644 index 00000000..84292e22 --- /dev/null +++ b/rpc/bridge_interfaces.go @@ -0,0 +1,40 @@ +package rpc + +import ( + "context" + "math/big" + + "github.com/0xPolygon/cdk/claimsponsor" + "github.com/0xPolygon/cdk/l1infotreesync" + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" +) + +type Bridger interface { + GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) + GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) +} + +type LastGERer interface { + GetFirstGERAfterL1InfoTreeIndex( + ctx context.Context, atOrAfterL1InfoTreeIndex uint32, + ) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) +} + +type L1InfoTreer interface { + GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) + GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (tree.Proof, error) + GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) + GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) + GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) + GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) + GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) + GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) + GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) + GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) +} + +type ClaimSponsorer interface { + AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error + GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) +} diff --git a/rpc/bridge_test.go b/rpc/bridge_test.go new file mode 100644 index 00000000..9d461a50 --- /dev/null +++ b/rpc/bridge_test.go @@ -0,0 +1,443 @@ +package rpc + +import ( + "context" + "errors" + "testing" + + cdkCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + mocks "github.com/0xPolygon/cdk/rpc/mocks" + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func 
TestGetFirstL1InfoTreeIndexForL1Bridge(t *testing.T) { + type testCase struct { + description string + setupMocks func() + depositCount uint32 + expectedIndex uint32 + expectedErr error + } + ctx := context.Background() + b := newBridgeWithMocks(t) + fooErr := errors.New("foo") + firstL1Info := &l1infotreesync.L1InfoTreeLeaf{ + BlockNumber: 10, + MainnetExitRoot: common.HexToHash("alfa"), + } + lastL1Info := &l1infotreesync.L1InfoTreeLeaf{ + BlockNumber: 1000, + MainnetExitRoot: common.HexToHash("alfa"), + } + mockHappyPath := func() { + // to make this work, assume that block number == l1 info tree index == deposit count + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + infoAfterBlock := &l1infotreesync.L1InfoTreeLeaf{} + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Run(func(args mock.Arguments) { + blockNum, ok := args.Get(0).(uint64) + require.True(t, ok) + infoAfterBlock.L1InfoTreeIndex = uint32(blockNum) + infoAfterBlock.BlockNumber = blockNum + infoAfterBlock.MainnetExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + }). + Return(infoAfterBlock, nil) + rootByLER := &tree.Root{} + b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). + Run(func(args mock.Arguments) { + ler, ok := args.Get(1).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + if ler == common.HexToHash("alfa") { + index = uint32(lastL1Info.BlockNumber) + } + rootByLER.Index = index + }). + Return(rootByLER, nil) + } + testCases := []testCase{ + { + description: "error on GetLastInfo", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on first GetRootByLER", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "not included yet", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 10}, nil). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: ErrNotOnL1Info, + }, + { + description: "error on GetFirstInfo", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetFirstInfoAfterBlock", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetRootByLER (inside binnary search)", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). 
+ Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Return(firstL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). + Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "happy path 1", + setupMocks: mockHappyPath, + depositCount: 10, + expectedIndex: 10, + expectedErr: nil, + }, + { + description: "happy path 2", + setupMocks: mockHappyPath, + depositCount: 11, + expectedIndex: 11, + expectedErr: nil, + }, + { + description: "happy path 3", + setupMocks: mockHappyPath, + depositCount: 333, + expectedIndex: 333, + expectedErr: nil, + }, + { + description: "happy path 4", + setupMocks: mockHappyPath, + depositCount: 420, + expectedIndex: 420, + expectedErr: nil, + }, + { + description: "happy path 5", + setupMocks: mockHappyPath, + depositCount: 69, + expectedIndex: 69, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + log.Debugf("running test case: %s", tc.description) + tc.setupMocks() + actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL1Bridge(ctx, tc.depositCount) + require.Equal(t, tc.expectedErr, err) + require.Equal(t, tc.expectedIndex, actualIndex) + } +} + +func TestGetFirstL1InfoTreeIndexForL2Bridge(t *testing.T) { + type testCase struct { + description string + setupMocks func() + depositCount uint32 + expectedIndex uint32 + expectedErr error + } + ctx := context.Background() + b := newBridgeWithMocks(t) + fooErr := errors.New("foo") + firstVerified := &l1infotreesync.VerifyBatches{ + BlockNumber: 10, + ExitRoot: common.HexToHash("alfa"), + } + lastVerified := &l1infotreesync.VerifyBatches{ + BlockNumber: 1000, + ExitRoot: common.HexToHash("alfa"), + } + mockHappyPath := func() { + // to make this work, assume that block number == l1 info tree index == deposit count + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(firstVerified, nil). + Once() + verifiedAfterBlock := &l1infotreesync.VerifyBatches{} + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). + Run(func(args mock.Arguments) { + blockNum, ok := args.Get(1).(uint64) + require.True(t, ok) + verifiedAfterBlock.BlockNumber = blockNum + verifiedAfterBlock.ExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + verifiedAfterBlock.RollupExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + }). + Return(verifiedAfterBlock, nil) + rootByLER := &tree.Root{} + b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). + Run(func(args mock.Arguments) { + ler, ok := args.Get(1).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + if ler == common.HexToHash("alfa") { + index = uint32(lastVerified.BlockNumber) + } + rootByLER.Index = index + }). + Return(rootByLER, nil) + info := &l1infotreesync.L1InfoTreeLeaf{} + b.l1InfoTree.On("GetFirstL1InfoWithRollupExitRoot", mock.Anything). + Run(func(args mock.Arguments) { + exitRoot, ok := args.Get(0).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(exitRoot.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + info.L1InfoTreeIndex = index + }). + Return(info, nil). 
+ Once()
+ }
+ testCases := []testCase{
+ {
+ description: "error on GetLastVerified",
+ setupMocks: func() {
+ b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)).
+ Return(nil, fooErr).
+ Once()
+ },
+ depositCount: 11,
+ expectedIndex: 0,
+ expectedErr: fooErr,
+ },
+ {
+ description: "error on first GetRootByLER",
+ setupMocks: func() {
+ b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)).
+ Return(lastVerified, nil).
+ Once()
+ b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot).
+ Return(&tree.Root{}, fooErr).
+ Once()
+ },
+ depositCount: 11,
+ expectedIndex: 0,
+ expectedErr: fooErr,
+ },
+ {
+ description: "not included yet",
+ setupMocks: func() {
+ b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)).
+ Return(lastVerified, nil).
+ Once()
+ b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot).
+ Return(&tree.Root{Index: 10}, nil).
+ Once()
+ },
+ depositCount: 11,
+ expectedIndex: 0,
+ expectedErr: ErrNotOnL1Info,
+ },
+ {
+ description: "error on GetFirstVerified",
+ setupMocks: func() {
+ b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)).
+ Return(lastVerified, nil).
+ Once()
+ b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot).
+ Return(&tree.Root{Index: 13}, nil).
+ Once()
+ b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)).
+ Return(nil, fooErr).
+ Once()
+ },
+ depositCount: 11,
+ expectedIndex: 0,
+ expectedErr: fooErr,
+ },
+ {
+ description: "error on GetFirstVerifiedBatchesAfterBlock",
+ setupMocks: func() {
+ b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)).
+ Return(lastVerified, nil).
+ Once()
+ b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot).
+ Return(&tree.Root{Index: 13}, nil).
+ Once()
+ b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)).
+ Return(firstVerified, nil).
+ Once()
+ b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything).
+ Return(nil, fooErr).
+ Once()
+ },
+ depositCount: 11,
+ expectedIndex: 0,
+ expectedErr: fooErr,
+ },
+ {
+ description: "error on GetRootByLER (inside binary search)",
+ setupMocks: func() {
+ b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)).
+ Return(lastVerified, nil).
+ Once()
+ b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot).
+ Return(&tree.Root{Index: 13}, nil).
+ Once()
+ b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)).
+ Return(firstVerified, nil).
+ Once()
+ b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything).
+ Return(firstVerified, nil).
+ Once()
+ b.bridgeL2.On("GetRootByLER", ctx, mock.Anything).
+ Return(&tree.Root{}, fooErr).
+ Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "happy path 1", + setupMocks: mockHappyPath, + depositCount: 10, + expectedIndex: 10, + expectedErr: nil, + }, + { + description: "happy path 2", + setupMocks: mockHappyPath, + depositCount: 11, + expectedIndex: 11, + expectedErr: nil, + }, + { + description: "happy path 3", + setupMocks: mockHappyPath, + depositCount: 333, + expectedIndex: 333, + expectedErr: nil, + }, + { + description: "happy path 4", + setupMocks: mockHappyPath, + depositCount: 420, + expectedIndex: 420, + expectedErr: nil, + }, + { + description: "happy path 5", + setupMocks: mockHappyPath, + depositCount: 69, + expectedIndex: 69, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + log.Debugf("running test case: %s", tc.description) + tc.setupMocks() + actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL2Bridge(ctx, tc.depositCount) + require.Equal(t, tc.expectedErr, err) + require.Equal(t, tc.expectedIndex, actualIndex) + } +} + +type bridgeWithMocks struct { + bridge *BridgeEndpoints + sponsor *mocks.ClaimSponsorer + l1InfoTree *mocks.L1InfoTreer + injectedGERs *mocks.LastGERer + bridgeL1 *mocks.Bridger + bridgeL2 *mocks.Bridger +} + +func newBridgeWithMocks(t *testing.T) bridgeWithMocks { + t.Helper() + b := bridgeWithMocks{ + sponsor: mocks.NewClaimSponsorer(t), + l1InfoTree: mocks.NewL1InfoTreer(t), + injectedGERs: mocks.NewLastGERer(t), + bridgeL1: mocks.NewBridger(t), + bridgeL2: mocks.NewBridger(t), + } + logger := log.WithFields("module", "bridgerpc") + b.bridge = NewBridgeEndpoints( + logger, 0, 0, 2, b.sponsor, b.l1InfoTree, b.injectedGERs, b.bridgeL1, b.bridgeL2, + ) + return b +} diff --git a/rpc/bridge_client.go b/rpc/client/bridge.go similarity index 95% rename from rpc/bridge_client.go rename to rpc/client/bridge.go index 04d57700..f67907f2 100644 --- a/rpc/bridge_client.go +++ b/rpc/client/bridge.go @@ -8,12 +8,13 @@ import ( "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/rpc/types" ) type BridgeClientInterface interface { L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) - ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) + ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) SponsorClaim(claim claimsponsor.Claim) error GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) } @@ -53,7 +54,7 @@ func (c *Client) InjectedInfoAfterIndex( // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. 
// This call needs to be done to a client of the same network were the bridge tx was sent -func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) { +func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { response, err := rpc.JSONRPCCall(c.url, "bridge_claimProof", networkID, depositCount, l1InfoTreeIndex) if err != nil { return nil, err @@ -61,7 +62,7 @@ func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeInd if response.Error != nil { return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) } - var result ClaimProof + var result types.ClaimProof return &result, json.Unmarshal(response.Result, &result) } diff --git a/rpc/client.go b/rpc/client/client.go similarity index 100% rename from rpc/client.go rename to rpc/client/client.go diff --git a/rpc/mocks/bridge_client_interface.go b/rpc/mocks/bridge_client_interface.go new file mode 100644 index 00000000..4c5200e4 --- /dev/null +++ b/rpc/mocks/bridge_client_interface.go @@ -0,0 +1,319 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// BridgeClientInterface is an autogenerated mock type for the BridgeClientInterface type +type BridgeClientInterface struct { + mock.Mock +} + +type BridgeClientInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *BridgeClientInterface) EXPECT() *BridgeClientInterface_Expecter { + return &BridgeClientInterface_Expecter{mock: &_m.Mock} +} + +// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex +func (_m *BridgeClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { + ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for ClaimProof") + } + + var r0 *types.ClaimProof + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { + return rf(networkID, depositCount, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { + r0 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ClaimProof) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { + r1 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' +type BridgeClientInterface_ClaimProof_Call struct { + *mock.Call +} + +// ClaimProof is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +// - l1InfoTreeIndex uint32 +func (_e *BridgeClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_ClaimProof_Call { + return &BridgeClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} +} + +func (_c *BridgeClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_ClaimProof_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Return(run) + return _c +} + +// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex +func (_m *BridgeClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { + ret := _m.Called(globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSponsoredClaimStatus") + } + + var r0 claimsponsor.ClaimStatus + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { + return rf(globalIndex) + } + if rf, ok := ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { + r0 = rf(globalIndex) + } else { + r0 = ret.Get(0).(claimsponsor.ClaimStatus) + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' +type BridgeClientInterface_GetSponsoredClaimStatus_Call struct { + *mock.Call +} + +// GetSponsoredClaimStatus is a helper method to define mock.On call +// - globalIndex *big.Int +func (_e *BridgeClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + return &BridgeClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int)) + }) + return _c +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(run) + return _c +} + +// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex +func (_m *BridgeClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(networkID, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for InjectedInfoAfterIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(networkID, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(networkID, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
BridgeClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' +type BridgeClientInterface_InjectedInfoAfterIndex_Call struct { + *mock.Call +} + +// InjectedInfoAfterIndex is a helper method to define mock.On call +// - networkID uint32 +// - l1InfoTreeIndex uint32 +func (_e *BridgeClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + return &BridgeClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(run) + return _c +} + +// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount +func (_m *BridgeClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) { + ret := _m.Called(networkID, depositCount) + + if len(ret) == 0 { + panic("no return value specified for L1InfoTreeIndexForBridge") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { + return rf(networkID, depositCount) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) uint32); ok { + r0 = rf(networkID, depositCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, depositCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' +type BridgeClientInterface_L1InfoTreeIndexForBridge_Call struct { + *mock.Call +} + +// L1InfoTreeIndexForBridge is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +func (_e *BridgeClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + return &BridgeClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(run) + return _c +} + 
+// SponsorClaim provides a mock function with given fields: claim +func (_m *BridgeClientInterface) SponsorClaim(claim claimsponsor.Claim) error { + ret := _m.Called(claim) + + if len(ret) == 0 { + panic("no return value specified for SponsorClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { + r0 = rf(claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BridgeClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' +type BridgeClientInterface_SponsorClaim_Call struct { + *mock.Call +} + +// SponsorClaim is a helper method to define mock.On call +// - claim claimsponsor.Claim +func (_e *BridgeClientInterface_Expecter) SponsorClaim(claim interface{}) *BridgeClientInterface_SponsorClaim_Call { + return &BridgeClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(claimsponsor.Claim)) + }) + return _c +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) Return(_a0 error) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewBridgeClientInterface creates a new instance of BridgeClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridgeClientInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *BridgeClientInterface { + mock := &BridgeClientInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/bridger.go b/rpc/mocks/bridger.go new file mode 100644 index 00000000..d0344c29 --- /dev/null +++ b/rpc/mocks/bridger.go @@ -0,0 +1,159 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/tree/types" +) + +// Bridger is an autogenerated mock type for the Bridger type +type Bridger struct { + mock.Mock +} + +type Bridger_Expecter struct { + mock *mock.Mock +} + +func (_m *Bridger) EXPECT() *Bridger_Expecter { + return &Bridger_Expecter{mock: &_m.Mock} +} + +// GetProof provides a mock function with given fields: ctx, depositCount, localExitRoot +func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (types.Proof, error) { + ret := _m.Called(ctx, depositCount, localExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetProof") + } + + var r0 types.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + return rf(ctx, depositCount, localExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + r0 = rf(ctx, depositCount, localExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, depositCount, localExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Bridger_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' +type Bridger_GetProof_Call struct { + *mock.Call +} + +// GetProof is a helper method to define mock.On call +// - ctx context.Context +// - depositCount uint32 +// - localExitRoot common.Hash +func (_e *Bridger_Expecter) GetProof(ctx interface{}, depositCount interface{}, localExitRoot interface{}) *Bridger_GetProof_Call { + return &Bridger_GetProof_Call{Call: _e.mock.On("GetProof", ctx, depositCount, localExitRoot)} +} + +func (_c *Bridger_GetProof_Call) Run(run func(ctx context.Context, depositCount uint32, localExitRoot common.Hash)) *Bridger_GetProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *Bridger_GetProof_Call) Return(_a0 types.Proof, _a1 error) *Bridger_GetProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *Bridger_GetProof_Call { + _c.Call.Return(run) + return _c +} + +// GetRootByLER provides a mock function with given fields: ctx, ler +func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*types.Root, error) { + ret := _m.Called(ctx, ler) + + if len(ret) == 0 { + panic("no return value specified for GetRootByLER") + } + + var r0 *types.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Root, error)); ok { + return rf(ctx, ler) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Root); ok { + r0 = rf(ctx, ler) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Root) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, ler) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Bridger_GetRootByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRootByLER' +type Bridger_GetRootByLER_Call struct { + *mock.Call +} + +// GetRootByLER is a helper method to define mock.On call +// - ctx 
context.Context +// - ler common.Hash +func (_e *Bridger_Expecter) GetRootByLER(ctx interface{}, ler interface{}) *Bridger_GetRootByLER_Call { + return &Bridger_GetRootByLER_Call{Call: _e.mock.On("GetRootByLER", ctx, ler)} +} + +func (_c *Bridger_GetRootByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *Bridger_GetRootByLER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *Bridger_GetRootByLER_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetRootByLER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Root, error)) *Bridger_GetRootByLER_Call { + _c.Call.Return(run) + return _c +} + +// NewBridger creates a new instance of Bridger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridger(t interface { + mock.TestingT + Cleanup(func()) +}) *Bridger { + mock := &Bridger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/claim_sponsorer.go b/rpc/mocks/claim_sponsorer.go new file mode 100644 index 00000000..59530955 --- /dev/null +++ b/rpc/mocks/claim_sponsorer.go @@ -0,0 +1,145 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + + mock "github.com/stretchr/testify/mock" +) + +// ClaimSponsorer is an autogenerated mock type for the ClaimSponsorer type +type ClaimSponsorer struct { + mock.Mock +} + +type ClaimSponsorer_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimSponsorer) EXPECT() *ClaimSponsorer_Expecter { + return &ClaimSponsorer_Expecter{mock: &_m.Mock} +} + +// AddClaimToQueue provides a mock function with given fields: ctx, claim +func (_m *ClaimSponsorer) AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error { + ret := _m.Called(ctx, claim) + + if len(ret) == 0 { + panic("no return value specified for AddClaimToQueue") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *claimsponsor.Claim) error); ok { + r0 = rf(ctx, claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimSponsorer_AddClaimToQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddClaimToQueue' +type ClaimSponsorer_AddClaimToQueue_Call struct { + *mock.Call +} + +// AddClaimToQueue is a helper method to define mock.On call +// - ctx context.Context +// - claim *claimsponsor.Claim +func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(ctx interface{}, claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { + return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", ctx, claim)} +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(ctx context.Context, claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*claimsponsor.Claim)) + }) + return _c +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Return(_a0 error) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(context.Context, *claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Return(run) + return _c +} + +// 
GetClaim provides a mock function with given fields: ctx, globalIndex +func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) { + ret := _m.Called(ctx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaim") + } + + var r0 *claimsponsor.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*claimsponsor.Claim, error)); ok { + return rf(ctx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *claimsponsor.Claim); ok { + r0 = rf(ctx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*claimsponsor.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSponsorer_GetClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaim' +type ClaimSponsorer_GetClaim_Call struct { + *mock.Call +} + +// GetClaim is a helper method to define mock.On call +// - ctx context.Context +// - globalIndex *big.Int +func (_e *ClaimSponsorer_Expecter) GetClaim(ctx interface{}, globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { + return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", ctx, globalIndex)} +} + +func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ClaimSponsorer_GetClaim_Call) Return(_a0 *claimsponsor.Claim, _a1 error) *ClaimSponsorer_GetClaim_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(context.Context, *big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimSponsorer creates a new instance of ClaimSponsorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClaimSponsorer(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimSponsorer { + mock := &ClaimSponsorer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/client_factory_interface.go b/rpc/mocks/client_factory_interface.go new file mode 100644 index 00000000..aca7aed0 --- /dev/null +++ b/rpc/mocks/client_factory_interface.go @@ -0,0 +1,83 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + rpc "github.com/0xPolygon/cdk/rpc/client" + mock "github.com/stretchr/testify/mock" +) + +// ClientFactoryInterface is an autogenerated mock type for the ClientFactoryInterface type +type ClientFactoryInterface struct { + mock.Mock +} + +type ClientFactoryInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ClientFactoryInterface) EXPECT() *ClientFactoryInterface_Expecter { + return &ClientFactoryInterface_Expecter{mock: &_m.Mock} +} + +// NewClient provides a mock function with given fields: url +func (_m *ClientFactoryInterface) NewClient(url string) rpc.ClientInterface { + ret := _m.Called(url) + + if len(ret) == 0 { + panic("no return value specified for NewClient") + } + + var r0 rpc.ClientInterface + if rf, ok := ret.Get(0).(func(string) rpc.ClientInterface); ok { + r0 = rf(url) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(rpc.ClientInterface) + } + } + + return r0 +} + +// ClientFactoryInterface_NewClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewClient' +type ClientFactoryInterface_NewClient_Call struct { + *mock.Call +} + +// NewClient is a helper method to define mock.On call +// - url string +func (_e *ClientFactoryInterface_Expecter) NewClient(url interface{}) *ClientFactoryInterface_NewClient_Call { + return &ClientFactoryInterface_NewClient_Call{Call: _e.mock.On("NewClient", url)} +} + +func (_c *ClientFactoryInterface_NewClient_Call) Run(run func(url string)) *ClientFactoryInterface_NewClient_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ClientFactoryInterface_NewClient_Call) Return(_a0 rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClientFactoryInterface_NewClient_Call) RunAndReturn(run func(string) rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { + _c.Call.Return(run) + return _c +} + +// NewClientFactoryInterface creates a new instance of ClientFactoryInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientFactoryInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ClientFactoryInterface { + mock := &ClientFactoryInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/client_interface.go b/rpc/mocks/client_interface.go new file mode 100644 index 00000000..28b87775 --- /dev/null +++ b/rpc/mocks/client_interface.go @@ -0,0 +1,319 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// ClientInterface is an autogenerated mock type for the ClientInterface type +type ClientInterface struct { + mock.Mock +} + +type ClientInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ClientInterface) EXPECT() *ClientInterface_Expecter { + return &ClientInterface_Expecter{mock: &_m.Mock} +} + +// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex +func (_m *ClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { + ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for ClaimProof") + } + + var r0 *types.ClaimProof + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { + return rf(networkID, depositCount, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { + r0 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ClaimProof) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { + r1 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' +type ClientInterface_ClaimProof_Call struct { + *mock.Call +} + +// ClaimProof is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +// - l1InfoTreeIndex uint32 +func (_e *ClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *ClientInterface_ClaimProof_Call { + return &ClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} +} + +func (_c *ClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *ClientInterface_ClaimProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *ClientInterface_ClaimProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *ClientInterface_ClaimProof_Call { + _c.Call.Return(run) + return _c +} + +// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex +func (_m *ClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { + ret := _m.Called(globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSponsoredClaimStatus") + } + + var r0 claimsponsor.ClaimStatus + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { + return rf(globalIndex) + } + if rf, ok := ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { + r0 = rf(globalIndex) + } else { + r0 = ret.Get(0).(claimsponsor.ClaimStatus) + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} 
+ +// ClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' +type ClientInterface_GetSponsoredClaimStatus_Call struct { + *mock.Call +} + +// GetSponsoredClaimStatus is a helper method to define mock.On call +// - globalIndex *big.Int +func (_e *ClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *ClientInterface_GetSponsoredClaimStatus_Call { + return &ClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int)) + }) + return _c +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(run) + return _c +} + +// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex +func (_m *ClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(networkID, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for InjectedInfoAfterIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(networkID, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(networkID, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' +type ClientInterface_InjectedInfoAfterIndex_Call struct { + *mock.Call +} + +// InjectedInfoAfterIndex is a helper method to define mock.On call +// - networkID uint32 +// - l1InfoTreeIndex uint32 +func (_e *ClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *ClientInterface_InjectedInfoAfterIndex_Call { + return &ClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(run) + return _c +} + +// L1InfoTreeIndexForBridge provides a mock 
function with given fields: networkID, depositCount +func (_m *ClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) { + ret := _m.Called(networkID, depositCount) + + if len(ret) == 0 { + panic("no return value specified for L1InfoTreeIndexForBridge") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { + return rf(networkID, depositCount) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) uint32); ok { + r0 = rf(networkID, depositCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, depositCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' +type ClientInterface_L1InfoTreeIndexForBridge_Call struct { + *mock.Call +} + +// L1InfoTreeIndexForBridge is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +func (_e *ClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *ClientInterface_L1InfoTreeIndexForBridge_Call { + return &ClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(run) + return _c +} + +// SponsorClaim provides a mock function with given fields: claim +func (_m *ClientInterface) SponsorClaim(claim claimsponsor.Claim) error { + ret := _m.Called(claim) + + if len(ret) == 0 { + panic("no return value specified for SponsorClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { + r0 = rf(claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' +type ClientInterface_SponsorClaim_Call struct { + *mock.Call +} + +// SponsorClaim is a helper method to define mock.On call +// - claim claimsponsor.Claim +func (_e *ClientInterface_Expecter) SponsorClaim(claim interface{}) *ClientInterface_SponsorClaim_Call { + return &ClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} +} + +func (_c *ClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *ClientInterface_SponsorClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(claimsponsor.Claim)) + }) + return _c +} + +func (_c *ClientInterface_SponsorClaim_Call) Return(_a0 error) *ClientInterface_SponsorClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *ClientInterface_SponsorClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewClientInterface creates a new instance of ClientInterface. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ClientInterface { + mock := &ClientInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/l1_info_treer.go b/rpc/mocks/l1_info_treer.go new file mode 100644 index 00000000..a4e0f66c --- /dev/null +++ b/rpc/mocks/l1_info_treer.go @@ -0,0 +1,626 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/tree/types" +) + +// L1InfoTreer is an autogenerated mock type for the L1InfoTreer type +type L1InfoTreer struct { + mock.Mock +} + +type L1InfoTreer_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreer) EXPECT() *L1InfoTreer_Expecter { + return &L1InfoTreer_Expecter{mock: &_m.Mock} +} + +// GetFirstInfo provides a mock function with given fields: +func (_m *L1InfoTreer) GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFirstInfo") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfo' +type L1InfoTreer_GetFirstInfo_Call struct { + *mock.Call +} + +// GetFirstInfo is a helper method to define mock.On call +func (_e *L1InfoTreer_Expecter) GetFirstInfo() *L1InfoTreer_GetFirstInfo_Call { + return &L1InfoTreer_GetFirstInfo_Call{Call: _e.mock.On("GetFirstInfo")} +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) Run(run func()) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstInfoAfterBlock provides a mock function with given fields: blockNum +func (_m *L1InfoTreer) GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetFirstInfoAfterBlock") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(blockNum) + } + if rf, ok := ret.Get(0).(func(uint64) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(blockNum) + } else { + r1 = ret.Error(1) + } 
+ + return r0, r1 +} + +// L1InfoTreer_GetFirstInfoAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfoAfterBlock' +type L1InfoTreer_GetFirstInfoAfterBlock_Call struct { + *mock.Call +} + +// GetFirstInfoAfterBlock is a helper method to define mock.On call +// - blockNum uint64 +func (_e *L1InfoTreer_Expecter) GetFirstInfoAfterBlock(blockNum interface{}) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + return &L1InfoTreer_GetFirstInfoAfterBlock_Call{Call: _e.mock.On("GetFirstInfoAfterBlock", blockNum)} +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Run(run func(blockNum uint64)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) RunAndReturn(run func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstL1InfoWithRollupExitRoot provides a mock function with given fields: rollupExitRoot +func (_m *L1InfoTreer) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(rollupExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetFirstL1InfoWithRollupExitRoot") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(rollupExitRoot) + } + if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(rollupExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(rollupExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstL1InfoWithRollupExitRoot' +type L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call struct { + *mock.Call +} + +// GetFirstL1InfoWithRollupExitRoot is a helper method to define mock.On call +// - rollupExitRoot common.Hash +func (_e *L1InfoTreer_Expecter) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot interface{}) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + return &L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call{Call: _e.mock.On("GetFirstL1InfoWithRollupExitRoot", rollupExitRoot)} +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Run(run func(rollupExitRoot common.Hash)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstVerifiedBatches provides a mock function with given fields: rollupID +func (_m *L1InfoTreer) GetFirstVerifiedBatches(rollupID 
uint32) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID) + + if len(ret) == 0 { + panic("no return value specified for GetFirstVerifiedBatches") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID) + } + if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(rollupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatches' +type L1InfoTreer_GetFirstVerifiedBatches_Call struct { + *mock.Call +} + +// GetFirstVerifiedBatches is a helper method to define mock.On call +// - rollupID uint32 +func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetFirstVerifiedBatches_Call { + return &L1InfoTreer_GetFirstVerifiedBatches_Call{Call: _e.mock.On("GetFirstVerifiedBatches", rollupID)} +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstVerifiedBatchesAfterBlock provides a mock function with given fields: rollupID, blockNum +func (_m *L1InfoTreer) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID, blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetFirstVerifiedBatchesAfterBlock") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID, blockNum) + } + if rf, ok := ret.Get(0).(func(uint32, uint64) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID, blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint64) error); ok { + r1 = rf(rollupID, blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatchesAfterBlock' +type L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call struct { + *mock.Call +} + +// GetFirstVerifiedBatchesAfterBlock is a helper method to define mock.On call +// - rollupID uint32 +// - blockNum uint64 +func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatchesAfterBlock(rollupID interface{}, blockNum interface{}) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + return &L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call{Call: _e.mock.On("GetFirstVerifiedBatchesAfterBlock", rollupID, blockNum)} +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Run(run func(rollupID uint32, blockNum 
uint64)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint64)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) RunAndReturn(run func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetInfoByIndex provides a mock function with given fields: ctx, index +func (_m *L1InfoTreer) GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetInfoByIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(ctx, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetInfoByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByIndex' +type L1InfoTreer_GetInfoByIndex_Call struct { + *mock.Call +} + +// GetInfoByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L1InfoTreer_Expecter) GetInfoByIndex(ctx interface{}, index interface{}) *L1InfoTreer_GetInfoByIndex_Call { + return &L1InfoTreer_GetInfoByIndex_Call{Call: _e.mock.On("GetInfoByIndex", ctx, index)} +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) RunAndReturn(run func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastInfo provides a mock function with given fields: +func (_m *L1InfoTreer) GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLastInfo") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLastInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastInfo' +type L1InfoTreer_GetLastInfo_Call struct { + *mock.Call +} + +// GetLastInfo is a helper method to 
define mock.On call +func (_e *L1InfoTreer_Expecter) GetLastInfo() *L1InfoTreer_GetLastInfo_Call { + return &L1InfoTreer_GetLastInfo_Call{Call: _e.mock.On("GetLastInfo")} +} + +func (_c *L1InfoTreer_GetLastInfo_Call) Run(run func()) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L1InfoTreer_GetLastInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLastInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetLastVerifiedBatches provides a mock function with given fields: rollupID +func (_m *L1InfoTreer) GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID) + + if len(ret) == 0 { + panic("no return value specified for GetLastVerifiedBatches") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID) + } + if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(rollupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLastVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastVerifiedBatches' +type L1InfoTreer_GetLastVerifiedBatches_Call struct { + *mock.Call +} + +// GetLastVerifiedBatches is a helper method to define mock.On call +// - rollupID uint32 +func (_e *L1InfoTreer_Expecter) GetLastVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetLastVerifiedBatches_Call { + return &L1InfoTreer_GetLastVerifiedBatches_Call{Call: _e.mock.On("GetLastVerifiedBatches", rollupID)} +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Return(run) + return _c +} + +// GetLocalExitRoot provides a mock function with given fields: ctx, networkID, rollupExitRoot +func (_m *L1InfoTreer) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) { + ret := _m.Called(ctx, networkID, rollupExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLocalExitRoot") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (common.Hash, error)); ok { + return rf(ctx, networkID, rollupExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) common.Hash); ok { + r0 = rf(ctx, networkID, rollupExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, networkID, rollupExitRoot) + } else { + r1 = ret.Error(1) + 
} + + return r0, r1 +} + +// L1InfoTreer_GetLocalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLocalExitRoot' +type L1InfoTreer_GetLocalExitRoot_Call struct { + *mock.Call +} + +// GetLocalExitRoot is a helper method to define mock.On call +// - ctx context.Context +// - networkID uint32 +// - rollupExitRoot common.Hash +func (_e *L1InfoTreer_Expecter) GetLocalExitRoot(ctx interface{}, networkID interface{}, rollupExitRoot interface{}) *L1InfoTreer_GetLocalExitRoot_Call { + return &L1InfoTreer_GetLocalExitRoot_Call{Call: _e.mock.On("GetLocalExitRoot", ctx, networkID, rollupExitRoot)} +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) Run(run func(ctx context.Context, networkID uint32, rollupExitRoot common.Hash)) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) Return(_a0 common.Hash, _a1 error) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (common.Hash, error)) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetRollupExitTreeMerkleProof provides a mock function with given fields: ctx, networkID, root +func (_m *L1InfoTreer) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (types.Proof, error) { + ret := _m.Called(ctx, networkID, root) + + if len(ret) == 0 { + panic("no return value specified for GetRollupExitTreeMerkleProof") + } + + var r0 types.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + return rf(ctx, networkID, root) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + r0 = rf(ctx, networkID, root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, networkID, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetRollupExitTreeMerkleProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupExitTreeMerkleProof' +type L1InfoTreer_GetRollupExitTreeMerkleProof_Call struct { + *mock.Call +} + +// GetRollupExitTreeMerkleProof is a helper method to define mock.On call +// - ctx context.Context +// - networkID uint32 +// - root common.Hash +func (_e *L1InfoTreer_Expecter) GetRollupExitTreeMerkleProof(ctx interface{}, networkID interface{}, root interface{}) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + return &L1InfoTreer_GetRollupExitTreeMerkleProof_Call{Call: _e.mock.On("GetRollupExitTreeMerkleProof", ctx, networkID, root)} +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Run(run func(ctx context.Context, networkID uint32, root common.Hash)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Return(_a0 types.Proof, _a1 error) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) 
(types.Proof, error)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Return(run) + return _c +} + +// NewL1InfoTreer creates a new instance of L1InfoTreer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1InfoTreer(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreer { + mock := &L1InfoTreer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/last_ge_rer.go b/rpc/mocks/last_ge_rer.go new file mode 100644 index 00000000..d2e3068a --- /dev/null +++ b/rpc/mocks/last_ge_rer.go @@ -0,0 +1,104 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// LastGERer is an autogenerated mock type for the LastGERer type +type LastGERer struct { + mock.Mock +} + +type LastGERer_Expecter struct { + mock *mock.Mock +} + +func (_m *LastGERer) EXPECT() *LastGERer_Expecter { + return &LastGERer_Expecter{mock: &_m.Mock} +} + +// GetFirstGERAfterL1InfoTreeIndex provides a mock function with given fields: ctx, atOrAfterL1InfoTreeIndex +func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (uint32, common.Hash, error) { + ret := _m.Called(ctx, atOrAfterL1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for GetFirstGERAfterL1InfoTreeIndex") + } + + var r0 uint32 + var r1 common.Hash + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (uint32, common.Hash, error)); ok { + return rf(ctx, atOrAfterL1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) uint32); ok { + r0 = rf(ctx, atOrAfterL1InfoTreeIndex) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) common.Hash); ok { + r1 = rf(ctx, atOrAfterL1InfoTreeIndex) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(common.Hash) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32) error); ok { + r2 = rf(ctx, atOrAfterL1InfoTreeIndex) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstGERAfterL1InfoTreeIndex' +type LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call struct { + *mock.Call +} + +// GetFirstGERAfterL1InfoTreeIndex is a helper method to define mock.On call +// - ctx context.Context +// - atOrAfterL1InfoTreeIndex uint32 +func (_e *LastGERer_Expecter) GetFirstGERAfterL1InfoTreeIndex(ctx interface{}, atOrAfterL1InfoTreeIndex interface{}) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + return &LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call{Call: _e.mock.On("GetFirstGERAfterL1InfoTreeIndex", ctx, atOrAfterL1InfoTreeIndex)} +} + +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Run(run func(ctx context.Context, atOrAfterL1InfoTreeIndex uint32)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(injectedL1InfoTreeIndex uint32, ger common.Hash, err error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Return(injectedL1InfoTreeIndex, ger, err) + return _c +} + +func (_c 
*LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (uint32, common.Hash, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { + _c.Call.Return(run) + return _c +} + +// NewLastGERer creates a new instance of LastGERer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLastGERer(t interface { + mock.TestingT + Cleanup(func()) +}) *LastGERer { + mock := &LastGERer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/types/bridge.go b/rpc/types/bridge.go new file mode 100644 index 00000000..eb8c6464 --- /dev/null +++ b/rpc/types/bridge.go @@ -0,0 +1,12 @@ +package types + +import ( + "github.com/0xPolygon/cdk/l1infotreesync" + tree "github.com/0xPolygon/cdk/tree/types" +) + +type ClaimProof struct { + ProofLocalExitRoot tree.Proof + ProofRollupExitRoot tree.Proof + L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf +} diff --git a/test/Makefile b/test/Makefile index 0c50ec35..0864b8d2 100644 --- a/test/Makefile +++ b/test/Makefile @@ -21,6 +21,11 @@ generate-mocks-da: ## Generates mocks for dataavailability, using mockery tool rm -Rf ../dataavailability/mocks_da export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../dataavailability --output ../dataavailability/mocks_da --outpkg mocks_da ${COMMON_MOCKERY_PARAMS} +.PHONY: generate-mocks-rpc +generate-mocks-rpc: ## Generates mocks for rpc, using mockery tool + rm -Rf ../rpc/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../rpc --output ../rpc/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} + .PHONY: test-e2e-elderberry-validium test-e2e-elderberry-validium: stop ## Runs e2e tests checking elderberry/validium diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go index 20d22ec1..418a576b 100644 --- a/tree/appendonlytree.go +++ b/tree/appendonlytree.go @@ -82,7 +82,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { siblings := [types.DefaultHeight]common.Hash{} lastRoot, err := t.getLastRootWithTx(tx) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { t.lastIndex = -1 t.lastLeftCache = siblings return nil @@ -102,7 +102,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { ) } if currentNode == nil { - return ErrNotFound + return db.ErrNotFound } siblings[h] = currentNode.Left if index&(1< 0 { diff --git a/tree/tree.go b/tree/tree.go index 2107ba68..5d307e8a 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -14,8 +14,7 @@ import ( ) var ( - EmptyProof = types.Proof{} - ErrNotFound = errors.New("not found") + EmptyProof = types.Proof{} ) type Tree struct { @@ -50,7 +49,7 @@ func newTree(db *sql.DB, tablePrefix string) *Tree { } func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) ( - siblings [32]common.Hash, + siblings types.Proof, hasUsedZeroHashes bool, err error, ) { @@ -60,7 +59,7 @@ func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) ( var currentNode *types.TreeNode currentNode, err = t.getRHTNode(tx, currentNodeHash) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { hasUsedZeroHashes = true siblings[h] = t.zeroHashes[h] err = nil @@ -113,7 +112,7 @@ func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (ty return types.Proof{}, err } if isErrNotFound { - return 
types.Proof{}, ErrNotFound + return types.Proof{}, db.ErrNotFound } return siblings, nil } @@ -127,7 +126,7 @@ func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return node, ErrNotFound + return node, db.ErrNotFound } return node, err } @@ -185,7 +184,7 @@ func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) { ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return root, ErrNotFound + return root, db.ErrNotFound } return root, err } @@ -201,7 +200,7 @@ func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, er index, ); err != nil { if errors.Is(err, sql.ErrNoRows) { - return root, ErrNotFound + return root, db.ErrNotFound } return root, err } @@ -209,17 +208,17 @@ func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, er } // GetRootByHash returns the root associated to the hash -func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (types.Root, error) { - var root types.Root +func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (*types.Root, error) { + var root *types.Root if err := meddler.QueryRow( - t.db, &root, + t.db, root, fmt.Sprintf(`SELECT * FROM %s WHERE hash = $1;`, t.rootTable), hash.Hex(), ); err != nil { if errors.Is(err, sql.ErrNoRows) { - return root, ErrNotFound + return nil, db.ErrNotFound } - return root, err + return nil, err } return root, nil } diff --git a/tree/updatabletree.go b/tree/updatabletree.go index 3ed8b881..be861b55 100644 --- a/tree/updatabletree.go +++ b/tree/updatabletree.go @@ -23,21 +23,21 @@ func NewUpdatableTree(db *sql.DB, dbPrefix string) *UpdatableTree { return ut } -func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error { +func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) (common.Hash, error) { var rootHash common.Hash root, err := t.getLastRootWithTx(tx) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { rootHash = t.zeroHashes[types.DefaultHeight] } else { - return err + return common.Hash{}, err } } else { rootHash = root.Hash } siblings, _, err := t.getSiblings(tx, leaf.Index, rootHash) if err != nil { - return err + return common.Hash{}, err } currentChildHash := leaf.Hash newNodes := []types.TreeNode{} @@ -59,10 +59,10 @@ func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, l BlockNum: blockNum, BlockPosition: blockPosition, }); err != nil { - return err + return common.Hash{}, err } if err := t.storeNodes(tx, newNodes); err != nil { - return err + return common.Hash{}, err } - return nil + return currentChildHash, nil } From db81a2644ffd06aa461fa5a39c61fd46c501f943 Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Wed, 18 Sep 2024 18:22:58 +0200 Subject: [PATCH 04/53] feat: add missing methods needed to generate PP certificate (#82) * wip * wip * WIP * decoding direct and indeirecr assets and messages works * connect everything * fix building contract scripts * fix building contract scripts * wip * WIP * tree migrated to SQLite * wip * wip * bridgesync working with sqlite * pass tests * minor cleaning * add GetBlockByLER func * handle err not found * merge develop * use memory for sqlite on the tests * increase timestamp to pass UT * review * finished implementation * replace l1bridge2infosync for rpc logic * ut wip * unit tests for new rpc funcs * add UTs for new methods in the l1infotreesync processor 
* fix UTs * pass linter * add PR requests * add missing processor calls * fixx linter * feat: add missing methods needed to generate PP certificate * fix linter --- l1infotreesync/l1infotreesync.go | 11 ++++ .../migrations/l1infotreesync0001.sql | 2 +- l1infotreesync/processor.go | 60 +++++++++++-------- l1infotreesync/processor_test.go | 10 +++- 4 files changed, 56 insertions(+), 27 deletions(-) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index c414e42b..4c4b796e 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -179,3 +179,14 @@ func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { return s.processor.GetFirstInfoAfterBlock(blockNum) } + +func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetInfoByGlobalExitRoot(ger) +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot creates a merkle proof for the L1 Info tree +func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx context.Context, index uint32, root common.Hash, +) (types.Proof, error) { + return s.processor.l1InfoTree.GetProof(ctx, index, root) +} diff --git a/l1infotreesync/migrations/l1infotreesync0001.sql b/l1infotreesync/migrations/l1infotreesync0001.sql index f22408cd..7a689281 100644 --- a/l1infotreesync/migrations/l1infotreesync0001.sql +++ b/l1infotreesync/migrations/l1infotreesync0001.sql @@ -16,7 +16,7 @@ CREATE TABLE l1info_leaf ( timestamp INTEGER NOT NULL, mainnet_exit_root VARCHAR NOT NULL, rollup_exit_root VARCHAR NOT NULL, - global_exit_root VARCHAR NOT NULL, + global_exit_root VARCHAR NOT NULL UNIQUE, hash VARCHAR NOT NULL, PRIMARY KEY (block_num, block_pos) ); diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 7b762b55..a672c5ef 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -13,7 +13,7 @@ import ( "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" treeTypes "github.com/0xPolygon/cdk/tree/types" - ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" "github.com/russross/meddler" "golang.org/x/crypto/sha3" @@ -33,29 +33,29 @@ type processor struct { // UpdateL1InfoTree representation of the UpdateL1InfoTree event type UpdateL1InfoTree struct { BlockPosition uint64 - MainnetExitRoot ethCommon.Hash - RollupExitRoot ethCommon.Hash - ParentHash ethCommon.Hash + MainnetExitRoot common.Hash + RollupExitRoot common.Hash + ParentHash common.Hash Timestamp uint64 } // VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events type VerifyBatches struct { - BlockNumber uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_pos"` - RollupID uint32 `meddler:"rollup_id"` - NumBatch uint64 `meddler:"batch_num"` - StateRoot ethCommon.Hash `meddler:"state_root,hash"` - ExitRoot ethCommon.Hash `meddler:"exit_root,hash"` - Aggregator ethCommon.Address `meddler:"aggregator,address"` + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + RollupID uint32 `meddler:"rollup_id"` + NumBatch uint64 `meddler:"batch_num"` + StateRoot common.Hash `meddler:"state_root,hash"` + ExitRoot common.Hash `meddler:"exit_root,hash"` + Aggregator common.Address `meddler:"aggregator,address"` // Not provided by downloader - RollupExitRoot ethCommon.Hash 
`meddler:"rollup_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` } type InitL1InfoRootMap struct { LeafCount uint32 - CurrentL1InfoRoot ethCommon.Hash + CurrentL1InfoRoot common.Hash } type Event struct { @@ -66,19 +66,19 @@ type Event struct { // L1InfoTreeLeaf representation of a leaf of the L1 Info tree type L1InfoTreeLeaf struct { - BlockNumber uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_pos"` - L1InfoTreeIndex uint32 `meddler:"position"` - PreviousBlockHash ethCommon.Hash `meddler:"previous_block_hash,hash"` - Timestamp uint64 `meddler:"timestamp"` - MainnetExitRoot ethCommon.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot ethCommon.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` - Hash ethCommon.Hash `meddler:"hash,hash"` + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + L1InfoTreeIndex uint32 `meddler:"position"` + PreviousBlockHash common.Hash `meddler:"previous_block_hash,hash"` + Timestamp uint64 `meddler:"timestamp"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` + Hash common.Hash `meddler:"hash,hash"` } // Hash as expected by the tree -func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { +func (l *L1InfoTreeLeaf) hash() common.Hash { var res [treeTypes.DefaultHeight]byte t := make([]byte, 8) //nolint:mnd binary.BigEndian.PutUint64(t, l.Timestamp) @@ -87,7 +87,7 @@ func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { } // GlobalExitRoot returns the GER -func (l *L1InfoTreeLeaf) globalExitRoot() ethCommon.Hash { +func (l *L1InfoTreeLeaf) globalExitRoot() common.Hash { var gerBytes [treeTypes.DefaultHeight]byte hasher := sha3.NewLegacyKeccak256() hasher.Write(l.MainnetExitRoot[:]) @@ -362,7 +362,7 @@ func (p *processor) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum return verified, db.ReturnErrNotFound(err) } -func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot ethCommon.Hash) (*L1InfoTreeLeaf, error) { +func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { info := &L1InfoTreeLeaf{} err := meddler.QueryRow(p.db, info, ` SELECT * FROM l1info_leaf @@ -403,3 +403,13 @@ func (p *processor) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, er `, blockNum) return info, db.ReturnErrNotFound(err) } + +func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE global_exit_root = $1 + LIMIT 1; + `, ger.Hex()) + return info, db.ReturnErrNotFound(err) +} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 5853e90e..3da02998 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -91,6 +91,8 @@ func TestGetInfo(t *testing.T) { require.Equal(t, db.ErrNotFound, err) _, err = p.GetFirstInfoAfterBlock(0) require.Equal(t, db.ErrNotFound, err) + _, err = p.GetInfoByGlobalExitRoot(common.Hash{}) + require.Equal(t, db.ErrNotFound, err) // First insert info1 := &UpdateL1InfoTree{ @@ -128,10 +130,13 @@ func TestGetInfo(t *testing.T) { actual, err = p.GetFirstInfoAfterBlock(0) require.NoError(t, err) require.Equal(t, expected1, *actual) + actual, err = p.GetInfoByGlobalExitRoot(expected1.GlobalExitRoot) + require.NoError(t, 
err) + require.Equal(t, expected1, *actual) // Second insert info2 := &UpdateL1InfoTree{ - MainnetExitRoot: common.HexToHash("beef"), + MainnetExitRoot: common.HexToHash("b055"), RollupExitRoot: common.HexToHash("5ca1e"), ParentHash: common.HexToHash("1010101"), Timestamp: 420, @@ -165,4 +170,7 @@ func TestGetInfo(t *testing.T) { actual, err = p.GetFirstInfoAfterBlock(2) require.NoError(t, err) require.Equal(t, expected2, *actual) + actual, err = p.GetInfoByGlobalExitRoot(expected2.GlobalExitRoot) + require.NoError(t, err) + require.Equal(t, expected2, *actual) } From 681c9878e37bb930b3f3bd4f62cbcc8fbe3d2819 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 20 Sep 2024 11:48:05 +0200 Subject: [PATCH 05/53] feat: seq sender sanity check l1infotree (#86) --- sequencesender/sequencesender.go | 19 +++++--- sequencesender/sequencesender_test.go | 44 ++++++++++++++++++ sequencesender/txbuilder/banana_base.go | 16 +++++++ sequencesender/txbuilder/banana_base_test.go | 49 +++++++++++++++++++- sequencesender/txbuilder/banana_types.go | 35 ++++++++++++++ 5 files changed, 154 insertions(+), 9 deletions(-) diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 3431d3fe..1d76d3c0 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -214,6 +214,7 @@ func (s *SequenceSender) purgeSequences() { // Purge the information of batches that are already virtualized s.mutexSequence.Lock() + defer s.mutexSequence.Unlock() truncateUntil := 0 toPurge := make([]uint64, 0) for i := 0; i < len(s.sequenceList); i++ { @@ -240,7 +241,6 @@ func (s *SequenceSender) purgeSequences() { } s.logger.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) } - s.mutexSequence.Unlock() } // purgeEthTx purges transactions from memory structures @@ -252,6 +252,7 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) { // Purge old transactions that are finalized s.mutexEthTx.Lock() + defer s.mutexEthTx.Unlock() timePurge := time.Now().Add(-s.cfg.WaitPeriodPurgeTxFile.Duration) toPurge := make([]common.Hash, 0) for hash, data := range s.ethTransactions { @@ -289,7 +290,6 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) { } s.logger.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) } - s.mutexEthTx.Unlock() } // syncEthTxResults syncs results from L1 for transactions in the memory structure @@ -1168,7 +1168,9 @@ func (s *SequenceSender) addInfoSequenceBatchEnd(batch *datastream.BatchEnd) { // addNewBatchL2Block adds a new L2 block to the work in progress batch func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { s.mutexSequence.Lock() - s.logger.Infof(".....new L2 block, number %d (batch %d)", l2Block.Number, l2Block.BatchNumber) + defer s.mutexSequence.Unlock() + s.logger.Infof(".....new L2 block, number %d (batch %d) l1infotree %d", + l2Block.Number, l2Block.BatchNumber, l2Block.L1InfotreeIndex) // Current batch data := s.sequenceData[s.wipBatch] @@ -1183,7 +1185,12 @@ func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { ) } data.batch.SetLastCoinbase(common.BytesToAddress(l2Block.Coinbase)) - data.batch.SetL1InfoTreeIndex(l2Block.L1InfotreeIndex) + if l2Block.L1InfotreeIndex != 0 { + data.batch.SetL1InfoTreeIndex(l2Block.L1InfotreeIndex) + } else { + s.logger.Warnf("L2 Block L1InfotreeIndex is 0, we don't change batch L1InfotreeIndex (%d)", + 
data.batch.L1InfoTreeIndex()) + } // New L2 block raw newBlockRaw := state.L2BlockRaw{} @@ -1200,13 +1207,12 @@ func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { blockRaw.DeltaTimestamp = l2Block.DeltaTimestamp blockRaw.IndexL1InfoTree = l2Block.L1InfotreeIndex } - - s.mutexSequence.Unlock() } // addNewBlockTx adds a new Tx to the current L2 block func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) { s.mutexSequence.Lock() + defer s.mutexSequence.Unlock() s.logger.Debugf("........new tx, length %d EGP %d SR %x..", len(l2Tx.Encoded), l2Tx.EffectiveGasPricePercentage, l2Tx.ImStateRoot[:8], ) @@ -1229,7 +1235,6 @@ func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) { // Add Tx blockRaw.Transactions = append(blockRaw.Transactions, l2TxRaw) - s.mutexSequence.Unlock() } // getWipL2Block returns index of the array and pointer to the current L2 block (helper func) diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go index c16fda42..f839cfca 100644 --- a/sequencesender/sequencesender_test.go +++ b/sequencesender/sequencesender_test.go @@ -4,7 +4,10 @@ import ( "testing" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/cdk/state/datastream" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -71,3 +74,44 @@ func TestStreamTx(t *testing.T) { printBatch(decodedBatch, true, true) } + +func TestAddNewBatchL2Block(t *testing.T) { + logger := log.GetDefaultLogger() + txBuilder := txbuilder.NewTxBuilderBananaZKEVM(logger, nil, nil, bind.TransactOpts{}, 100, nil, nil, nil) + sut := SequenceSender{ + logger: logger, + cfg: Config{}, + ethTransactions: make(map[common.Hash]*ethTxData), + ethTxData: make(map[common.Hash][]byte), + sequenceData: make(map[uint64]*sequenceData), + validStream: false, + latestStreamBatch: 0, + seqSendingStopped: false, + TxBuilder: txBuilder, + } + + l2Block := datastream.L2Block{ + Number: 1, + BatchNumber: 1, + L1InfotreeIndex: 1, + } + sut.addNewSequenceBatch(&l2Block) + l2Block = datastream.L2Block{ + Number: 2, + BatchNumber: 1, + L1InfotreeIndex: 0, + } + sut.addNewBatchL2Block(&l2Block) + data := sut.sequenceData[sut.wipBatch] + // L1InfotreeIndex 0 is ignored + require.Equal(t, uint32(1), data.batch.L1InfoTreeIndex(), "new block have index=0 and is ignored") + + l2Block = datastream.L2Block{ + Number: 2, + BatchNumber: 1, + L1InfotreeIndex: 5, + } + sut.addNewBatchL2Block(&l2Block) + data = sut.sequenceData[sut.wipBatch] + require.Equal(t, uint32(5), data.batch.L1InfoTreeIndex(), "new block have index=5 and is set") +} diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 7b451ed8..6d191c4a 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -134,10 +134,26 @@ func (t *TxBuilderBananaBase) NewSequence( sequence.OldAccInputHash = oldAccInputHash sequence.AccInputHash = accInputHash + + err = SequenceSanityCheck(sequence) + if err != nil { + return nil, fmt.Errorf("sequenceSanityCheck fails. 
Err: %w", err) + } res := NewBananaSequence(*sequence) return res, nil } +func SequenceSanityCheck(seq *etherman.SequenceBanana) error { + maxL1InfoIndex, err := calculateMaxL1InfoTreeIndexInsideSequence(seq) + if err != nil { + return err + } + if seq.CounterL1InfoRoot < maxL1InfoIndex+1 { + return fmt.Errorf("wrong CounterL1InfoRoot(%d): BatchL2Data (max=%d) ", seq.CounterL1InfoRoot, maxL1InfoIndex) + } + return nil +} + func (t *TxBuilderBananaBase) getL1InfoRoot(counterL1InfoRoot uint32) (common.Hash, error) { return t.globalExitRootContract.L1InfoRootMap(&bind.CallOpts{Pending: false}, counterL1InfoRoot) } diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go index af4b05c0..3b449084 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -2,14 +2,17 @@ package txbuilder_test import ( "context" + "fmt" "math/big" "testing" + "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" + "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -31,8 +34,15 @@ func TestBananaBaseNewSequenceEmpty(t *testing.T) { seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{}) require.NotNil(t, seq) require.NoError(t, err) - // TODO check values - // require.Equal(t, lastAcc, seq.LastAccInputHash()) +} + +func TestBananaBaseNewSequenceErrorHeaderByNumber(t *testing.T) { + testData := newBananaBaseTestData(t) + testData.l1Client.On("HeaderByNumber", mock.Anything, mock.Anything). + Return(nil, fmt.Errorf("error")) + seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{}) + require.Nil(t, seq) + require.Error(t, err) } func TestBananaBaseNewBatchFromL2Block(t *testing.T) { @@ -79,6 +89,41 @@ func TestBananaBaseNewSequenceBatch(t *testing.T) { // TODO: check that the seq have the right values } +func TestBananaSanityCheck(t *testing.T) { + batch := state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: 1, + ChangeL2BlockHeader: state.ChangeL2BlockHeader{ + DeltaTimestamp: 1, + IndexL1InfoTree: 1, + }, + }, + }, + } + data, err := state.EncodeBatchV2(&batch) + require.NoError(t, err) + require.NotNil(t, data) + seq := etherman.SequenceBanana{ + CounterL1InfoRoot: 2, + Batches: []etherman.Batch{ + { + L2Data: data, + }, + }, + } + err = txbuilder.SequenceSanityCheck(&seq) + require.NoError(t, err, "inside batchl2data max is 1 and counter is 2 (2>=1+1)") + seq.CounterL1InfoRoot = 1 + err = txbuilder.SequenceSanityCheck(&seq) + require.Error(t, err, "inside batchl2data max is 1 and counter is 1. 
The batchl2data is not included in counter") +} + +func TestBananaSanityCheckNilSeq(t *testing.T) { + err := txbuilder.SequenceSanityCheck(nil) + require.Error(t, err, "nil sequence") +} + type testDataBananaBase struct { rollupContract *mocks_txbuilder.RollupBananaBaseContractor getContract *mocks_txbuilder.GlobalExitRootBananaContractor diff --git a/sequencesender/txbuilder/banana_types.go b/sequencesender/txbuilder/banana_types.go index c09095b6..c69d2876 100644 --- a/sequencesender/txbuilder/banana_types.go +++ b/sequencesender/txbuilder/banana_types.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/0xPolygon/cdk/state" "github.com/ethereum/go-ethereum/common" ) @@ -147,3 +148,37 @@ func (b *BananaSequence) LastVirtualBatchNumber() uint64 { func (b *BananaSequence) SetLastVirtualBatchNumber(batchNumber uint64) { b.SequenceBanana.LastVirtualBatchNumber = batchNumber } + +func calculateMaxL1InfoTreeIndexInsideL2Data(l2data []byte) (uint32, error) { + batchRawV2, err := state.DecodeBatchV2(l2data) + if err != nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideL2Data: error decoding batchL2Data, err:%w", err) + } + if batchRawV2 == nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideL2Data: batchRawV2 is nil") + } + maxIndex := uint32(0) + for _, block := range batchRawV2.Blocks { + if block.IndexL1InfoTree > maxIndex { + maxIndex = block.IndexL1InfoTree + } + } + return maxIndex, nil +} + +func calculateMaxL1InfoTreeIndexInsideSequence(seq *etherman.SequenceBanana) (uint32, error) { + if seq == nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideSequence: seq is nil") + } + maxIndex := uint32(0) + for _, batch := range seq.Batches { + index, err := calculateMaxL1InfoTreeIndexInsideL2Data(batch.L2Data) + if err != nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideBatches: error getting batch L1InfoTree , err:%w", err) + } + if index > maxIndex { + maxIndex = index + } + } + return maxIndex, nil +} From a68d90014909ee2e9eace4831f815c8c1d0ebd21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= <93934272+Stefan-Ethernal@users.noreply.github.com> Date: Fri, 20 Sep 2024 21:22:00 +0200 Subject: [PATCH 06/53] test: Custom native gas token transfer E2E test (#79) --- test/access-list-e2e.bats | 33 ++--- test/basic-e2e.bats | 15 +- test/bridge-e2e.bats | 143 ++++++++++++++------ test/helpers/common-setup.bash | 17 ++- test/helpers/common.bash | 111 +++++++++++---- test/helpers/lxly-bridge-test.bash | 46 +++++-- test/scripts/env.sh | 5 +- test/scripts/kurtosis_prepare_params_yml.sh | 1 + 8 files changed, 255 insertions(+), 116 deletions(-) diff --git a/test/access-list-e2e.bats b/test/access-list-e2e.bats index c47b004a..cdcccc6a 100644 --- a/test/access-list-e2e.bats +++ b/test/access-list-e2e.bats @@ -3,14 +3,11 @@ setup() { load 'helpers/common' _common_setup - readonly enclave=${ENCLAVE:-cdk-v1} - readonly sequencer=${KURTOSIS_NODE:-cdk-erigon-sequencer-001} - readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} - readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} + readonly erigon_sequencer_node=${KURTOSIS_ERIGON_SEQUENCER:-cdk-erigon-sequencer-001} + readonly kurtosis_sequencer_wrapper=${KURTOSIS_SEQUENCER_WRAPPER:-"kurtosis service exec $enclave $erigon_sequencer_node"} readonly key=${SENDER_key:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly 
receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} readonly data_dir=${ACL_DATA_DIR:-"/home/erigon/data/dynamic-kurtosis-sequencer/txpool/acls"} - readonly kurtosis_sequencer_wrapper=${KURTOSIS_WRAPPER:-"kurtosis service exec $enclave $sequencer"} } teardown() { @@ -36,7 +33,7 @@ set_acl_mode() { @test "Test Block List - Sending regular transaction when address not in block list" { local value="10ether" run set_acl_mode "blocklist" - run sendTx $key $receiver $value + run sendTx $l2_rpc_url $key $receiver $value assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" @@ -45,7 +42,7 @@ set_acl_mode() { @test "Test Block List - Sending contracts deploy transaction when address not in block list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "blocklist" - run deployContract $key $contract_artifact + run deployContract $l2_rpc_url $key $contract_artifact assert_success @@ -59,7 +56,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "sendTx" - run sendTx $key $receiver $value + run sendTx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -70,7 +67,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "deploy" - run deployContract $key $contract_artifact + run deployContract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -80,7 +77,7 @@ set_acl_mode() { local value="10ether" run set_acl_mode "allowlist" - run sendTx $key $receiver $value + run sendTx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -90,7 +87,7 @@ set_acl_mode() { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "allowlist" - run deployContract $key $contract_artifact + run deployContract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -99,10 +96,10 @@ set_acl_mode() { @test "Test Allow List - Sending regular transaction when address is in allow list" { local value="10ether" - run set_acl_mode "allowlist" - run add_to_access_list "allowlist" "sendTx" - run sendTx $key $receiver $value - + run set_acl_mode "allowlist" + run add_to_access_list "allowlist" "sendTx" + run sendTx $l2_rpc_url $key $receiver $value + assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" } @@ -110,9 +107,9 @@ set_acl_mode() { @test "Test Allow List - Sending contracts deploy transaction when address is in allow list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" - run set_acl_mode "allowlist" - run add_to_access_list "allowlist" "deploy" - run deployContract $key $contract_artifact + run set_acl_mode "allowlist" + run add_to_access_list "allowlist" "deploy" + run deployContract $l2_rpc_url $key $contract_artifact assert_success diff --git a/test/basic-e2e.bats b/test/basic-e2e.bats index 7124dcc2..a3fb4982 100644 --- a/test/basic-e2e.bats +++ b/test/basic-e2e.bats @@ -3,10 +3,7 @@ setup() { load 'helpers/common' _common_setup - readonly enclave=${ENCLAVE:-cdk-v1} - readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} - readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} - readonly 
private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} } @@ -16,7 +13,7 @@ setup() { local value="10ether" # case 1: Transaction successful sender has sufficient balance - run sendTx "$private_key" "$receiver" "$value" + run sendTx "$l2_rpc_url" "$sender_private_key" "$receiver" "$value" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" @@ -37,21 +34,19 @@ setup() { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" # Deploy ERC20Mock - run deployContract "$private_key" "$contract_artifact" + run deployContract "$l2_rpc_url" "$sender_private_key" "$contract_artifact" assert_success contract_addr=$(echo "$output" | tail -n 1) # Mint ERC20 tokens - local mintFnSig="function mint(address receiver, uint256 amount)" local amount="5" - run sendTx "$private_key" "$contract_addr" "$mintFnSig" "$receiver" "$amount" + run sendTx "$l2_rpc_url" "$sender_private_key" "$contract_addr" "$mint_fn_sig" "$receiver" "$amount" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" # Assert that balance is correct - local balanceOfFnSig="function balanceOf(address) (uint256)" - run queryContract "$contract_addr" "$balanceOfFnSig" "$receiver" + run queryContract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$receiver" assert_success receiverBalance=$(echo "$output" | tail -n 1) diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index 98443b3b..c392e647 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -1,43 +1,45 @@ setup() { load 'helpers/common-setup' _common_setup + load 'helpers/common' + load 'helpers/lxly-bridge-test' readonly data_availability_mode=${DATA_AVAILABILITY_MODE:-"cdk-validium"} $PROJECT_ROOT/test/scripts/kurtosis_prepare_params_yml.sh ../kurtosis-cdk $data_availability_mode [ $? -ne 0 ] && echo "Error preparing params.yml" && exit 1 - # Check if the genesis file is already downloaded - if [ ! -f "./tmp/cdk/genesis/genesis.json" ]; then - mkdir -p ./tmp/cdk - kurtosis files download cdk-v1 genesis ./tmp/cdk/genesis - [ $? 
-ne 0 ] && echo "Error downloading genesis file" && exit 1 + if [ -z "$BRIDGE_ADDRESS" ]; then + local combined_json_file="/opt/zkevm/combined.json" + echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 + + # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress + combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) + bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) + BRIDGE_ADDRESS=$bridge_default_address fi - # Download the genesis file - readonly bridge_default_address=$(jq -r ".genesis[] | select(.contractName == \"PolygonZkEVMBridge proxy\") | .address" ./tmp/cdk/genesis/genesis.json) - - readonly skey=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly destination_net=${DESTINATION_NET:-"1"} - readonly destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - readonly ether_value=${ETHER_VALUE:-"0.0200000054"} - readonly token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + + echo "Bridge address=$BRIDGE_ADDRESS" >&3 + + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + destination_net=${DESTINATION_NET:-"1"} + destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} + ether_value=${ETHER_VALUE:-"0.0200000054"} + amount=$(cast to-wei $ether_value ether) + token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=${BRIDGE_ADDRESS:-$bridge_default_address} + readonly bridge_addr=$BRIDGE_ADDRESS readonly meta_bytes=${META_BYTES:-"0x"} - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)"} - readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 cdk-erigon-node-001 http-rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print cdk-v1 zkevm-bridge-service-001 rpc)"} + readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} + readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} readonly dry_run=${DRY_RUN:-"false"} - - readonly amount=$(cast to-wei $ether_value ether) - readonly current_addr="$(cast wallet address --private-key $skey)" - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID()(uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID()(uint32)') + readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" + readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') + readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') } @test "Run deposit" { - load 'helpers/lxly-bridge-test' echo "Running LxLy deposit" >&3 run deposit assert_success @@ -45,27 +47,82 @@ setup() { } @test "Run claim" { - load 'helpers/lxly-bridge-test' - echo "Running LxLy claim" + echo "Running LxLy claim" >&3 - # The script timeout (in seconds). timeout="120" - start_time=$(date +%s) - end_time=$((start_time + timeout)) - - while true; do - current_time=$(date +%s) - if ((current_time > end_time)); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" 
- exit 1 - fi - - run claim - if [ $status -eq 0 ]; then - break - fi - sleep 10 - done - + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" + assert_success +} + +@test "Custom native token transfer" { + # Use GAS_TOKEN_ADDR if provided, otherwise retrieve from file + if [[ -n "$GAS_TOKEN_ADDR" ]]; then + echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 + local gas_token_addr="$GAS_TOKEN_ADDR" + else + echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." >&3 + readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json + run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" + assert_success + assert_output --regexp "0x[a-fA-F0-9]{40}" + local gas_token_addr=$output + fi + + echo "Gas token addr $gas_token_addr, L1 RPC: $l1_rpc_url" >&3 + + # Set receiver address and query for its initial native token balance on the L2 + receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} + local initial_receiver_balance=$(cast balance --ether "$receiver" --rpc-url "$l2_rpc_url") + echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 + + # Query for initial sender balance + run queryContract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local gas_token_init_sender_balance=$(echo "$output" | tail -n 1 | awk '{print $1}') + echo "Initial sender balance $gas_token_init_sender_balance" of gas token on L1 >&3 + + # Mint gas token on L1 + local tokens_amount="0.1ether" + local wei_amount=$(cast --to-unit $tokens_amount wei) + local minter_key=${MINTER_KEY:-"42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa"} + run mint_erc20_tokens "$l1_rpc_url" "$gas_token_addr" "$minter_key" "$sender_addr" "$tokens_amount" + assert_success + + # Assert that balance of gas token (on the L1) is correct + run queryContract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local gas_token_final_sender_balance=$(echo "$output" | + tail -n 1 | + awk '{print $1}') + local expected_balance=$(echo "$gas_token_init_sender_balance + $wei_amount" | + bc | + awk '{print $1}') + + echo "Sender balance ($sender_addr) (gas token L1): $gas_token_final_sender_balance" >&3 + assert_equal "$gas_token_final_sender_balance" "$expected_balance" + + # Send approve transaction to the gas token on L1 + deposit_ether_value="0.1ether" + run sendTx "$l1_rpc_url" "$sender_private_key" "$gas_token_addr" "$approve_fn_sig" "$bridge_addr" "$deposit_ether_value" + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # Deposit + token_addr=$gas_token_addr + destination_addr=$receiver + destination_net=$l2_rpc_network_id + amount=$wei_amount + run deposit + assert_success + + # Claim deposits (settle them on the L2) + timeout="120" + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" + assert_success + + # Validate that the native token of receiver on L2 has increased by the bridge tokens amount + run verify_native_token_balance "$l2_rpc_url" "$receiver" "$initial_receiver_balance" "$tokens_amount" assert_success } diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index b7691366..7cb4dec7 100644 --- a/test/helpers/common-setup.bash +++ b/test/helpers/common-setup.bash @@ -3,11 +3,24 @@ _common_setup() { bats_load_library 'bats-support' bats_load_library 'bats-assert' - + # get the containing directory of this 
file # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, # as those will point to the bats executable's location or the preprocessed file respectively - PROJECT_ROOT="$( cd "$( dirname "$BATS_TEST_FILENAME" )/.." >/dev/null 2>&1 && pwd )" + PROJECT_ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." >/dev/null 2>&1 && pwd)" # make executables in src/ visible to PATH PATH="$PROJECT_ROOT/src:$PATH" + + # ERC20 contracts function signatures + readonly mint_fn_sig="function mint(address,uint256)" + readonly balance_of_fn_sig="function balanceOf(address) (uint256)" + readonly approve_fn_sig="function approve(address,uint256)" + + + # Kurtosis enclave and service identifiers + readonly enclave=${KURTOSIS_ENCLAVE:-cdk-v1} + readonly contracts_container=${KURTOSIS_CONTRACTS:-contracts-001} + readonly contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} + readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} + readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node http-rpc)"} } diff --git a/test/helpers/common.bash b/test/helpers/common.bash index aabae0b6..4857a677 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -1,12 +1,13 @@ #!/usr/bin/env bash function deployContract() { - local private_key="$1" - local contract_artifact="$2" + local rpc_url="$1" + local private_key="$2" + local contract_artifact="$3" # Check if rpc_url is available if [[ -z "$rpc_url" ]]; then - echo "Error: rpc_url environment variable is not set." + echo "Error: rpc_url parameter is not set." return 1 fi @@ -16,13 +17,13 @@ function deployContract() { fi # Get the sender address - local sender_addr=$(cast wallet address "$private_key") + local sender=$(cast wallet address "$private_key") if [[ $? -ne 0 ]]; then echo "Error: Failed to retrieve sender address." return 1 fi - echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $sender_addr)" >&3 + echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $sender)" >&3 # Get bytecode from the contract artifact local bytecode=$(jq -r .bytecode "$contract_artifact") @@ -70,15 +71,16 @@ function deployContract() { } function sendTx() { - # Check if at least 3 arguments are provided - if [[ $# -lt 3 ]]; then - echo "Usage: sendTx [ ...]" + # Check if at least 4 arguments are provided + if [[ $# -lt 4 ]]; then + echo "Usage: sendTx [ ...]" return 1 fi - local private_key="$1" # Sender private key - local receiver_addr="$2" # Receiver address - local value_or_function_sig="$3" # Value or function signature + local rpc_url="$1" # RPC URL + local private_key="$2" # Sender private key + local receiver_addr="$3" # Receiver address + local value_or_function_sig="$4" # Value or function signature # Error handling: Ensure the receiver is a valid Ethereum address if [[ ! "$receiver_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then @@ -86,28 +88,28 @@ function sendTx() { return 1 fi - shift 3 # Shift the first 3 arguments (private_key, receiver_addr, value_or_function_sig) + shift 4 # Shift the first 4 arguments (rpc_url, private_key, receiver_addr, value_or_function_sig) local params=("$@") # Collect all remaining arguments as function parameters # Get sender address from private key - local sender_addr - sender_addr=$(cast wallet address "$private_key") || { + local sender + sender=$(cast wallet address "$private_key") || { echo "Error: Failed to extract the sender address." 
return 1 } # Get initial ether balances of sender and receiver local sender_initial_balance receiver_initial_balance - sender_initial_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + sender_initial_balance=$(cast balance "$sender" --ether --rpc-url "$rpc_url") || return 1 receiver_initial_balance=$(cast balance "$receiver_addr" --ether --rpc-url "$rpc_url") || return 1 # Check if the value_or_function_sig is a numeric value (Ether to be transferred) if [[ "$value_or_function_sig" =~ ^[0-9]+(\.[0-9]+)?(ether)?$ ]]; then # Case: Ether transfer (EOA transaction) - send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "$sender_initial_balance" "$receiver_initial_balance" + send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender" "$sender_initial_balance" "$receiver_initial_balance" else # Case: Smart contract interaction (contract interaction with function signature and parameters) - send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "${params[@]}" + send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender" "${params[@]}" fi } @@ -115,7 +117,7 @@ function send_eoa_transaction() { local private_key="$1" local receiver_addr="$2" local value="$3" - local sender_addr="$4" + local sender="$4" local sender_initial_balance="$5" local receiver_initial_balance="$6" @@ -136,7 +138,7 @@ function send_eoa_transaction() { return 1 } - checkBalances "$sender_addr" "$receiver_addr" "$value" "$tx_hash" "$sender_initial_balance" "$receiver_initial_balance" + check_balances "$sender" "$receiver_addr" "$value" "$tx_hash" "$sender_initial_balance" "$receiver_initial_balance" if [[ $? -ne 0 ]]; then echo "Error: Balance not updated correctly." return 1 @@ -149,7 +151,7 @@ function send_smart_contract_transaction() { local private_key="$1" local receiver_addr="$2" local function_sig="$3" - local sender_addr="$4" + local sender="$4" shift 4 local params=("$@") @@ -185,16 +187,17 @@ function extract_tx_hash() { } function queryContract() { - local addr="$1" # Contract address - local funcSignature="$2" # Function signature - shift 2 # Shift past the first two arguments + local rpc_url="$1" # RPC URL + local addr="$2" # Contract address + local funcSignature="$3" # Function signature + shift 3 # Shift past the first 3 arguments local params=("$@") # Collect remaining arguments as parameters array echo "Querying state of $addr account (RPC URL: $rpc_url) with function signature: '$funcSignature' and params: ${params[*]}" >&3 - # Check if rpc_url is available + # Check if rpc url is available if [[ -z "$rpc_url" ]]; then - echo "Error: rpc_url environment variable is not set." + echo "Error: rpc_url parameter is not provided." 
return 1 fi @@ -221,7 +224,7 @@ function queryContract() { return 0 } -function checkBalances() { +function check_balances() { local sender="$1" local receiver="$2" local amount="$3" @@ -247,8 +250,9 @@ function checkBalances() { fi local sender_final_balance=$(cast balance "$sender" --ether --rpc-url "$rpc_url") || return 1 - local gas_used=$(cast tx "$tx_hash" --rpc-url "$rpc_url" | grep '^gas ' | awk '{print $2}') - local gas_price=$(cast tx "$tx_hash" --rpc-url "$rpc_url" | grep '^gasPrice' | awk '{print $2}') + local tx_output=$(cast tx "$tx_hash" --rpc-url "$rpc_url") + local gas_used=$(tx_output | grep '^gas ' | awk '{print $2}') + local gas_price=$(tx_output | grep '^gasPrice' | awk '{print $2}') local gas_fee=$(echo "$gas_used * $gas_price" | bc) local gas_fee_in_ether=$(cast to-unit "$gas_fee" ether) @@ -275,3 +279,54 @@ function checkBalances() { return 1 fi } + +function verify_native_token_balance() { + local rpc_url="$1" # RPC URL + local account="$2" # account address + local initial_balance="$3" # initial balance in Ether (decimal) + local ether_amount="$4" # amount to be added (in Ether, decimal) + + # Convert initial balance and amount to wei (no decimals) + local initial_balance_wei=$(cast --to-wei "$initial_balance") + + # Trim 'ether' from ether_amount if it exists + ether_amount=$(echo "$ether_amount" | sed 's/ether//') + local amount_wei=$(cast --to-wei "$ether_amount") + + # Get final balance in wei (after the operation) + local final_balance_wei=$(cast balance "$account" --rpc-url "$rpc_url" | awk '{print $1}') + + # Calculate expected final balance (initial_balance + amount) + local expected_final_balance_wei=$(echo "$initial_balance_wei + $amount_wei" | bc) + + # Check if final_balance matches the expected final balance + if [ "$(echo "$final_balance_wei == $expected_final_balance_wei" | bc)" -eq 1 ]; then + echo "✅ Balance verification successful: final balance is correct." + else + echo "❌ Balance verification failed: expected $expected_final_balance_wei but got $final_balance_wei." 
+ exit 1 + fi +} + +function mint_erc20_tokens() { + local rpc_url="$1" # The L1 RPC URL + local erc20_token_addr="$2" # The gas token contract address + local minter_private_key="$3" # The minter private key + local receiver="$4" # The receiver address (for minted tokens) + local tokens_amount="$5" # The amount of tokens to transfer (e.g., "0.1ether") + + # Query the erc20 token balance of the sender + run queryContract "$rpc_url" "$erc20_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local erc20_token_balance=$(echo "$output" | tail -n 1) + + # Log the account's current gas token balance + echo "Initial account balance: $erc20_token_balance wei" >&3 + + # Convert tokens_amount to Wei for comparison + local wei_amount=$(cast --to-unit "$tokens_amount" wei) + + # Mint the required tokens by sending a transaction + run sendTx "$rpc_url" "$minter_private_key" "$erc20_token_addr" "$mint_fn_sig" "$receiver" "$tokens_amount" + assert_success +} diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index bbaf45e1..c1b43533 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -1,25 +1,26 @@ #!/usr/bin/env bash # Error code reference https://hackmd.io/WwahVBZERJKdfK3BbKxzQQ -function deposit () { +function deposit() { readonly deposit_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then - echo "Checking the current ETH balance: " >&3 - cast balance -e --rpc-url $l1_rpc_url $current_addr >&3 + echo "The ETH balance for sender "$sender_addr":" >&3 + cast balance -e --rpc-url $l1_rpc_url $sender_addr >&3 else - echo "Checking the current token balance for token at $token_addr: " >&3 - cast call --rpc-url $l1_rpc_url $token_addr 'balanceOf(address)(uint256)' $current_addr >&3 + echo "The "$token_addr" token balance for sender "$sender_addr":" >&3 + balance_wei=$(cast call --rpc-url "$l1_rpc_url" "$token_addr" "$balance_of_fn_sig" "$sender_addr") + echo "$(cast --from-wei "$balance_wei")" >&3 fi - echo "Attempting to deposit $amount wei to net $destination_net for token $token_addr" >&3 + echo "Attempting to deposit $amount [wei] to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$l1_rpc_url)" >&3 if [[ $dry_run == "true" ]]; then cast calldata $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then - cast send --legacy --private-key $skey --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else - cast send --legacy --private-key $skey --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast send --legacy --private-key $sender_private_key --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes fi fi } @@ -30,7 +31,7 @@ function claim() { readonly claimable_deposit_file=$(mktemp) echo "Getting full list of deposits" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' 
| tee $bridge_deposit_file - + echo "Looking for claimable deposits" >&3 jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file readonly claimable_count=$(jq '. | length' $claimable_deposit_file) @@ -40,7 +41,7 @@ function claim() { echo "We have no claimable deposits at this time" >&3 exit 1 fi - + echo "We have $claimable_count claimable deposits on network $destination_net. Let's get this party started." >&3 readonly current_deposit=$(mktemp) readonly current_proof=$(mktemp) @@ -69,9 +70,30 @@ function claim() { cast calldata $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata cast call --rpc-url $l2_rpc_url $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata else - cast send --legacy --rpc-url $l2_rpc_url --private-key $skey $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + cast send --legacy --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + fi + + done < <(seq 0 $((claimable_count - 1))) +} + +function wait_for_claim() { + local timeout="$1" # timeout (in seconds) + local claim_frequency="$2" # claim frequency (in seconds) + local start_time=$(date +%s) + local end_time=$((start_time + timeout)) + + while true; do + local current_time=$(date +%s) + if ((current_time > end_time)); then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" 
+ exit 1 fi + run claim + if [ $status -eq 0 ]; then + break + fi - done < <(seq 0 $((claimable_count - 1)) ) + sleep "$claim_frequency" + done } diff --git a/test/scripts/env.sh b/test/scripts/env.sh index b81c18a4..2afb2af4 100644 --- a/test/scripts/env.sh +++ b/test/scripts/env.sh @@ -1,8 +1,7 @@ #!/bin/bash ### Common variables -ENCLAVE=cdk-v1 -CDK_ERIGON_NODE_NAME=cdk-erigon-node-001 +KURTOSIS_ENCLAVE=cdk-v1 TMP_CDK_FOLDER=tmp/cdk DEST_KURTOSIS_PARAMS_YML=../$TMP_CDK_FOLDER/e2e-params.yml -KURTOSIS_VERSION=develop KURTOSIS_FOLDER=../kurtosis-cdk +USE_L1_GAS_TOKEN_CONTRACT=true diff --git a/test/scripts/kurtosis_prepare_params_yml.sh b/test/scripts/kurtosis_prepare_params_yml.sh index aa57e272..38f44d51 100755 --- a/test/scripts/kurtosis_prepare_params_yml.sh +++ b/test/scripts/kurtosis_prepare_params_yml.sh @@ -28,3 +28,4 @@ mkdir -p $(dirname $DEST_KURTOSIS_PARAMS_YML) cp $KURTOSIS_FOLDER/params.yml $DEST_KURTOSIS_PARAMS_YML yq -Y --in-place ".args.cdk_node_image = \"cdk\"" $DEST_KURTOSIS_PARAMS_YML yq -Y --in-place ".args.data_availability_mode = \"$DATA_AVAILABILITY_MODE\"" $DEST_KURTOSIS_PARAMS_YML +yq -Y --in-place ".args.zkevm_use_gas_token_contract = $USE_L1_GAS_TOKEN_CONTRACT" $DEST_KURTOSIS_PARAMS_YML From 3e3f348075efdebde6a46dcbd29665d03187172e Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:51:44 +0530 Subject: [PATCH 07/53] test: Add smart contract e2e tests (#85) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: sc tests * fix: tests * fix: test * feat: more tests * fix: tests * fix: tests * fix: seprate keys for contracts deployement * fix: rename sender private key env var * fix: failing e2e tests * chore: rename functions ( bash style) * fix: access lists e2e tests * refactor: apply feedback * fix: polycli version * fix: polycli workflow * refactor: apply feedback --------- Co-authored-by: Stefan Negovanović --- .github/workflows/test-e2e.yml | 10 ++ test/access-list-e2e.bats | 18 ++-- test/basic-e2e.bats | 170 +++++++++++++++++++++++++++++---- test/bridge-e2e.bats | 6 +- test/helpers/common-setup.bash | 2 +- test/helpers/common.bash | 40 ++++---- 6 files changed, 193 insertions(+), 53 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index b20bb982..af54879b 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -48,6 +48,16 @@ jobs: pip3 install yq yq --version + - name: Install polycli + run: | + POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" + tmp_dir=$(mktemp -d) + curl -L "https://github.com/0xPolygon/polygon-cli/releases/download/${POLYCLI_VERSION}/polycli_${POLYCLI_VERSION}_linux_amd64.tar.gz" | tar -xz -C "$tmp_dir" + mv "$tmp_dir"/* /usr/local/bin/polycli + rm -rf "$tmp_dir" + sudo chmod +x /usr/local/bin/polycli + /usr/local/bin/polycli version + - name: Install foundry uses: foundry-rs/foundry-toolchain@v1 diff --git a/test/access-list-e2e.bats b/test/access-list-e2e.bats index cdcccc6a..83947c03 100644 --- a/test/access-list-e2e.bats +++ b/test/access-list-e2e.bats @@ -5,7 +5,7 @@ setup() { readonly erigon_sequencer_node=${KURTOSIS_ERIGON_SEQUENCER:-cdk-erigon-sequencer-001} readonly kurtosis_sequencer_wrapper=${KURTOSIS_SEQUENCER_WRAPPER:-"kurtosis service exec $enclave $erigon_sequencer_node"} - readonly key=${SENDER_key:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly 
key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} readonly data_dir=${ACL_DATA_DIR:-"/home/erigon/data/dynamic-kurtosis-sequencer/txpool/acls"} } @@ -33,7 +33,7 @@ set_acl_mode() { @test "Test Block List - Sending regular transaction when address not in block list" { local value="10ether" run set_acl_mode "blocklist" - run sendTx $l2_rpc_url $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" @@ -42,7 +42,7 @@ set_acl_mode() { @test "Test Block List - Sending contracts deploy transaction when address not in block list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "blocklist" - run deployContract $l2_rpc_url $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_success @@ -56,7 +56,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "sendTx" - run sendTx $l2_rpc_url $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -67,7 +67,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "deploy" - run deployContract $l2_rpc_url $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -77,7 +77,7 @@ set_acl_mode() { local value="10ether" run set_acl_mode "allowlist" - run sendTx $l2_rpc_url $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -87,7 +87,7 @@ set_acl_mode() { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "allowlist" - run deployContract $l2_rpc_url $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -98,7 +98,7 @@ set_acl_mode() { run set_acl_mode "allowlist" run add_to_access_list "allowlist" "sendTx" - run sendTx $l2_rpc_url $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" @@ -109,7 +109,7 @@ set_acl_mode() { run set_acl_mode "allowlist" run add_to_access_list "allowlist" "deploy" - run deployContract $l2_rpc_url $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_success diff --git a/test/basic-e2e.bats b/test/basic-e2e.bats index a3fb4982..1024ac4a 100644 --- a/test/basic-e2e.bats +++ b/test/basic-e2e.bats @@ -8,52 +8,188 @@ setup() { } @test "Send EOA transaction" { - local sender_addr=$(cast wallet address --private-key "$private_key") - local initial_nonce=$(cast nonce "$sender_addr" --rpc-url "$rpc_url") || return 1 + local sender_addr=$(cast wallet address --private-key "$sender_private_key") + local initial_nonce=$(cast nonce "$sender_addr" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } local value="10ether" # case 1: Transaction successful sender has sufficient balance - run sendTx "$l2_rpc_url" "$sender_private_key" "$receiver" "$value" + run send_tx "$l2_rpc_url" 
"$sender_private_key" "$receiver" "$value" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" # case 2: Transaction rejected as sender attempts to transfer more than it has in its wallet. # Transaction will fail pre-validation check on the node and will be dropped subsequently from the pool # without recording it on the chain and hence nonce will not change - local sender_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + local sender_balance=$(cast balance "$sender_addr" --ether --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve balance for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } local excessive_value=$(echo "$sender_balance + 1" | bc)"ether" - run sendTx "$private_key" "$receiver" "$excessive_value" + run send_tx "$l2_rpc_url" "$sender_private_key" "$receiver" "$excessive_value" assert_failure # Check whether the sender's nonce was updated correctly - local final_nonce=$(cast nonce "$sender_addr" --rpc-url "$rpc_url") || return 1 + local final_nonce=$(cast nonce "$sender_addr" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } assert_equal "$final_nonce" "$(echo "$initial_nonce + 1" | bc)" } -@test "Deploy ERC20Mock contract" { +@test "Test ERC20Mock contract" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + wallet_A_output=$(cast wallet new) + address_A=$(echo "$wallet_A_output" | grep "Address" | awk '{print $2}') + address_A_private_key=$(echo "$wallet_A_output" | grep "Private key" | awk '{print $3}') + address_B=$(cast wallet new | grep "Address" | awk '{print $2}') # Deploy ERC20Mock - run deployContract "$l2_rpc_url" "$sender_private_key" "$contract_artifact" + run deploy_contract "$l2_rpc_url" "$sender_private_key" "$contract_artifact" assert_success contract_addr=$(echo "$output" | tail -n 1) # Mint ERC20 tokens local amount="5" - run sendTx "$l2_rpc_url" "$sender_private_key" "$contract_addr" "$mint_fn_sig" "$receiver" "$amount" + run send_tx "$l2_rpc_url" "$sender_private_key" "$contract_addr" "$mint_fn_sig" "$address_A" "$amount" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" - # Assert that balance is correct - run queryContract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$receiver" + ## Case 2: Insufficient gas scenario => Transactions fails + # nonce would not increase since transaction fails at the node's pre-validation check + # Get bytecode from the contract artifact + local bytecode=$(jq -r .bytecode "$contract_artifact") + if [[ -z "$bytecode" || "$bytecode" == "null" ]]; then + echo "Error: Failed to read bytecode from $contract_artifact" + return 1 + fi + + # Estimate gas, gas price and gas cost + local gas_units=$(cast estimate --rpc-url "$l2_rpc_url" --create "$bytecode") + gas_units=$(echo "scale=0; $gas_units / 2" | bc) + local gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") + local value=$(echo "$gas_units * $gas_price" | bc) + local value_ether=$(cast to-unit "$value" ether)"ether" + + # Transfer only half amount of tokens needed for contract deployment fees + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. 
Output:" + echo "$cast_output" + return 1 + fi + + # Fetch initial nonce for address_A + local address_A_initial_nonce=$(cast nonce "$address_A" --rpc-url "$l2_rpc_url") || return 1 + # Attempt to deploy contract with insufficient gas + run deploy_contract "$l2_rpc_url" "$address_A_private_key" "$contract_artifact" + assert_failure + + ## Case 3: Transaction should fail as address_A tries to transfer more tokens than it has + # nonce would not increase + # Transfer funds for gas fees to address_A + value_ether="4ether" + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi + + # Fetch balance of address_A to simulate excessive transfer + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_A" + assert_success + local address_A_Balance=$(echo "$output" | tail -n 1) + address_A_Balance=$(echo "$address_A_Balance" | xargs) + + # Set excessive amount for transfer + local excessive_amount=$(echo "$address_A_Balance + 1" | bc) + + # Attempt transfer of excessive amount from address_A to address_B + local tranferFnSig="transfer(address,uint256)" + run send_tx "$l2_rpc_url" "$address_A_private_key" "$contract_addr" "$tranferFnSig" "$address_B" "$excessive_amount" + assert_failure + + # Verify balance of address_A after failed transaction + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_A" + assert_success + address_A_BalanceAfterFailedTx=$(echo "$output" | tail -n 1) + address_A_BalanceAfterFailedTx=$(echo "$address_A_BalanceAfterFailedTx" | xargs) + + # Ensure balance is unchanged + assert_equal "$address_A_BalanceAfterFailedTx" "$address_A_Balance" + + # Verify balance of address_B is still zero + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_B" assert_success - receiverBalance=$(echo "$output" | tail -n 1) + local address_B_Balance=$(echo "$output" | tail -n 1) + address_B_Balance=$(echo "$address_B_Balance" | xargs) - # Convert balance and amount to a standard format for comparison (e.g., remove any leading/trailing whitespace) - receiverBalance=$(echo "$receiverBalance" | xargs) - amount=$(echo "$amount" | xargs) + assert_equal "$address_B_Balance" "0" - # Check if the balance is equal to the amount - assert_equal "$receiverBalance" "$amount" + # Nonce should not increase + local address_A_final_nonce=$(cast nonce "$address_A" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $address_A using RPC URL: $l2_rpc_url" + return 1 + } + assert_equal "$address_A_final_nonce" "$address_A_initial_nonce" } + + +@test "Deploy and test UniswapV3 contract" { + # Generate new key pair + wallet_A_output=$(cast wallet new) + address_A=$(echo "$wallet_A_output" | grep "Address" | awk '{print $2}') + address_A_private_key=$(echo "$wallet_A_output" | grep "Private key" | awk '{print $3}') + + # Transfer funds for gas + local value_ether="50ether" + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. 
Output:" + echo "$cast_output" + return 1 + fi + + run polycli loadtest uniswapv3 --legacy -v 600 --rpc-url $l2_rpc_url --private-key $address_A_private_key + assert_success + + # Remove ANSI escape codes from the output + output=$(echo "$output" | sed -r "s/\x1B\[[0-9;]*[mGKH]//g") + + # Check if all required Uniswap contracts were deployed + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=WETH9" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapV3Factory" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapInterfaceMulticall" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=ProxyAdmin" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=TickLens" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NFTDescriptor" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NonfungibleTokenPositionDescriptor" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=TransparentUpgradeableProxy" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NonfungiblePositionManager" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=V3Migrator" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapV3Staker" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=QuoterV2" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=SwapRouter02" + + # Check if ERC20 tokens were minted + assert_output --regexp "Minted tokens amount=[0-9]+ recipient=0x[a-fA-F0-9]{40} token=SwapperA" + assert_output --regexp "Minted tokens amount=[0-9]+ recipient=0x[a-fA-F0-9]{40} token=SwapperB" + + # Check if liquidity pool was created and initialized + assert_output --regexp "Pool created and initialized fees=[0-9]+" + + # Check if liquidity was provided to the pool + assert_output --regexp "Liquidity provided to the pool liquidity=[0-9]+" + + # Check if transaction got executed successfully + assert_output --regexp "Starting main load test loop currentNonce=[0-9]+" + assert_output --regexp "Finished main load test loop lastNonce=[0-9]+ startNonce=[0-9]+" + assert_output --regexp "Got final block number currentNonce=[0-9]+ final block number=[0-9]+" + assert_output --regexp "Num errors numErrors=0" + assert_output --regexp "Finished" +} + diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index c392e647..842d87e9 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -77,7 +77,7 @@ setup() { echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 # Query for initial sender balance - run queryContract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" assert_success local gas_token_init_sender_balance=$(echo "$output" | tail -n 1 | awk '{print $1}') echo "Initial sender balance $gas_token_init_sender_balance" of gas token on L1 >&3 @@ -90,7 +90,7 @@ setup() { assert_success # Assert that balance of gas token (on the L1) is correct - run queryContract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" assert_success local gas_token_final_sender_balance=$(echo "$output" | tail -n 1 | @@ -104,7 +104,7 @@ setup() { # Send approve transaction to the gas token on L1 
deposit_ether_value="0.1ether" - run sendTx "$l1_rpc_url" "$sender_private_key" "$gas_token_addr" "$approve_fn_sig" "$bridge_addr" "$deposit_ether_value" + run send_tx "$l1_rpc_url" "$sender_private_key" "$gas_token_addr" "$approve_fn_sig" "$bridge_addr" "$deposit_ether_value" assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index 7cb4dec7..415f211d 100644 --- a/test/helpers/common-setup.bash +++ b/test/helpers/common-setup.bash @@ -23,4 +23,4 @@ _common_setup() { readonly contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node http-rpc)"} -} +} \ No newline at end of file diff --git a/test/helpers/common.bash b/test/helpers/common.bash index 4857a677..821a1f59 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -1,6 +1,6 @@ #!/usr/bin/env bash -function deployContract() { +function deploy_contract() { local rpc_url="$1" local private_key="$2" local contract_artifact="$3" @@ -70,10 +70,10 @@ function deployContract() { return 0 } -function sendTx() { +function send_tx() { # Check if at least 4 arguments are provided if [[ $# -lt 4 ]]; then - echo "Usage: sendTx [ ...]" + echo "Usage: send_tx [ ...]" return 1 fi @@ -98,18 +98,19 @@ function sendTx() { return 1 } - # Get initial ether balances of sender and receiver - local sender_initial_balance receiver_initial_balance - sender_initial_balance=$(cast balance "$sender" --ether --rpc-url "$rpc_url") || return 1 - receiver_initial_balance=$(cast balance "$receiver_addr" --ether --rpc-url "$rpc_url") || return 1 - # Check if the value_or_function_sig is a numeric value (Ether to be transferred) if [[ "$value_or_function_sig" =~ ^[0-9]+(\.[0-9]+)?(ether)?$ ]]; then # Case: Ether transfer (EOA transaction) - send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender" "$sender_initial_balance" "$receiver_initial_balance" + # Get initial ether balances of sender and receiver + local sender_addr=$(cast wallet address --private-key "$private_key") + local sender_initial_balance receiver_initial_balance + sender_initial_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + receiver_initial_balance=$(cast balance "$receiver_addr" --ether --rpc-url "$rpc_url") || return 1 + + send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "$sender_initial_balance" "$receiver_initial_balance" else # Case: Smart contract interaction (contract interaction with function signature and parameters) - send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender" "${params[@]}" + send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "${params[@]}" fi } @@ -121,7 +122,7 @@ function send_eoa_transaction() { local sender_initial_balance="$5" local receiver_initial_balance="$6" - echo "Sending EOA transaction to: $receiver_addr with value: $value" >&3 + echo "Sending EOA transaction (from: $sender, rpc url: $rpc_url) to: $receiver_addr with value: $value" >&3 # Send transaction via cast local cast_output tx_hash @@ -151,21 +152,14 @@ function send_smart_contract_transaction() { local private_key="$1" local receiver_addr="$2" local function_sig="$3" - local 
sender="$4" - shift 4 + shift 3 local params=("$@") - # Verify if the function signature starts with "function" - if [[ ! "$function_sig" =~ ^function\ .+\(.+\)$ ]]; then - echo "Error: Invalid function signature format '$function_sig'." - return 1 - fi - echo "Sending smart contract transaction to $receiver_addr with function signature: '$function_sig' and params: ${params[*]}" >&3 # Send the smart contract interaction using cast local cast_output tx_hash - cast_output=$(cast send --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" "$function_sig" "${params[@]}" --legacy 2>&1) + cast_output=$(cast send "$receiver_addr" --rpc-url "$rpc_url" --private-key "$private_key" --legacy "$function_sig" "${params[@]}" 2>&1) if [[ $? -ne 0 ]]; then echo "Error: Failed to send transaction. Output:" echo "$cast_output" @@ -186,7 +180,7 @@ function extract_tx_hash() { echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1 } -function queryContract() { +function query_contract() { local rpc_url="$1" # RPC URL local addr="$2" # Contract address local funcSignature="$3" # Function signature @@ -316,7 +310,7 @@ function mint_erc20_tokens() { local tokens_amount="$5" # The amount of tokens to transfer (e.g., "0.1ether") # Query the erc20 token balance of the sender - run queryContract "$rpc_url" "$erc20_token_addr" "$balance_of_fn_sig" "$sender_addr" + run query_contract "$rpc_url" "$erc20_token_addr" "$balance_of_fn_sig" "$sender_addr" assert_success local erc20_token_balance=$(echo "$output" | tail -n 1) @@ -327,6 +321,6 @@ function mint_erc20_tokens() { local wei_amount=$(cast --to-unit "$tokens_amount" wei) # Mint the required tokens by sending a transaction - run sendTx "$rpc_url" "$minter_private_key" "$erc20_token_addr" "$mint_fn_sig" "$receiver" "$tokens_amount" + run send_tx "$rpc_url" "$minter_private_key" "$erc20_token_addr" "$mint_fn_sig" "$receiver" "$tokens_amount" assert_success } From 9a694c42045debb2fee5f8713154c198a17a5c17 Mon Sep 17 00:00:00 2001 From: rbpol Date: Wed, 25 Sep 2024 08:07:37 +0100 Subject: [PATCH 08/53] feat: Add e2e reorg tests to syncers (#56) * feat: Added e2e tests to the syncer * fix: UTs * fix: comments * fix: comments 2 * fix: rebase * fix: lint * fix: rebase remove old test * fix: another rebase fix * fix: stress test * fix: ut build * fix: rebuild tree after reorg * fix: comments --------- Co-authored-by: Goran Rojovic --- l1infotreesync/downloader.go | 3 +- l1infotreesync/e2e_test.go | 229 +++++++++++++++++++++++------ l1infotreesync/processor.go | 2 +- lastgersync/evmdownloader.go | 6 +- reorgdetector/reorgdetector.go | 18 ++- reorgdetector/reorgdetector_db.go | 5 +- reorgdetector/reorgdetector_sub.go | 19 ++- sync/evmdownloader.go | 99 ++++++++----- sync/evmdownloader_test.go | 22 +-- sync/evmdriver.go | 19 +-- sync/evmdriver_test.go | 21 +-- sync/mock_downloader_test.go | 16 +- tree/appendonlytree.go | 2 +- tree/tree_test.go | 83 +++++++++++ 14 files changed, 404 insertions(+), 140 deletions(-) diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index 2051f7b5..16ccb37a 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -86,7 +86,8 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr l, err, ) } - log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", common.Bytes2Hex(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) + log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", + common.BytesToHash(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) return nil } 
diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 146c1924..21820059 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -160,14 +160,124 @@ func TestE2E(t *testing.T) { } } +func TestWithReorgs(t *testing.T) { + ctx := context.Background() + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathReorg := t.TempDir() + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) + require.NoError(t, err) + client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) + require.NoError(t, err) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}) + require.NoError(t, err) + require.NoError(t, rd.Start(ctx)) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25) + require.NoError(t, err) + go syncer.Start(ctx) + + // Commit block + header, err := client.Client().HeaderByHash(ctx, client.Commit()) // Block 3 + require.NoError(t, err) + reorgFrom := header.Hash() + fmt.Println("start from header:", header.Number) + + updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } + + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(1, 1) + + // Block 4 + commitBlocks(t, client, 1, time.Second*5) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root + expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Assert L1 Info tree root + expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) + require.NoError(t, err) + info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) + require.NoError(t, err) + + require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) + require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + + // Forking from block 3 + err = client.Fork(reorgFrom) + require.NoError(t, err) + + // Block 4, 5, 6 after the fork + commitBlocks(t, client, 3, time.Millisecond*500) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork - should be zero since there are 
no events in the block after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.ErrorContains(t, err, "not found") // rollup exit tree reorged, it does not have any exits in it + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Forking from block 3 again + err = client.Fork(reorgFrom) + require.NoError(t, err) + time.Sleep(time.Millisecond * 500) + + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(2, 1) + + // Block 4, 5, 6, 7 after the fork + commitBlocks(t, client, 4, time.Millisecond*100) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork - should be zero since there are no events in the block after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) +} + func TestStressAndReorgs(t *testing.T) { const ( - totalIterations = 200 // Have tested with much larger number (+10k) - enableReorgs = false // test fails when set to true - reorgEveryXIterations = 53 - maxReorgDepth = 5 - maxEventsPerBlock = 7 - maxRollups = 31 + totalIterations = 3 + blocksInIteration = 140 + reorgEveryXIterations = 70 + reorgSizeInBlocks = 2 + maxRollupID = 31 + extraBlocksToMine = 10 ) ctx := context.Background() @@ -182,58 +292,48 @@ func TestStressAndReorgs(t *testing.T) { rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100) require.NoError(t, err) go syncer.Start(ctx) - for i := 0; i < totalIterations; i++ { - for j := 0; j < i%maxEventsPerBlock; j++ { - switch j % 3 { - case 0: // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - case 1: // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) - case 2: // Update Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, false) + updateL1InfoTreeAndRollupExitTree := func(i, j int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) + _, err = 
verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "fffa" + strconv.Itoa(j)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } + + for i := 1; i <= totalIterations; i++ { + for j := 1; j <= blocksInIteration; j++ { + commitBlocks(t, client, 1, time.Millisecond*10) + + if j%reorgEveryXIterations == 0 { + currentBlockNum, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - } - } - client.Commit() - time.Sleep(time.Microsecond * 30) // Sleep just enough for goroutine to switch - if enableReorgs && i%reorgEveryXIterations == 0 { - reorgDepth := i%maxReorgDepth + 1 - currentBlockNum, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - targetReorgBlockNum := currentBlockNum - uint64(reorgDepth) - if targetReorgBlockNum < currentBlockNum { // we are dealing with uints... - reorgBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(targetReorgBlockNum))) + + block, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(currentBlockNum-reorgSizeInBlocks))) require.NoError(t, err) - err = client.Fork(reorgBlock.Hash()) + reorgFrom := block.Hash() + err = client.Fork(reorgFrom) require.NoError(t, err) + } else { + updateL1InfoTreeAndRollupExitTree(i, j, uint32(j%maxRollupID)+1) } } } - syncerUpToDate := false - var errMsg string - lb, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - for i := 0; i < 50; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb { - syncerUpToDate = true + commitBlocks(t, client, 1, time.Millisecond*10) - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) - } - require.True(t, syncerUpToDate, errMsg) + waitForSyncerToCatchUp(ctx, t, syncer, client) // Assert rollup exit root expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) @@ -252,6 +352,39 @@ func TestStressAndReorgs(t *testing.T) { info, err := syncer.GetInfoByIndex(ctx, lastRoot.Index) require.NoError(t, err, fmt.Sprintf("index: %d", lastRoot.Index)) - require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) + t.Logf("expectedL1InfoRoot: %s", common.Hash(expectedL1InfoRoot).String()) require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) +} + +func waitForSyncerToCatchUp(ctx context.Context, t *testing.T, syncer *l1infotreesync.L1InfoTreeSync, client *simulated.Backend) { + t.Helper() + + syncerUpToDate := false + var errMsg string + + for i := 0; i < 200; i++ { + lpb, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + lb, err := client.Client().BlockNumber(ctx) + require.NoError(t, err) + if lpb == lb { + syncerUpToDate = true + break + } + time.Sleep(time.Second / 2) + errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) + } + + require.True(t, syncerUpToDate, errMsg) +} + +// commitBlocks commits the specified number of blocks with the given client and waits for the specified duration after each block +func commitBlocks(t *testing.T, client *simulated.Backend, numBlocks int, waitDuration time.Duration) { + t.Helper() + + for i := 0; i < numBlocks; i++ { + client.Commit() + time.Sleep(waitDuration) + } } diff --git 
a/l1infotreesync/processor.go b/l1infotreesync/processor.go index a672c5ef..0bb31cc3 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -240,7 +240,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } }() - if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { + if _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { return fmt.Errorf("err: %w", err) } diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go index 97235c28..e76bb578 100644 --- a/lastgersync/evmdownloader.go +++ b/lastgersync/evmdownloader.go @@ -105,7 +105,11 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC break } - blockHeader := d.GetBlockHeader(ctx, lastBlock) + blockHeader, isCanceled := d.GetBlockHeader(ctx, lastBlock) + if isCanceled { + return + } + block := &sync.EVMBlock{ EVMBlockHeader: sync.EVMBlockHeader{ Num: blockHeader.Num, diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 7a995bac..496a844c 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -120,12 +120,20 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { errGroup errgroup.Group ) - rd.trackedBlocksLock.Lock() - defer rd.trackedBlocksLock.Unlock() + subscriberIDs := rd.getSubscriberIDs() - for id, hdrs := range rd.trackedBlocks { + for _, id := range subscriberIDs { id := id - hdrs := hdrs + + // This is done like this because of a possible deadlock + // between AddBlocksToTrack and detectReorgInTrackedList + rd.trackedBlocksLock.RLock() + hdrs, ok := rd.trackedBlocks[id] + rd.trackedBlocksLock.RUnlock() + + if !ok { + continue + } errGroup.Go(func() error { headers := hdrs.getSorted() @@ -136,7 +144,7 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { if !ok || currentHeader == nil { if currentHeader, err = rd.client.HeaderByNumber(ctx, new(big.Int).SetUint64(hdr.Num)); err != nil { headersCacheLock.Unlock() - return fmt.Errorf("failed to get the header: %w", err) + return fmt.Errorf("failed to get the header %d: %w", hdr.Num, err) } headersCache[hdr.Num] = currentHeader } diff --git a/reorgdetector/reorgdetector_db.go b/reorgdetector/reorgdetector_db.go index 3174cbc0..79bd6cd4 100644 --- a/reorgdetector/reorgdetector_db.go +++ b/reorgdetector/reorgdetector_db.go @@ -53,6 +53,10 @@ func (rd *ReorgDetector) getTrackedBlocks(ctx context.Context) (map[string]*head // saveTrackedBlock saves the tracked block for a subscriber in db and in memory func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b header) error { + rd.trackedBlocksLock.Lock() + + // this has to go after the lock, because of a possible deadlock + // between AddBlocksToTrack and detectReorgInTrackedList tx, err := rd.db.BeginRw(ctx) if err != nil { return err @@ -60,7 +64,6 @@ func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b head defer tx.Rollback() - rd.trackedBlocksLock.Lock() hdrs, ok := rd.trackedBlocks[id] if !ok || hdrs.isEmpty() { hdrs = newHeadersList(b) diff --git a/reorgdetector/reorgdetector_sub.go b/reorgdetector/reorgdetector_sub.go index 675a81c5..c5002a2b 100644 --- a/reorgdetector/reorgdetector_sub.go +++ b/reorgdetector/reorgdetector_sub.go @@ -34,9 +34,24 @@ func (rd *ReorgDetector) Subscribe(id string) (*Subscription, error) { func (rd *ReorgDetector) notifySubscriber(id string, startingBlock header) { // Notify subscriber about this particular 
reorg rd.subscriptionsLock.RLock() - if sub, ok := rd.subscriptions[id]; ok { + sub, ok := rd.subscriptions[id] + rd.subscriptionsLock.RUnlock() + + if ok { sub.ReorgedBlock <- startingBlock.Num <-sub.ReorgProcessed } - rd.subscriptionsLock.RUnlock() +} + +// getSubscriberIDs returns a list of subscriber IDs +func (rd *ReorgDetector) getSubscriberIDs() []string { + rd.subscriptionsLock.RLock() + defer rd.subscriptionsLock.RUnlock() + + ids := make([]string, 0, len(rd.subscriptions)) + for id := range rd.subscriptions { + ids = append(ids, id) + } + + return ids } diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index c9c4e661..13539f2f 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "math/big" "time" @@ -24,7 +25,7 @@ type EVMDownloaderInterface interface { WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log - GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader + GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) } type LogAppenderMap map[common.Hash]func(b *EVMBlock, l types.Log) error @@ -101,8 +102,13 @@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock { // Indicate the last downloaded block if there are not events on it d.log.Debugf("sending block %d to the driver (without events)", toBlock) + header, isCanceled := d.GetBlockHeader(ctx, toBlock) + if isCanceled { + return + } + downloadedCh <- EVMBlock{ - EVMBlockHeader: d.GetBlockHeader(ctx, toBlock), + EVMBlockHeader: header, } } fromBlock = toBlock + 1 @@ -170,44 +176,53 @@ func (d *EVMDownloaderImplementation) WaitForNewBlocks( } func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock { - blocks := []EVMBlock{} - logs := d.GetLogs(ctx, fromBlock, toBlock) - for _, l := range logs { - if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { - b := d.GetBlockHeader(ctx, l.BlockNumber) - if b.Hash != l.BlockHash { - d.log.Infof( - "there has been a block hash change between the event query and the block query "+ - "for block %d: %s vs %s. Retrying.", - l.BlockNumber, b.Hash, l.BlockHash, - ) - return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) + select { + case <-ctx.Done(): + return nil + default: + blocks := []EVMBlock{} + logs := d.GetLogs(ctx, fromBlock, toBlock) + for _, l := range logs { + if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { + b, canceled := d.GetBlockHeader(ctx, l.BlockNumber) + if canceled { + return nil + } + + if b.Hash != l.BlockHash { + d.log.Infof( + "there has been a block hash change between the event query and the block query "+ + "for block %d: %s vs %s. 
Retrying.", + l.BlockNumber, b.Hash, l.BlockHash, + ) + return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) + } + blocks = append(blocks, EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: l.BlockNumber, + Hash: l.BlockHash, + Timestamp: b.Timestamp, + ParentHash: b.ParentHash, + }, + Events: []interface{}{}, + }) } - blocks = append(blocks, EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: l.BlockNumber, - Hash: l.BlockHash, - Timestamp: b.Timestamp, - ParentHash: b.ParentHash, - }, - Events: []interface{}{}, - }) - } - for { - attempts := 0 - err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l) - if err != nil { - attempts++ - d.log.Error("error trying to append log: ", err) - d.rh.Handle("getLogs", attempts) - continue + for { + attempts := 0 + err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l) + if err != nil { + attempts++ + d.log.Error("error trying to append log: ", err) + d.rh.Handle("getLogs", attempts) + continue + } + break } - break } - } - return blocks + return blocks + } } func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log { @@ -224,6 +239,11 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to for { unfilteredLogs, err = d.ethClient.FilterLogs(ctx, query) if err != nil { + if errors.Is(err, context.Canceled) { + // context is canceled, we don't want to fatal on max attempts in this case + return nil + } + attempts++ d.log.Error("error calling FilterLogs to eth client: ", err) d.rh.Handle("getLogs", attempts) @@ -243,11 +263,16 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to return logs } -func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { +func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { attempts := 0 for { header, err := d.ethClient.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) if err != nil { + if errors.Is(err, context.Canceled) { + // context is canceled, we don't want to fatal on max attempts in this case + return EVMBlockHeader{}, true + } + attempts++ d.log.Errorf("error getting block header for block %d, err: %v", blockNum, err) d.rh.Handle("getBlockHeader", attempts) @@ -258,6 +283,6 @@ func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockN Hash: header.Hash(), ParentHash: header.ParentHash, Timestamp: header.Time, - } + }, false } } diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index 59c43b8f..04c92e72 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -222,9 +222,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b1) d.On("GetEventsByBlockRange", mock.Anything, uint64(0), uint64(1)). - Return([]EVMBlock{}) + Return([]EVMBlock{}, false) d.On("GetBlockHeader", mock.Anything, uint64(1)). - Return(b1.EVMBlockHeader) + Return(b1.EVMBlockHeader, false) // iteration 1: wait for next block to be created d.On("WaitForNewBlocks", mock.Anything, uint64(1)). @@ -240,7 +240,7 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b2) d.On("GetEventsByBlockRange", mock.Anything, uint64(2), uint64(2)). - Return([]EVMBlock{b2}) + Return([]EVMBlock{b2}, false) // iteration 3: wait for next block to be created (jump to block 8) d.On("WaitForNewBlocks", mock.Anything, uint64(2)). 
@@ -270,9 +270,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b6, b7, b8) d.On("GetEventsByBlockRange", mock.Anything, uint64(3), uint64(8)). - Return([]EVMBlock{b6, b7}) + Return([]EVMBlock{b6, b7}, false) d.On("GetBlockHeader", mock.Anything, uint64(8)). - Return(b8.EVMBlockHeader) + Return(b8.EVMBlockHeader, false) // iteration 5: wait for next block to be created (jump to block 30) d.On("WaitForNewBlocks", mock.Anything, uint64(8)). @@ -288,9 +288,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b19) d.On("GetEventsByBlockRange", mock.Anything, uint64(9), uint64(19)). - Return([]EVMBlock{}) + Return([]EVMBlock{}, false) d.On("GetBlockHeader", mock.Anything, uint64(19)). - Return(b19.EVMBlockHeader) + Return(b19.EVMBlockHeader, false) // iteration 7: from block 20 to 30, events on last block b30 := EVMBlock{ @@ -302,7 +302,7 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b30) d.On("GetEventsByBlockRange", mock.Anything, uint64(20), uint64(30)). - Return([]EVMBlock{b30}) + Return([]EVMBlock{b30}, false) // iteration 8: wait for next block to be created (jump to block 35) d.On("WaitForNewBlocks", mock.Anything, uint64(30)). @@ -369,14 +369,16 @@ func TestGetBlockHeader(t *testing.T) { // at first attempt clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock := d.GetBlockHeader(ctx, blockNum) + actualBlock, isCanceled := d.GetBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) + assert.False(t, isCanceled) // after error from client clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, errors.New("foo")).Once() clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock = d.GetBlockHeader(ctx, blockNum) + actualBlock, isCanceled = d.GetBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) + assert.False(t, isCanceled) } func buildAppender() LogAppenderMap { diff --git a/sync/evmdriver.go b/sync/evmdriver.go index ae7388e0..7865f645 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -71,6 +71,7 @@ reset: attempts int err error ) + for { lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx) if err != nil { @@ -84,18 +85,19 @@ reset: cancellableCtx, cancel := context.WithCancel(ctx) defer cancel() + log.Info("Starting sync...", " lastProcessedBlock", lastProcessedBlock) // start downloading downloadCh := make(chan EVMBlock, d.downloadBufferSize) - go d.downloader.Download(cancellableCtx, lastProcessedBlock, downloadCh) + go d.downloader.Download(cancellableCtx, lastProcessedBlock+1, downloadCh) for { select { case b := <-downloadCh: - d.log.Debug("handleNewBlock") + d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) d.handleNewBlock(ctx, b) case firstReorgedBlock := <-d.reorgSub.ReorgedBlock: - d.log.Debug("handleReorg") - d.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) + d.log.Debug("handleReorg from block: ", firstReorgedBlock) + d.handleReorg(ctx, cancel, firstReorgedBlock) goto reset } } @@ -130,15 +132,10 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { } } -func (d *EVMDriver) handleReorg( - ctx context.Context, cancel context.CancelFunc, downloadCh chan EVMBlock, firstReorgedBlock uint64, -) { +func (d *EVMDriver) handleReorg(ctx context.Context, cancel context.CancelFunc, firstReorgedBlock uint64) { // stop downloader cancel() - _, ok := <-downloadCh - for ok { - _, ok = <-downloadCh - } + 
// handle reorg attempts := 0 for { diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index 907dac28..c17370e1 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -198,36 +198,19 @@ func TestHandleReorg(t *testing.T) { // happy path _, cancel := context.WithCancel(ctx) - downloadCh := make(chan EVMBlock) firstReorgedBlock := uint64(5) pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - close(downloadCh) + go driver.handleReorg(ctx, cancel, firstReorgedBlock) done := <-reorgProcessed require.True(t, done) - // download ch sends some garbage - _, cancel = context.WithCancel(ctx) - downloadCh = make(chan EVMBlock) - firstReorgedBlock = uint64(6) - pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - downloadCh <- EVMBlock{} - downloadCh <- EVMBlock{} - downloadCh <- EVMBlock{} - close(downloadCh) - done = <-reorgProcessed - require.True(t, done) - // processor fails 2 times _, cancel = context.WithCancel(ctx) - downloadCh = make(chan EVMBlock) firstReorgedBlock = uint64(7) pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() pm.On("Reorg", ctx, firstReorgedBlock).Return(nil).Once() - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - close(downloadCh) + go driver.handleReorg(ctx, cancel, firstReorgedBlock) done = <-reorgProcessed require.True(t, done) } diff --git a/sync/mock_downloader_test.go b/sync/mock_downloader_test.go index c965efb6..f28045b5 100644 --- a/sync/mock_downloader_test.go +++ b/sync/mock_downloader_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. 
package sync @@ -20,7 +20,7 @@ func (_m *EVMDownloaderMock) Download(ctx context.Context, fromBlock uint64, dow } // GetBlockHeader provides a mock function with given fields: ctx, blockNum -func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { +func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { ret := _m.Called(ctx, blockNum) if len(ret) == 0 { @@ -28,13 +28,23 @@ func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64 } var r0 EVMBlockHeader + var r1 bool + if rf, ok := ret.Get(0).(func(context.Context, uint64) (EVMBlockHeader, bool)); ok { + return rf(ctx, blockNum) + } if rf, ok := ret.Get(0).(func(context.Context, uint64) EVMBlockHeader); ok { r0 = rf(ctx, blockNum) } else { r0 = ret.Get(0).(EVMBlockHeader) } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, uint64) bool); ok { + r1 = rf(ctx, blockNum) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 } // GetEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go index 418a576b..5b14b962 100644 --- a/tree/appendonlytree.go +++ b/tree/appendonlytree.go @@ -113,7 +113,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { } // Reverse the siblings to go from leafs to root - for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { + for i, j := 0, len(siblings)-1; i == j; i, j = i+1, j-1 { siblings[i], siblings[j] = siblings[j], siblings[i] } diff --git a/tree/tree_test.go b/tree/tree_test.go index b5278723..dc2cfc9e 100644 --- a/tree/tree_test.go +++ b/tree/tree_test.go @@ -2,6 +2,7 @@ package tree_test import ( "context" + "database/sql" "encoding/json" "fmt" "os" @@ -18,6 +19,88 @@ import ( "github.com/stretchr/testify/require" ) +func TestCheckExpectedRoot(t *testing.T) { + createTreeDB := func() *sql.DB { + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debug("DB created at: ", dbPath) + require.NoError(t, migrations.RunMigrations(dbPath)) + treeDB, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + return treeDB + } + + addLeaves := func(merkletree *tree.AppendOnlyTree, + treeDB *sql.DB, + numOfLeavesToAdd, from int) { + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + for i := from; i < from+numOfLeavesToAdd; i++ { + require.NoError(t, merkletree.AddLeaf(tx, uint64(i), 0, types.Leaf{ + Index: uint32(i), + Hash: common.HexToHash(fmt.Sprintf("%x", i)), + })) + } + + require.NoError(t, tx.Commit()) + } + + t.Run("Check when no reorg", func(t *testing.T) { + numOfLeavesToAdd := 10 + indexToCheck := uint32(numOfLeavesToAdd - 1) + + treeDB := createTreeDB() + merkleTree := tree.NewAppendOnlyTree(treeDB, "") + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) + + expectedRoot, err := merkleTree.GetLastRoot(context.Background()) + require.NoError(t, err) + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) + + root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) + require.NoError(t, err) + require.Equal(t, expectedRoot.Hash, root2.Hash) + require.Equal(t, expectedRoot.Index, root2.Index) + }) + + t.Run("Check after rebuild tree when reorg", func(t *testing.T) { + numOfLeavesToAdd := 10 + indexToCheck := uint32(numOfLeavesToAdd - 1) + treeDB := createTreeDB() + merkleTree := tree.NewAppendOnlyTree(treeDB, "") + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) + + expectedRoot, err := 
merkleTree.GetLastRoot(context.Background()) + require.NoError(t, err) + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) + + // reorg tree + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + require.NoError(t, merkleTree.Reorg(tx, uint64(indexToCheck+1))) + require.NoError(t, tx.Commit()) + + // rebuild cache on adding new leaf + tx, err = db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + require.NoError(t, merkleTree.AddLeaf(tx, uint64(indexToCheck+1), 0, types.Leaf{ + Index: indexToCheck + 1, + Hash: common.HexToHash(fmt.Sprintf("%x", indexToCheck+1)), + })) + require.NoError(t, tx.Commit()) + + root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) + require.NoError(t, err) + require.Equal(t, expectedRoot.Hash, root2.Hash) + require.Equal(t, expectedRoot.Index, root2.Index) + }) +} + func TestMTAddLeaf(t *testing.T) { data, err := os.ReadFile("testvectors/root-vectors.json") require.NoError(t, err) From 0f18cf58ba2dbb26899e5463a128c850ecf1a21d Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Wed, 25 Sep 2024 14:09:50 +0530 Subject: [PATCH 09/53] ci: update polycli version (#89) --- .github/workflows/test-resequence.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 9ac51af0..47bcaa37 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -51,7 +51,11 @@ jobs: - name: Install polycli run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" + POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" + tmp_dir=$(mktemp -d) + curl -L "https://github.com/0xPolygon/polygon-cli/releases/download/${POLYCLI_VERSION}/polycli_${POLYCLI_VERSION}_linux_amd64.tar.gz" | tar -xz -C "$tmp_dir" + mv "$tmp_dir"/* /usr/local/bin/polycli + rm -rf "$tmp_dir" sudo chmod +x /usr/local/bin/polycli /usr/local/bin/polycli version From e9a1ec081ecdd9c8bd33bac040fcec6394af9ffc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 26 Sep 2024 12:40:14 +0200 Subject: [PATCH 10/53] feat: update relase regex (#93) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b22f8710..8ba59fa4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -34,7 +34,7 @@ jobs: VERSION: ${{ steps.meta.outputs.version }} steps: - name: validate tag - run: echo ${{ github.ref_name }} | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$' + run: echo ${{ github.ref_name }} | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+*?$' - name: Prepare run: | From be2703dddfd563e2a24457dc855238ffa17de7b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:11:50 +0200 Subject: [PATCH 11/53] Revert "feat: update relase regex (#93)" (#94) This reverts commit e9a1ec081ecdd9c8bd33bac040fcec6394af9ffc. 
--- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8ba59fa4..b22f8710 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -34,7 +34,7 @@ jobs: VERSION: ${{ steps.meta.outputs.version }} steps: - name: validate tag - run: echo ${{ github.ref_name }} | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+*?$' + run: echo ${{ github.ref_name }} | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$' - name: Prepare run: | From ec3263c90be263a192d239dbb53dd20668aaba0e Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Thu, 26 Sep 2024 15:03:36 +0200 Subject: [PATCH 12/53] ci: pin kurtosis-cdk version in tests (#92) --- .github/workflows/test-e2e.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index af54879b..721cbf09 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -66,6 +66,7 @@ jobs: with: repository: 0xPolygon/kurtosis-cdk path: "kurtosis-cdk" + ref: "v0.2.11" - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 From 6c148e9678c96cb34618a7cd7819219cb3e9b605 Mon Sep 17 00:00:00 2001 From: laisolizq <37299818+laisolizq@users.noreply.github.com> Date: Mon, 30 Sep 2024 09:31:33 +0200 Subject: [PATCH 13/53] add more tests claimtest (#96) --- bridgesync/claimcalldata_test.go | 948 +++++++++++++++++- bridgesync/downloader.go | 2 - test/contracts/abi/claimmocktest.abi | 1 + test/contracts/bin/claimmocktest.bin | 1 + test/contracts/bind.sh | 3 +- .../contracts/claimmocktest/ClaimMockTest.sol | 53 + test/contracts/claimmocktest/claimmocktest.go | 328 ++++++ test/contracts/compile.sh | 9 +- 8 files changed, 1340 insertions(+), 5 deletions(-) create mode 100644 test/contracts/abi/claimmocktest.abi create mode 100644 test/contracts/bin/claimmocktest.bin create mode 100644 test/contracts/claimmocktest/ClaimMockTest.sol create mode 100644 test/contracts/claimmocktest/claimmocktest.go diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index 2e574a4e..b8b432ae 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -10,6 +10,7 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/claimmock" "github.com/0xPolygon/cdk/test/contracts/claimmockcaller" + "github.com/0xPolygon/cdk/test/contracts/claimmocktest" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -49,7 +50,9 @@ func TestClaimCalldata(t *testing.T) { // Deploy contracts bridgeAddr, _, bridgeContract, err := claimmock.DeployClaimmock(auth, client) require.NoError(t, err) - _, _, claimCaller, err := claimmockcaller.DeployClaimmockcaller(auth, client, bridgeAddr) + claimCallerAddr, _, claimCaller, err := claimmockcaller.DeployClaimmockcaller(auth, client, bridgeAddr) + require.NoError(t, err) + _, _, claimTest, err := claimmocktest.DeployClaimmocktest(auth, client, bridgeAddr, claimCallerAddr) require.NoError(t, err) proofLocal := [32][32]byte{} @@ -84,6 +87,18 @@ func TestClaimCalldata(t *testing.T) { DestinationNetwork: 0, Metadata: []byte{}, } + expectedClaim3 := Claim{ + OriginNetwork: 69, + OriginAddress: common.HexToAddress("ffaaffaa"), + DestinationAddress: common.HexToAddress("2233445566"), + Amount: big.NewInt(5), + MainnetExitRoot: common.HexToHash("5ca1e"), + RollupExitRoot: common.HexToHash("dead"), + ProofLocalExitRoot: 
proofLocalH, + ProofRollupExitRoot: proofRollupH, + DestinationNetwork: 0, + Metadata: []byte{}, + } auth.GasLimit = 999999 // for some reason gas estimation fails :( abi, err := claimmock.ClaimmockMetaData.GetAbi() @@ -941,6 +956,937 @@ func TestClaimCalldata(t *testing.T) { expectedClaim: expectedClaim2, }) + // indirect + indirect call claim message bytes + expectedClaim.GlobalIndex = big.NewInt(426) + expectedClaim.IsMessage = true + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.ClaimTestInternal( + auth, + expectedClaimBytes, + false, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "indirect + indirect call to claim message bytes", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + reverted = [2]bool{false, false} + + // 2 indirect + indirect call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim2TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect + indirect call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "2 indirect + indirect call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted3 := [3]bool{false, false, false} + + // 3 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + 
expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err := abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 3 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[2], + expectedClaim: expectedClaim3, + }) + + // 3 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, 
err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 3 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[2], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{true, false, false} + + // 1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, true, false} + + // 1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + 
expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, false, true} + + // 1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + 
r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted3 = [3]bool{true, false, false} + + // 1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, true, false} + + // 1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + 
require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, false, true} + + // 1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + 
expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted3 = [3]bool{true, true, false} + + // 2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, true, true} + + // 1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + 
proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + reverted3 = [3]bool{true, false, true} + + // 1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + for _, tc := range testCases { log.Info(tc.description) t.Run(tc.description, func(t *testing.T) { diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index 9be7a6bc..dbea8c8f 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -151,8 +151,6 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H } // find the claim linked to the event using DFS - // TODO: take into account potential reverts that may be found on the path, - // and other edge cases callStack := stack.New() callStack.Push(*c) for { diff --git a/test/contracts/abi/claimmocktest.abi b/test/contracts/abi/claimmocktest.abi new file mode 100644 index 
00000000..14e67686 --- /dev/null +++ b/test/contracts/abi/claimmocktest.abi @@ -0,0 +1 @@ +[{"inputs":[{"internalType":"contract IClaimMock","name":"_claimMock","type":"address"},{"internalType":"contract IClaimMockCaller","name":"_claimMockCaller","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"bytes","name":"claim1","type":"bytes"},{"internalType":"bytes","name":"claim2","type":"bytes"},{"internalType":"bool[2]","name":"reverted","type":"bool[2]"}],"name":"claim2TestInternal","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"claim1","type":"bytes"},{"internalType":"bytes","name":"claim2","type":"bytes"},{"internalType":"bytes","name":"claim3","type":"bytes"},{"internalType":"bool[3]","name":"reverted","type":"bool[3]"}],"name":"claim3TestInternal","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"claimMock","outputs":[{"internalType":"contract IClaimMock","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"claimMockCaller","outputs":[{"internalType":"contract IClaimMockCaller","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes","name":"claim","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimTestInternal","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/test/contracts/bin/claimmocktest.bin b/test/contracts/bin/claimmocktest.bin new file mode 100644 index 00000000..6bb6d170 --- /dev/null +++ b/test/contracts/bin/claimmocktest.bin @@ -0,0 +1 @@ +60c0346100a157601f61072e38819003918201601f19168301916001600160401b038311848410176100a65780849260409485528339810103126100a15780516001600160a01b039182821682036100a1576020015191821682036100a15760a05260805260405161067190816100bd82396080518181816102d5015281816103870152818161046a015261054e015260a05181818161031a01526105c20152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816348f0c6801461006a575080636e53085414610065578063837a84701461006057806383f5b0061461005b57639bee34681461005657600080fd5b610349565b610304565b6102bf565b610217565b346100f45760c03660031901126100f45767ffffffffffffffff6004358181116100f05761009c903690600401610142565b6024358281116100ec576100b4903690600401610142565b916044359081116100ec576100cd903690600401610142565b36608312156100ec576100e9926100e3366101c6565b92610533565b80f35b8380fd5b8280fd5b80fd5b634e487b7160e01b600052604160045260246000fd5b67ffffffffffffffff811161012157604052565b6100f7565b6040810190811067ffffffffffffffff82111761012157604052565b81601f820112156101a55780359067ffffffffffffffff928383116101215760405193601f8401601f19908116603f011685019081118582101761012157604052828452602083830101116101a557816000926020809301838601378301015290565b600080fd5b6024359081151582036101a557565b359081151582036101a557565b90604051916060830183811067ffffffffffffffff821117610121576040528260c49182116101a5576064905b8282106101ff57505050565b6020809161020c846101b9565b8152019101906101f3565b346101a55760803660031901126101a55767ffffffffffffffff6004358181116101a557610249903690600401610142565b906024359081116101a557610262903690600401610142565b36606312156101a5576040519061027882610126565b819260843681116101a5576044945b81861061029c57505061029a9350610467565b005b602080916102a9886101b9565b815201950194610287565b60009103126101a557565b346101a55760003660031901126101a5576040517f000000000000000000000000000
00000000000000000000000000000000000006001600160a01b03168152602090f35b346101a55760003660031901126101a5576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101a557600060403660031901126100f45760043567ffffffffffffffff81116103f75761037c903690600401610142565b816103856101aa565b7f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316803b156100f0576103d793836040518096819582946327e3584360e01b84526004840161043b565b03925af180156103f2576103e9575080f35b6100e99061010d565b61045b565b5080fd5b919082519283825260005b848110610427575050826000602080949584010152601f8019910116010190565b602081830181015184830182015201610406565b906104536020919493946040845260408401906103fb565b931515910152565b6040513d6000823e3d90fd5b917f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031691823b156101a557604051631cf865cf60e01b815260806004820152938492916104d6916104c49060848601906103fb565b848103600319016024860152906103fb565b90600090604484015b60028310610517575050509181600081819503925af180156103f2576105025750565b8061050f6105159261010d565b806102b4565b565b81511515815286945060019290920191602091820191016104df565b91926000906020810151610632575b80516001600160a01b037f000000000000000000000000000000000000000000000000000000000000000081169490911515853b156101a557600061059d91604051809381926327e3584360e01b9b8c84526004840161043b565b0381838a5af19283156103f25760409560208094610aac9460009761061f575b5001917f0000000000000000000000000000000000000000000000000000000000000000165af1500151151590803b156101a55761060e93600080946040519687958694859384526004840161043b565b03925af180156103f2576105025750565b8061050f61062c9261010d565b386105bd565b6001915061054256fea264697066735822122091357ca0b4807d5579dc633a7d2a9263efbfe31944c644c21b7ccf83594a9e2c64736f6c63430008120033 \ No newline at end of file diff --git a/test/contracts/bind.sh b/test/contracts/bind.sh index 957fd956..25ddd782 100755 --- a/test/contracts/bind.sh +++ b/test/contracts/bind.sh @@ -10,4 +10,5 @@ gen() { gen verifybatchesmock gen claimmock -gen claimmockcaller \ No newline at end of file +gen claimmockcaller +gen claimmocktest \ No newline at end of file diff --git a/test/contracts/claimmocktest/ClaimMockTest.sol b/test/contracts/claimmocktest/ClaimMockTest.sol new file mode 100644 index 00000000..81f748a7 --- /dev/null +++ b/test/contracts/claimmocktest/ClaimMockTest.sol @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity 0.8.18; + +interface IClaimMock { + event ClaimEvent(uint256 globalIndex, uint32 originNetwork, address originAddress, address destinationAddress, uint256 amount); + function claimAsset(bytes32[32] calldata smtProofLocalExitRoot,bytes32[32] calldata smtProofRollupExitRoot,uint256 globalIndex,bytes32 mainnetExitRoot,bytes32 rollupExitRoot,uint32 originNetwork,address originTokenAddress,uint32 destinationNetwork,address destinationAddress,uint256 amount,bytes calldata metadata) external; + function claimMessage(bytes32[32] calldata smtProofLocalExitRoot,bytes32[32] calldata smtProofRollupExitRoot,uint256 globalIndex,bytes32 mainnetExitRoot,bytes32 rollupExitRoot,uint32 originNetwork,address originAddress,uint32 destinationNetwork,address destinationAddress,uint256 amount,bytes calldata metadata) external; +} + +interface IClaimMockCaller { + function claimAsset(bytes32[32] calldata smtProofLocalExitRoot, bytes32[32] calldata smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address 
originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes calldata metadata, bool reverted) external; + function claimMessage(bytes32[32] calldata smtProofLocalExitRoot, bytes32[32] calldata smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes calldata metadata, bool reverted) external; + function claimBytes(bytes memory claim, bool reverted) external; + function claim2Bytes(bytes memory claim1, bytes memory claim2, bool[2] memory reverted) external; +} + +contract ClaimMockTest { + IClaimMockCaller public immutable claimMockCaller; + IClaimMock public immutable claimMock; + + uint8 constant _DEPOSIT_CONTRACT_TREE_DEPTH = 32; + + constructor( + IClaimMock _claimMock, + IClaimMockCaller _claimMockCaller + ) { + claimMock = _claimMock; + claimMockCaller = _claimMockCaller; + } + + function claimTestInternal(bytes memory claim, bool reverted) external { + claimMockCaller.claimBytes(claim, reverted); + } + + function claim2TestInternal(bytes memory claim1, bytes memory claim2, bool[2] memory reverted) external { + claimMockCaller.claim2Bytes(claim1, claim2, reverted); + } + + function claim3TestInternal(bytes memory claim1, bytes memory claim2, bytes memory claim3, bool[3] memory reverted) external { + address addr = address(claimMock); + uint256 value1 = 0; + if(reverted[1]) { + value1 = 1; + } + claimMockCaller.claimBytes(claim1, reverted[0]); + assembly { + let success1 := call(gas(), addr, value1, add(claim2, 32), 0xaac, 0x20, 0) + } + claimMockCaller.claimBytes(claim3, reverted[2]); + } + +} \ No newline at end of file diff --git a/test/contracts/claimmocktest/claimmocktest.go b/test/contracts/claimmocktest/claimmocktest.go new file mode 100644 index 00000000..2b4494a1 --- /dev/null +++ b/test/contracts/claimmocktest/claimmocktest.go @@ -0,0 +1,328 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package claimmocktest + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// ClaimmocktestMetaData contains all meta data concerning the Claimmocktest contract. 
+var ClaimmocktestMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"_claimMock\",\"type\":\"address\"},{\"internalType\":\"contractIClaimMockCaller\",\"name\":\"_claimMockCaller\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim1\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim2\",\"type\":\"bytes\"},{\"internalType\":\"bool[2]\",\"name\":\"reverted\",\"type\":\"bool[2]\"}],\"name\":\"claim2TestInternal\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim1\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim2\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim3\",\"type\":\"bytes\"},{\"internalType\":\"bool[3]\",\"name\":\"reverted\",\"type\":\"bool[3]\"}],\"name\":\"claim3TestInternal\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMock\",\"outputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMockCaller\",\"outputs\":[{\"internalType\":\"contractIClaimMockCaller\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimTestInternal\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60c0346100a157601f61072e38819003918201601f19168301916001600160401b038311848410176100a65780849260409485528339810103126100a15780516001600160a01b039182821682036100a1576020015191821682036100a15760a05260805260405161067190816100bd82396080518181816102d5015281816103870152818161046a015261054e015260a05181818161031a01526105c20152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816348f0c6801461006a575080636e53085414610065578063837a84701461006057806383f5b0061461005b57639bee34681461005657600080fd5b610349565b610304565b6102bf565b610217565b346100f45760c03660031901126100f45767ffffffffffffffff6004358181116100f05761009c903690600401610142565b6024358281116100ec576100b4903690600401610142565b916044359081116100ec576100cd903690600401610142565b36608312156100ec576100e9926100e3366101c6565b92610533565b80f35b8380fd5b8280fd5b80fd5b634e487b7160e01b600052604160045260246000fd5b67ffffffffffffffff811161012157604052565b6100f7565b6040810190811067ffffffffffffffff82111761012157604052565b81601f820112156101a55780359067ffffffffffffffff928383116101215760405193601f8401601f19908116603f011685019081118582101761012157604052828452602083830101116101a557816000926020809301838601378301015290565b600080fd5b6024359081151582036101a557565b359081151582036101a557565b90604051916060830183811067ffffffffffffffff821117610121576040528260c49182116101a5576064905b8282106101ff57505050565b6020809161020c846101b9565b8152019101906101f3565b346101a55760803660031901126101a55767ffffffffffffffff6004358181116101a557610249903690600401610142565b906024359081116101a557610262903690600401610142565b36606312156101a5576040519061027882610126565b819260843681116101a5576044945b81861061029c57505061029a9350610467565b005b602080916102a9886101b9565b815201950194610287565b60009103126101a557565b346101a55760003660031901126101a5576040517f0000000000000000000000000000000000000000000000
0000000000000000006001600160a01b03168152602090f35b346101a55760003660031901126101a5576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101a557600060403660031901126100f45760043567ffffffffffffffff81116103f75761037c903690600401610142565b816103856101aa565b7f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316803b156100f0576103d793836040518096819582946327e3584360e01b84526004840161043b565b03925af180156103f2576103e9575080f35b6100e99061010d565b61045b565b5080fd5b919082519283825260005b848110610427575050826000602080949584010152601f8019910116010190565b602081830181015184830182015201610406565b906104536020919493946040845260408401906103fb565b931515910152565b6040513d6000823e3d90fd5b917f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031691823b156101a557604051631cf865cf60e01b815260806004820152938492916104d6916104c49060848601906103fb565b848103600319016024860152906103fb565b90600090604484015b60028310610517575050509181600081819503925af180156103f2576105025750565b8061050f6105159261010d565b806102b4565b565b81511515815286945060019290920191602091820191016104df565b91926000906020810151610632575b80516001600160a01b037f000000000000000000000000000000000000000000000000000000000000000081169490911515853b156101a557600061059d91604051809381926327e3584360e01b9b8c84526004840161043b565b0381838a5af19283156103f25760409560208094610aac9460009761061f575b5001917f0000000000000000000000000000000000000000000000000000000000000000165af1500151151590803b156101a55761060e93600080946040519687958694859384526004840161043b565b03925af180156103f2576105025750565b8061050f61062c9261010d565b386105bd565b6001915061054256fea264697066735822122091357ca0b4807d5579dc633a7d2a9263efbfe31944c644c21b7ccf83594a9e2c64736f6c63430008120033", +} + +// ClaimmocktestABI is the input ABI used to generate the binding from. +// Deprecated: Use ClaimmocktestMetaData.ABI instead. +var ClaimmocktestABI = ClaimmocktestMetaData.ABI + +// ClaimmocktestBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use ClaimmocktestMetaData.Bin instead. +var ClaimmocktestBin = ClaimmocktestMetaData.Bin + +// DeployClaimmocktest deploys a new Ethereum contract, binding an instance of Claimmocktest to it. +func DeployClaimmocktest(auth *bind.TransactOpts, backend bind.ContractBackend, _claimMock common.Address, _claimMockCaller common.Address) (common.Address, *types.Transaction, *Claimmocktest, error) { + parsed, err := ClaimmocktestMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ClaimmocktestBin), backend, _claimMock, _claimMockCaller) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Claimmocktest{ClaimmocktestCaller: ClaimmocktestCaller{contract: contract}, ClaimmocktestTransactor: ClaimmocktestTransactor{contract: contract}, ClaimmocktestFilterer: ClaimmocktestFilterer{contract: contract}}, nil +} + +// Claimmocktest is an auto generated Go binding around an Ethereum contract. +type Claimmocktest struct { + ClaimmocktestCaller // Read-only binding to the contract + ClaimmocktestTransactor // Write-only binding to the contract + ClaimmocktestFilterer // Log filterer for contract events +} + +// ClaimmocktestCaller is an auto generated read-only Go binding around an Ethereum contract. 
+type ClaimmocktestCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ClaimmocktestTransactor is an auto generated write-only Go binding around an Ethereum contract. +type ClaimmocktestTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ClaimmocktestFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type ClaimmocktestFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ClaimmocktestSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type ClaimmocktestSession struct { + Contract *Claimmocktest // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ClaimmocktestCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ClaimmocktestCallerSession struct { + Contract *ClaimmocktestCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ClaimmocktestTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ClaimmocktestTransactorSession struct { + Contract *ClaimmocktestTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ClaimmocktestRaw is an auto generated low-level Go binding around an Ethereum contract. +type ClaimmocktestRaw struct { + Contract *Claimmocktest // Generic contract binding to access the raw methods on +} + +// ClaimmocktestCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ClaimmocktestCallerRaw struct { + Contract *ClaimmocktestCaller // Generic read-only contract binding to access the raw methods on +} + +// ClaimmocktestTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type ClaimmocktestTransactorRaw struct { + Contract *ClaimmocktestTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewClaimmocktest creates a new instance of Claimmocktest, bound to a specific deployed contract. +func NewClaimmocktest(address common.Address, backend bind.ContractBackend) (*Claimmocktest, error) { + contract, err := bindClaimmocktest(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Claimmocktest{ClaimmocktestCaller: ClaimmocktestCaller{contract: contract}, ClaimmocktestTransactor: ClaimmocktestTransactor{contract: contract}, ClaimmocktestFilterer: ClaimmocktestFilterer{contract: contract}}, nil +} + +// NewClaimmocktestCaller creates a new read-only instance of Claimmocktest, bound to a specific deployed contract. +func NewClaimmocktestCaller(address common.Address, caller bind.ContractCaller) (*ClaimmocktestCaller, error) { + contract, err := bindClaimmocktest(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ClaimmocktestCaller{contract: contract}, nil +} + +// NewClaimmocktestTransactor creates a new write-only instance of Claimmocktest, bound to a specific deployed contract. 
+func NewClaimmocktestTransactor(address common.Address, transactor bind.ContractTransactor) (*ClaimmocktestTransactor, error) { + contract, err := bindClaimmocktest(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ClaimmocktestTransactor{contract: contract}, nil +} + +// NewClaimmocktestFilterer creates a new log filterer instance of Claimmocktest, bound to a specific deployed contract. +func NewClaimmocktestFilterer(address common.Address, filterer bind.ContractFilterer) (*ClaimmocktestFilterer, error) { + contract, err := bindClaimmocktest(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ClaimmocktestFilterer{contract: contract}, nil +} + +// bindClaimmocktest binds a generic wrapper to an already deployed contract. +func bindClaimmocktest(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ClaimmocktestMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Claimmocktest *ClaimmocktestRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Claimmocktest.Contract.ClaimmocktestCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Claimmocktest *ClaimmocktestRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimmocktestTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Claimmocktest *ClaimmocktestRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimmocktestTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Claimmocktest *ClaimmocktestCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Claimmocktest.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Claimmocktest *ClaimmocktestTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Claimmocktest.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Claimmocktest *ClaimmocktestTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Claimmocktest.Contract.contract.Transact(opts, method, params...) +} + +// ClaimMock is a free data retrieval call binding the contract method 0x83f5b006. 
+// +// Solidity: function claimMock() view returns(address) +func (_Claimmocktest *ClaimmocktestCaller) ClaimMock(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Claimmocktest.contract.Call(opts, &out, "claimMock") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// ClaimMock is a free data retrieval call binding the contract method 0x83f5b006. +// +// Solidity: function claimMock() view returns(address) +func (_Claimmocktest *ClaimmocktestSession) ClaimMock() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMock(&_Claimmocktest.CallOpts) +} + +// ClaimMock is a free data retrieval call binding the contract method 0x83f5b006. +// +// Solidity: function claimMock() view returns(address) +func (_Claimmocktest *ClaimmocktestCallerSession) ClaimMock() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMock(&_Claimmocktest.CallOpts) +} + +// ClaimMockCaller is a free data retrieval call binding the contract method 0x837a8470. +// +// Solidity: function claimMockCaller() view returns(address) +func (_Claimmocktest *ClaimmocktestCaller) ClaimMockCaller(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Claimmocktest.contract.Call(opts, &out, "claimMockCaller") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// ClaimMockCaller is a free data retrieval call binding the contract method 0x837a8470. +// +// Solidity: function claimMockCaller() view returns(address) +func (_Claimmocktest *ClaimmocktestSession) ClaimMockCaller() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMockCaller(&_Claimmocktest.CallOpts) +} + +// ClaimMockCaller is a free data retrieval call binding the contract method 0x837a8470. +// +// Solidity: function claimMockCaller() view returns(address) +func (_Claimmocktest *ClaimmocktestCallerSession) ClaimMockCaller() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMockCaller(&_Claimmocktest.CallOpts) +} + +// Claim2TestInternal is a paid mutator transaction binding the contract method 0x6e530854. +// +// Solidity: function claim2TestInternal(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactor) Claim2TestInternal(opts *bind.TransactOpts, claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmocktest.contract.Transact(opts, "claim2TestInternal", claim1, claim2, reverted) +} + +// Claim2TestInternal is a paid mutator transaction binding the contract method 0x6e530854. +// +// Solidity: function claim2TestInternal(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmocktest *ClaimmocktestSession) Claim2TestInternal(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim2TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, reverted) +} + +// Claim2TestInternal is a paid mutator transaction binding the contract method 0x6e530854. 
+// +// Solidity: function claim2TestInternal(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactorSession) Claim2TestInternal(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim2TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, reverted) +} + +// Claim3TestInternal is a paid mutator transaction binding the contract method 0x48f0c680. +// +// Solidity: function claim3TestInternal(bytes claim1, bytes claim2, bytes claim3, bool[3] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactor) Claim3TestInternal(opts *bind.TransactOpts, claim1 []byte, claim2 []byte, claim3 []byte, reverted [3]bool) (*types.Transaction, error) { + return _Claimmocktest.contract.Transact(opts, "claim3TestInternal", claim1, claim2, claim3, reverted) +} + +// Claim3TestInternal is a paid mutator transaction binding the contract method 0x48f0c680. +// +// Solidity: function claim3TestInternal(bytes claim1, bytes claim2, bytes claim3, bool[3] reverted) returns() +func (_Claimmocktest *ClaimmocktestSession) Claim3TestInternal(claim1 []byte, claim2 []byte, claim3 []byte, reverted [3]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim3TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, claim3, reverted) +} + +// Claim3TestInternal is a paid mutator transaction binding the contract method 0x48f0c680. +// +// Solidity: function claim3TestInternal(bytes claim1, bytes claim2, bytes claim3, bool[3] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactorSession) Claim3TestInternal(claim1 []byte, claim2 []byte, claim3 []byte, reverted [3]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim3TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, claim3, reverted) +} + +// ClaimTestInternal is a paid mutator transaction binding the contract method 0x9bee3468. +// +// Solidity: function claimTestInternal(bytes claim, bool reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactor) ClaimTestInternal(opts *bind.TransactOpts, claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmocktest.contract.Transact(opts, "claimTestInternal", claim, reverted) +} + +// ClaimTestInternal is a paid mutator transaction binding the contract method 0x9bee3468. +// +// Solidity: function claimTestInternal(bytes claim, bool reverted) returns() +func (_Claimmocktest *ClaimmocktestSession) ClaimTestInternal(claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimTestInternal(&_Claimmocktest.TransactOpts, claim, reverted) +} + +// ClaimTestInternal is a paid mutator transaction binding the contract method 0x9bee3468. 
+// +// Solidity: function claimTestInternal(bytes claim, bool reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactorSession) ClaimTestInternal(claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimTestInternal(&_Claimmocktest.TransactOpts, claim, reverted) +} diff --git a/test/contracts/compile.sh b/test/contracts/compile.sh index faeba125..7dd357a9 100755 --- a/test/contracts/compile.sh +++ b/test/contracts/compile.sh @@ -13,5 +13,12 @@ mv -f ClaimMock.bin bin/claimmock.bin docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmockcaller/ClaimMockCaller.sol -o /contracts --abi --bin --overwrite --optimize --via-ir mv -f ClaimMockCaller.abi abi/claimmockcaller.abi mv -f ClaimMockCaller.bin bin/claimmockcaller.bin + +docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmocktest/ClaimMockTest.sol -o /contracts --abi --bin --overwrite --optimize --via-ir +mv -f ClaimMockTest.abi abi/claimmocktest.abi +mv -f ClaimMockTest.bin bin/claimmocktest.bin + rm -f IClaimMock.abi -rm -f IClaimMock.bin \ No newline at end of file +rm -f IClaimMock.bin +rm -f IClaimMockCaller.abi +rm -f IClaimMockCaller.bin \ No newline at end of file From c4c790f5f6cf1e3d9e288aefcdd8c96b26f5cff3 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 30 Sep 2024 10:37:09 +0200 Subject: [PATCH 14/53] feat: Use InitL1InfoRootMap (#87) - Fix l1infotreesync. Error UNIQUE constraint failed: rollup_exit_root.hash on VerifyBatches Event - l1infotreesync: Add verification for contract address. The problem is that if you set bad address cdk run normally but you don't get any information about L1InfoTree. - l1infotreesync: Add support to `InitL1InfoRootMap` - Allow to use `InitL1InfoRootMap` if there are no leaves on L1InfoTree Internal: - Fix local config file generation for debug on vscode (`./scripts/local_config`) - Add support to `contractVersions` - Remove param `-custom-network-file` that is no longer used - Refactor `l1infotreesync/processor.go` in multiples files - Change some tree functions to use a `tx db.Querier` instead of `ctx context.Context`: context was not used to do DB query was using `db` directly. 
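  As an illustration of the `db.Querier` change, a minimal before/after sketch
  (schematic only: the receiver name and return types are abbreviated, the real
  definitions live in the tree package; the corresponding `nil` call sites can be
  seen in the l1infotreesync.go hunk of this patch):

    // Before: the getter received a context that was never used for the query.
    func (t *Tree) GetLastRoot(ctx context.Context) (types.Root, error)

    // After: it receives a db.Querier, so callers either pass nil (the tree falls
    // back to its own DB handle) or pass an open transaction and read within that tx.
    func (t *Tree) GetLastRoot(tx db.Querier) (types.Root, error)

    root, err := s.processor.l1InfoTree.GetLastRoot(nil)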
In some test I need to query over current tx --- .github/workflows/test-e2e.yml | 1 + bridgesync/mock_l2_test.go | 24 +- cmd/run.go | 3 +- config/default.go | 22 +- l1infotreesync/downloader.go | 62 +- l1infotreesync/downloader_test.go | 55 + l1infotreesync/e2e_test.go | 11 +- l1infotreesync/l1infotreesync.go | 37 +- .../migrations/l1infotreesync0002.sql | 14 + l1infotreesync/migrations/migrations.go | 7 + l1infotreesync/mock_reorgdetector_test.go | 2 +- l1infotreesync/mocks/eth_clienter.go | 1086 +++++++++++++++++ l1infotreesync/processor.go | 116 +- l1infotreesync/processor_initl1inforootmap.go | 37 + .../processor_initl1inforootmap_test.go | 67 + l1infotreesync/processor_test.go | 79 +- l1infotreesync/processor_verifybatches.go | 104 ++ .../processor_verifybatches_test.go | 127 ++ reorgdetector/mock_eth_client.go | 2 +- scripts/local_config | 45 +- sequencesender/txbuilder/banana_base.go | 93 +- sequencesender/txbuilder/banana_base_test.go | 40 + .../txbuilder/banana_validium_test.go | 4 + sequencesender/txbuilder/banana_zkevm_test.go | 4 + .../mocks_txbuilder/l1_info_syncer.go | 58 + sonar-project.properties | 4 +- sync/mock_l2_test.go | 2 +- sync/mock_processor_test.go | 2 +- sync/mock_reorgdetector_test.go | 2 +- test/Makefile | 35 +- test/config/test.kurtosis_template.toml | 29 +- test/helpers/aggoracle_e2e.go | 3 +- test/helpers/mock_ethtxmanager.go | 2 +- tree/tree.go | 11 +- tree/tree_test.go | 20 +- tree/updatabletree_test.go | 49 + 36 files changed, 2050 insertions(+), 209 deletions(-) create mode 100644 l1infotreesync/downloader_test.go create mode 100644 l1infotreesync/migrations/l1infotreesync0002.sql create mode 100644 l1infotreesync/mocks/eth_clienter.go create mode 100644 l1infotreesync/processor_initl1inforootmap.go create mode 100644 l1infotreesync/processor_initl1inforootmap_test.go create mode 100644 l1infotreesync/processor_verifybatches.go create mode 100644 l1infotreesync/processor_verifybatches_test.go create mode 100644 tree/updatabletree_test.go diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 721cbf09..c89275c7 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -65,6 +65,7 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk + ref: fix/missing_cdk_config_rollupmanager path: "kurtosis-cdk" ref: "v0.2.11" diff --git a/bridgesync/mock_l2_test.go b/bridgesync/mock_l2_test.go index a8f33ef8..adbff51f 100644 --- a/bridgesync/mock_l2_test.go +++ b/bridgesync/mock_l2_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. 
package bridgesync @@ -12,6 +12,8 @@ import ( mock "github.com/stretchr/testify/mock" + rpc "github.com/ethereum/go-ethereum/rpc" + types "github.com/ethereum/go-ethereum/core/types" ) @@ -138,6 +140,26 @@ func (_m *L2Mock) CallContract(ctx context.Context, call ethereum.CallMsg, block return r0, r1 } +// Client provides a mock function with given fields: +func (_m *L2Mock) Client() *rpc.Client { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Client") + } + + var r0 *rpc.Client + if rf, ok := ret.Get(0).(func() *rpc.Client); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.Client) + } + } + + return r0 +} + // CodeAt provides a mock function with given fields: ctx, contract, blockNumber func (_m *L2Mock) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, contract, blockNumber) diff --git a/cmd/run.go b/cmd/run.go index 0b744243..68f4acdd 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -492,6 +492,7 @@ func runL1InfoTreeSyncerIfNeeded( cfg.L1InfoTreeSync.InitialBlock, cfg.L1InfoTreeSync.RetryAfterErrorPeriod.Duration, cfg.L1InfoTreeSync.MaxRetryAttemptsAfterError, + l1infotreesync.FlagNone, ) if err != nil { log.Fatal(err) @@ -511,7 +512,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client log.Debugf("dialing L1 client at: %s", urlRPCL1) l1CLient, err := ethclient.Dial(urlRPCL1) if err != nil { - log.Fatal(err) + log.Fatalf("failed to create client for L1 using URL: %s. Err:%v", urlRPCL1, err) } return l1CLient diff --git a/config/default.go b/config/default.go index 377e9033..e02a37ac 100644 --- a/config/default.go +++ b/config/default.go @@ -5,6 +5,18 @@ const DefaultValues = ` ForkUpgradeBatchNumber = 0 ForkUpgradeNewForkId = 0 +[Etherman] + URL="http://localhost:8545" + ForkIDChunkSize=100 + [Etherman.EthermanConfig] + URL="http://localhost:8545" + MultiGasProvider=false + L1ChainID=1337 + HTTPHeaders=[] + [Etherman.EthermanConfig.Etherscan] + ApiKey="" + Url="https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=" + [Common] NetworkID = 1 IsValidiumMode = false @@ -141,7 +153,7 @@ DBPath = "/tmp/reorgdetectorl1" DBPath = "/tmp/reorgdetectorl2" [L1InfoTreeSync] -DBPath = "/tmp/L1InfoTreeSync" +DBPath = "/tmp/L1InfoTreeSync.sqlite" GlobalExitRootAddr="0x8464135c8F25Da09e49BC8782676a84730C318bC" SyncBlockChunkSize=10 BlockFinality="LatestBlock" @@ -250,4 +262,12 @@ RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "1s" DownloadBufferSize = 100 + +[NetworkConfig.L1] +L1ChainID = 0 +PolAddr = "0x0000000000000000000000000000000000000000" +ZkEVMAddr = "0x0000000000000000000000000000000000000000" +RollupManagerAddr = "0x0000000000000000000000000000000000000000" +GlobalExitRootManagerAddr = "0x0000000000000000000000000000000000000000" + ` diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index 16ccb37a..ed3c7efb 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -33,15 +33,68 @@ type EthClienter interface { bind.ContractBackend } -func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Address) (sync.LogAppenderMap, error) { - ger, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) +func checkSMCIsRollupManager(rollupManagerAddr common.Address, + rollupManagerContract *polygonrollupmanager.Polygonrollupmanager) error { + bridgeAddr, err := 
rollupManagerContract.BridgeAddress(nil) if err != nil { - return nil, err + return fmt.Errorf("fail sanity check RollupManager(%s) Contract. Err: %w", rollupManagerAddr.String(), err) + } + log.Infof("sanity check rollupManager(%s) OK. bridgeAddr: %s", rollupManagerAddr.String(), bridgeAddr.String()) + return nil +} + +func checkSMCIsGlobalExitRoot(globalExitRootAddr common.Address, + gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2) error { + depositCount, err := gerContract.DepositCount(nil) + if err != nil { + return fmt.Errorf("fail sanity check GlobalExitRoot(%s) Contract. Err: %w", globalExitRootAddr.String(), err) } - rm, err := polygonrollupmanager.NewPolygonrollupmanager(rollupManager, client) + log.Infof("sanity check GlobalExitRoot(%s) OK. DepositCount: %v", globalExitRootAddr.String(), depositCount) + return nil +} + +func sanityCheckContracts(globalExitRoot, rollupManager common.Address, + gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + rollupManagerContract *polygonrollupmanager.Polygonrollupmanager) error { + errGER := checkSMCIsGlobalExitRoot(globalExitRoot, gerContract) + errRollup := checkSMCIsRollupManager(rollupManager, rollupManagerContract) + if errGER != nil || errRollup != nil { + err := fmt.Errorf("sanityCheckContracts: fails sanity check contracts. ErrGER: %w, ErrRollup: %w", errGER, errRollup) + log.Error(err) + return err + } + return nil +} + +func createContracts(client EthClienter, globalExitRoot, rollupManager common.Address) ( + *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + *polygonrollupmanager.Polygonrollupmanager, + error) { + gerContract, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) if err != nil { + return nil, nil, err + } + + rollupManagerContract, err := polygonrollupmanager.NewPolygonrollupmanager(rollupManager, client) + if err != nil { + return nil, nil, err + } + return gerContract, rollupManagerContract, nil +} + +func buildAppender(client EthClienter, globalExitRoot, + rollupManager common.Address, flags CreationFlags) (sync.LogAppenderMap, error) { + ger, rm, err := createContracts(client, globalExitRoot, rollupManager) + if err != nil { + err := fmt.Errorf("buildAppender: fails contracts creation. Err:%w", err) + log.Error(err) return nil, err } + err = sanityCheckContracts(globalExitRoot, rollupManager, ger, rm) + if err != nil && flags&FlagAllowWrongContractsAddrs == 0 { + return nil, fmt.Errorf("buildAppender: fails sanity check contracts. 
Err:%w", err) + } + appender := make(sync.LogAppenderMap) appender[initL1InfoRootMapSignature] = func(b *sync.EVMBlock, l types.Log) error { init, err := ger.ParseInitL1InfoRootMap(l) @@ -91,6 +144,7 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr return nil } + // This event is coming from RollupManager appender[verifyBatchesSignature] = func(b *sync.EVMBlock, l types.Log) error { verifyBatches, err := rm.ParseVerifyBatches(l) if err != nil { diff --git a/l1infotreesync/downloader_test.go b/l1infotreesync/downloader_test.go new file mode 100644 index 00000000..6007a3d6 --- /dev/null +++ b/l1infotreesync/downloader_test.go @@ -0,0 +1,55 @@ +package l1infotreesync + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygonzkevmglobalexitrootv2" + mocks_l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync/mocks" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestBuildAppenderErrorOnBadContractAddr(t *testing.T) { + l1Client := mocks_l1infotreesync.NewEthClienter(t) + globalExitRoot := common.HexToAddress("0x1") + rollupManager := common.HexToAddress("0x2") + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test-error")) + flags := FlagNone + _, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) + require.Error(t, err) +} + +func TestBuildAppenderBypassBadContractAddr(t *testing.T) { + l1Client := mocks_l1infotreesync.NewEthClienter(t) + globalExitRoot := common.HexToAddress("0x1") + rollupManager := common.HexToAddress("0x2") + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test-error")) + flags := FlagAllowWrongContractsAddrs + _, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) + require.NoError(t, err) +} + +func TestBuildAppenderVerifiedContractAddr(t *testing.T) { + l1Client := mocks_l1infotreesync.NewEthClienter(t) + globalExitRoot := common.HexToAddress("0x1") + rollupManager := common.HexToAddress("0x2") + + smcAbi, err := abi.JSON(strings.NewReader(polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2ABI)) + require.NoError(t, err) + bigInt := big.NewInt(1) + returnGER, err := smcAbi.Methods["depositCount"].Outputs.Pack(bigInt) + require.NoError(t, err) + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(returnGER, nil).Once() + v := common.HexToAddress("0x1234") + returnRM, err := smcAbi.Methods["bridgeAddress"].Outputs.Pack(v) + require.NoError(t, err) + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(returnRM, nil).Once() + flags := FlagNone + _, err = buildAppender(l1Client, globalExitRoot, rollupManager, flags) + require.NoError(t, err) +} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 21820059..c522c73a 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -80,7 +80,8 @@ func TestE2E(t *testing.T) { rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) require.NoError(t, err) - syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + syncer, err := 
l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3, + l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) @@ -173,7 +174,8 @@ func TestWithReorgs(t *testing.T) { rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25, + l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) @@ -281,7 +283,7 @@ func TestStressAndReorgs(t *testing.T) { ) ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + dbPathSyncer := path.Join(t.TempDir(), "file:TestStressAndReorgs:memory:?cache=shared") dbPathReorg := t.TempDir() privateKey, err := crypto.GenerateKey() require.NoError(t, err) @@ -292,7 +294,8 @@ func TestStressAndReorgs(t *testing.T) { rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100, + l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 4c4b796e..a7e50128 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -5,6 +5,7 @@ import ( "errors" "time" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" @@ -12,9 +13,18 @@ import ( "github.com/ethereum/go-ethereum/common" ) +type CreationFlags uint64 + const ( reorgDetectorID = "l1infotreesync" downloadBufferSize = 1000 + // CreationFlags defitinion + FlagNone CreationFlags = 0 + FlagAllowWrongContractsAddrs CreationFlags = 1 << iota // Allow to set wrong contracts addresses +) + +var ( + ErrNotFound = errors.New("l1infotreesync: not found") ) type L1InfoTreeSync struct { @@ -36,6 +46,7 @@ func New( initialBlock uint64, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + flags CreationFlags, ) (*L1InfoTreeSync, error) { processor, err := newProcessor(dbPath) if err != nil { @@ -59,7 +70,7 @@ func New( MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, } - appender, err := buildAppender(l1Client, globalExitRoot, rollupManager) + appender, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) if err != nil { return nil, err } @@ -111,10 +122,21 @@ func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof( return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root) } +func translateError(err error) error { + if errors.Is(err, db.ErrNotFound) { + return ErrNotFound + } + return err +} + // GetLatestInfoUntilBlock returns the 
most recent L1InfoTreeLeaf that occurred before or at blockNum. // If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned +// It can returns next errors: +// - ErrBlockNotProcessed, +// - ErrNotFound func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { - return s.processor.GetLatestInfoUntilBlock(ctx, blockNum) + leaf, err := s.processor.GetLatestInfoUntilBlock(ctx, blockNum) + return leaf, translateError(err) } // GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree @@ -129,12 +151,12 @@ func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uin // GetLastRollupExitRoot return the last rollup exit root processed func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, error) { - return s.processor.rollupExitTree.GetLastRoot(ctx) + return s.processor.rollupExitTree.GetLastRoot(nil) } // GetLastL1InfoTreeRoot return the last root and index processed from the L1 Info tree func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, error) { - return s.processor.l1InfoTree.GetLastRoot(ctx) + return s.processor.l1InfoTree.GetLastRoot(nil) } // GetLastProcessedBlock return the last processed block @@ -149,7 +171,7 @@ func (s *L1InfoTreeSync) GetLocalExitRoot( return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree") } - return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot) + return s.processor.rollupExitTree.GetLeaf(nil, networkID-1, rollupExitRoot) } func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { @@ -190,3 +212,8 @@ func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( ) (types.Proof, error) { return s.processor.l1InfoTree.GetProof(ctx, index, root) } + +// GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set +func (s *L1InfoTreeSync) GetInitL1InfoRootMap(ctx context.Context) (*L1InfoTreeInitial, error) { + return s.processor.GetInitL1InfoRootMap(nil) +} diff --git a/l1infotreesync/migrations/l1infotreesync0002.sql b/l1infotreesync/migrations/l1infotreesync0002.sql new file mode 100644 index 00000000..d1f09481 --- /dev/null +++ b/l1infotreesync/migrations/l1infotreesync0002.sql @@ -0,0 +1,14 @@ +-- +migrate Down +DROP TABLE IF EXISTS l1info_initial; + +-- +migrate Up + +CREATE TABLE l1info_initial ( + -- single_row_id prevent to have more than 1 row in this table + single_row_id INTEGER check(single_row_id=1) NOT NULL DEFAULT 1, + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + leaf_count INTEGER NOT NULL, + l1_info_root VARCHAR NOT NULL, + PRIMARY KEY (single_row_id) +); + diff --git a/l1infotreesync/migrations/migrations.go b/l1infotreesync/migrations/migrations.go index 768dde37..47fac070 100644 --- a/l1infotreesync/migrations/migrations.go +++ b/l1infotreesync/migrations/migrations.go @@ -16,12 +16,19 @@ const ( //go:embed l1infotreesync0001.sql var mig001 string +//go:embed l1infotreesync0002.sql +var mig002 string + func RunMigrations(dbPath string) error { migrations := []types.Migration{ { ID: "l1infotreesync0001", SQL: mig001, }, + { + ID: "l1infotreesync0002", + SQL: mig002, + }, } for _, tm := range treeMigrations.Migrations { migrations = append(migrations, types.Migration{ diff --git a/l1infotreesync/mock_reorgdetector_test.go b/l1infotreesync/mock_reorgdetector_test.go index 8255443e..18ac7bc8 100644 --- 
a/l1infotreesync/mock_reorgdetector_test.go +++ b/l1infotreesync/mock_reorgdetector_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package l1infotreesync diff --git a/l1infotreesync/mocks/eth_clienter.go b/l1infotreesync/mocks/eth_clienter.go new file mode 100644 index 00000000..270c40d9 --- /dev/null +++ b/l1infotreesync/mocks/eth_clienter.go @@ -0,0 +1,1086 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks_l1infotreesync + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethereum "github.com/ethereum/go-ethereum" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthClienter is an autogenerated mock type for the EthClienter type +type EthClienter struct { + mock.Mock +} + +type EthClienter_Expecter struct { + mock *mock.Mock +} + +func (_m *EthClienter) EXPECT() *EthClienter_Expecter { + return &EthClienter_Expecter{mock: &_m.Mock} +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' +type EthClienter_BlockByHash_Call struct { + *mock.Call +} + +// BlockByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) BlockByHash(ctx interface{}, hash interface{}) *EthClienter_BlockByHash_Call { + return &EthClienter_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} +} + +func (_c *EthClienter_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_BlockByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *EthClienter_BlockByHash_Call { + _c.Call.Return(run) + return _c +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok 
:= ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type EthClienter_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClienter_Expecter) BlockByNumber(ctx interface{}, number interface{}) *EthClienter_BlockByNumber_Call { + return &EthClienter_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *EthClienter_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *EthClienter_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *EthClienter) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClienter_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) BlockNumber(ctx interface{}) *EthClienter_BlockNumber_Call { + return &EthClienter_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *EthClienter_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClienter_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClienter_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClienter_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// CallContract provides a mock function with given fields: ctx, call, blockNumber +func (_m *EthClienter) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, call, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, call, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, call, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + 
+ if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, call, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' +type EthClienter_CallContract_Call struct { + *mock.Call +} + +// CallContract is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *EthClienter_CallContract_Call { + return &EthClienter_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} +} + +func (_c *EthClienter_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *EthClienter_CallContract_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CallContract_Call) Return(_a0 []byte, _a1 error) *EthClienter_CallContract_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CallContract_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *EthClienter_CallContract_Call { + _c.Call.Return(run) + return _c +} + +// CodeAt provides a mock function with given fields: ctx, contract, blockNumber +func (_m *EthClienter) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, contract, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, contract, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, contract, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, contract, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' +type EthClienter_CodeAt_Call struct { + *mock.Call +} + +// CodeAt is a helper method to define mock.On call +// - ctx context.Context +// - contract common.Address +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *EthClienter_CodeAt_Call { + return &EthClienter_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} +} + +func (_c *EthClienter_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *EthClienter_CodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_CodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *EthClienter_CodeAt_Call { + _c.Call.Return(run) + return _c +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *EthClienter) EstimateGas(ctx 
context.Context, call ethereum.CallMsg) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' +type EthClienter_EstimateGas_Call struct { + *mock.Call +} + +// EstimateGas is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +func (_e *EthClienter_Expecter) EstimateGas(ctx interface{}, call interface{}) *EthClienter_EstimateGas_Call { + return &EthClienter_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} +} + +func (_c *EthClienter_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *EthClienter_EstimateGas_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg)) + }) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) Return(_a0 uint64, _a1 error) *EthClienter_EstimateGas_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *EthClienter_EstimateGas_Call { + _c.Call.Return(run) + return _c +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *EthClienter) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []types.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' +type EthClienter_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e *EthClienter_Expecter) FilterLogs(ctx interface{}, q interface{}) *EthClienter_FilterLogs_Call { + return &EthClienter_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c *EthClienter_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *EthClienter_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *EthClienter_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) 
*EthClienter_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type EthClienter_HeaderByHash_Call struct { + *mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClienter_HeaderByHash_Call { + return &EthClienter_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *EthClienter_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthClienter_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type EthClienter_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClienter_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClienter_HeaderByNumber_Call { + return &EthClienter_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *EthClienter_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c 
*EthClienter_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' +type EthClienter_PendingCodeAt_Call struct { + *mock.Call +} + +// PendingCodeAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingCodeAt(ctx interface{}, account interface{}) *EthClienter_PendingCodeAt_Call { + return &EthClienter_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} +} + +func (_c *EthClienter_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingCodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(run) + return _c +} + +// PendingNonceAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingNonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, account) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' +type EthClienter_PendingNonceAt_Call struct { + *mock.Call +} + +// PendingNonceAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *EthClienter_PendingNonceAt_Call { + return &EthClienter_PendingNonceAt_Call{Call: _e.mock.On("PendingNonceAt", ctx, account)} +} + +func (_c 
*EthClienter_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingNonceAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(run) + return _c +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *EthClienter) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EthClienter_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' +type EthClienter_SendTransaction_Call struct { + *mock.Call +} + +// SendTransaction is a helper method to define mock.On call +// - ctx context.Context +// - tx *types.Transaction +func (_e *EthClienter_Expecter) SendTransaction(ctx interface{}, tx interface{}) *EthClienter_SendTransaction_Call { + return &EthClienter_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} +} + +func (_c *EthClienter_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *EthClienter_SendTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.Transaction)) + }) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) Return(_a0 error) *EthClienter_SendTransaction_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *EthClienter_SendTransaction_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *EthClienter) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' +type EthClienter_SubscribeFilterLogs_Call struct { + *mock.Call +} + +// SubscribeFilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +// - ch chan<- types.Log +func (_e *EthClienter_Expecter) SubscribeFilterLogs(ctx 
interface{}, q interface{}, ch interface{}) *EthClienter_SubscribeFilterLogs_Call { + return &EthClienter_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) + }) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeNewHead provides a mock function with given fields: ctx, ch +func (_m *EthClienter) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { + ret := _m.Called(ctx, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeNewHead") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { + return rf(ctx, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { + r0 = rf(ctx, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { + r1 = rf(ctx, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' +type EthClienter_SubscribeNewHead_Call struct { + *mock.Call +} + +// SubscribeNewHead is a helper method to define mock.On call +// - ctx context.Context +// - ch chan<- *types.Header +func (_e *EthClienter_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClienter_SubscribeNewHead_Call { + return &EthClienter_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} +} + +func (_c *EthClienter_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(chan<- *types.Header)) + }) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' +type EthClienter_SuggestGasPrice_Call struct { + *mock.Call +} + +// SuggestGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasPrice(ctx interface{}) *EthClienter_SuggestGasPrice_Call { + return &EthClienter_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} +} + +func (_c *EthClienter_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' +type EthClienter_SuggestGasTipCap_Call struct { + *mock.Call +} + +// SuggestGasTipCap is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasTipCap(ctx interface{}) *EthClienter_SuggestGasTipCap_Call { + return &EthClienter_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(run) + return _c +} + +// TransactionCount provides a mock function with given fields: ctx, blockHash +func (_m *EthClienter) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { + ret := _m.Called(ctx, blockHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionCount") + } + + var r0 uint + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { + return rf(ctx, blockHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { + r0 = rf(ctx, blockHash) + } else { + r0 = ret.Get(0).(uint) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = 
rf(ctx, blockHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' +type EthClienter_TransactionCount_Call struct { + *mock.Call +} + +// TransactionCount is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +func (_e *EthClienter_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *EthClienter_TransactionCount_Call { + return &EthClienter_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} +} + +func (_c *EthClienter_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *EthClienter_TransactionCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) Return(_a0 uint, _a1 error) *EthClienter_TransactionCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *EthClienter_TransactionCount_Call { + _c.Call.Return(run) + return _c +} + +// TransactionInBlock provides a mock function with given fields: ctx, blockHash, index +func (_m *EthClienter) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { + ret := _m.Called(ctx, blockHash, index) + + if len(ret) == 0 { + panic("no return value specified for TransactionInBlock") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { + return rf(ctx, blockHash, index) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { + r0 = rf(ctx, blockHash, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { + r1 = rf(ctx, blockHash, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' +type EthClienter_TransactionInBlock_Call struct { + *mock.Call +} + +// TransactionInBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +// - index uint +func (_e *EthClienter_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *EthClienter_TransactionInBlock_Call { + return &EthClienter_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} +} + +func (_c *EthClienter_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *EthClienter_TransactionInBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) + }) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewEthClienter creates a new instance of EthClienter. 
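// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the generated mock above): driving EthClienter
// through the typed expecter API in a test. The import path matches the one used by
// downloader_test.go in this patch; the expected block number is a placeholder.
package l1infotreesync_test

import (
	"context"
	"testing"

	mocks_l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestEthClienterExpecterSketch(t *testing.T) {
	// NewEthClienter wires the mock to t and registers a Cleanup hook, so unmet
	// expectations fail the test automatically.
	client := mocks_l1infotreesync.NewEthClienter(t)

	// Typed expectation: BlockNumber must be called exactly once and returns 42.
	client.EXPECT().BlockNumber(mock.Anything).Return(uint64(42), nil).Once()

	got, err := client.BlockNumber(context.Background())
	require.NoError(t, err)
	require.Equal(t, uint64(42), got)
}
// end of sketch --------------------------------------------------------------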
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthClienter(t interface { + mock.TestingT + Cleanup(func()) +}) *EthClienter { + mock := &EthClienter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 0bb31cc3..c6a4ef1a 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -53,11 +53,22 @@ type VerifyBatches struct { RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` } +func (v *VerifyBatches) String() string { + return fmt.Sprintf("BlockNumber: %d, BlockPosition: %d, RollupID: %d, NumBatch: %d, StateRoot: %s, "+ + "ExitRoot: %s, Aggregator: %s, RollupExitRoot: %s", + v.BlockNumber, v.BlockPosition, v.RollupID, v.NumBatch, v.StateRoot.String(), + v.ExitRoot.String(), v.Aggregator.String(), v.RollupExitRoot.String()) +} + type InitL1InfoRootMap struct { LeafCount uint32 CurrentL1InfoRoot common.Hash } +func (i *InitL1InfoRootMap) String() string { + return fmt.Sprintf("LeafCount: %d, CurrentL1InfoRoot: %s", i.LeafCount, i.CurrentL1InfoRoot.String()) +} + type Event struct { UpdateL1InfoTree *UpdateL1InfoTree VerifyBatches *VerifyBatches @@ -77,6 +88,24 @@ type L1InfoTreeLeaf struct { Hash common.Hash `meddler:"hash,hash"` } +func (l *L1InfoTreeLeaf) String() string { + return fmt.Sprintf("BlockNumber: %d, BlockPosition: %d, L1InfoTreeIndex: %d, PreviousBlockHash: %s, "+ + "Timestamp: %d, MainnetExitRoot: %s, RollupExitRoot: %s, GlobalExitRoot: %s, Hash: %s", + l.BlockNumber, l.BlockPosition, l.L1InfoTreeIndex, l.PreviousBlockHash.String(), + l.Timestamp, l.MainnetExitRoot.String(), l.RollupExitRoot.String(), l.GlobalExitRoot.String(), l.Hash.String()) +} + +// L1InfoTreeInitial representation of the initial info of the L1 Info tree for this rollup +type L1InfoTreeInitial struct { + BlockNumber uint64 `meddler:"block_num"` + LeafCount uint32 `meddler:"leaf_count"` + L1InfoRoot common.Hash `meddler:"l1_info_root,hash"` +} + +func (l *L1InfoTreeInitial) String() string { + return fmt.Sprintf("BlockNumber: %d, LeafCount: %d, L1InfoRoot: %s", l.BlockNumber, l.LeafCount, l.L1InfoRoot.String()) +} + // Hash as expected by the tree func (l *L1InfoTreeLeaf) hash() common.Hash { var res [treeTypes.DefaultHeight]byte @@ -227,7 +256,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree // and updates the last processed block (can be called without events for that purpose) -func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { +func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { tx, err := db.NewTx(ctx, p.db) if err != nil { return err @@ -240,8 +269,8 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } }() - if _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { - return fmt.Errorf("err: %w", err) + if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { + return fmt.Errorf("insert Block. 
err: %w", err) } var initialL1InfoIndex uint32 @@ -253,12 +282,12 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { initialL1InfoIndex = 0 err = nil case err != nil: - return fmt.Errorf("err: %w", err) + return fmt.Errorf("getLastIndex err: %w", err) default: initialL1InfoIndex = lastIndex + 1 } - for _, e := range b.Events { + for _, e := range block.Events { event, ok := e.(Event) if !ok { return errors.New("failed to convert from sync.Block.Event into Event") @@ -266,7 +295,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { if event.UpdateL1InfoTree != nil { index := initialL1InfoIndex + l1InfoLeavesAdded info := &L1InfoTreeLeaf{ - BlockNumber: b.Num, + BlockNumber: block.Num, BlockPosition: event.UpdateL1InfoTree.BlockPosition, L1InfoTreeIndex: index, PreviousBlockHash: event.UpdateL1InfoTree.ParentHash, @@ -277,45 +306,44 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { info.GlobalExitRoot = info.globalExitRoot() info.Hash = info.hash() if err = meddler.Insert(tx, "l1info_leaf", info); err != nil { - return fmt.Errorf("err: %w", err) + return fmt.Errorf("insert l1info_leaf %s. err: %w", info.String(), err) } + err = p.l1InfoTree.AddLeaf(tx, info.BlockNumber, info.BlockPosition, treeTypes.Leaf{ Index: info.L1InfoTreeIndex, Hash: info.Hash, }) if err != nil { - return fmt.Errorf("err: %w", err) + return fmt.Errorf("AddLeaf(%s). err: %w", info.String(), err) } + log.Infof("inserted L1InfoTreeLeaf %s", info.String()) l1InfoLeavesAdded++ } - if event.VerifyBatches != nil { - newRoot, err := p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ - Index: event.VerifyBatches.RollupID - 1, - Hash: event.VerifyBatches.ExitRoot, - }) + log.Debugf("handle VerifyBatches event %s", event.VerifyBatches.String()) + err = p.processVerifyBatches(tx, block.Num, event.VerifyBatches) if err != nil { - return fmt.Errorf("err: %w", err) - } - verifyBatches := event.VerifyBatches - verifyBatches.BlockNumber = b.Num - verifyBatches.RollupExitRoot = newRoot - if err = meddler.Insert(tx, "verify_batches", verifyBatches); err != nil { - return fmt.Errorf("err: %w", err) + err = fmt.Errorf("processVerifyBatches. err: %w", err) + log.Errorf("error processing VerifyBatches: %v", err) + return err } } if event.InitL1InfoRootMap != nil { - // TODO: indicate that l1 Info tree indexes before the one on this - // event are not safe to use - log.Debugf("TODO: handle InitL1InfoRootMap event") + log.Debugf("handle InitL1InfoRootMap event %s", event.InitL1InfoRootMap.String()) + err = processEventInitL1InfoRootMap(tx, block.Num, event.InitL1InfoRootMap) + if err != nil { + err = fmt.Errorf("initL1InfoRootMap. 
Err: %w", err) + log.Errorf("error processing InitL1InfoRootMap: %v", err) + return err + } } } if err := tx.Commit(); err != nil { return fmt.Errorf("err: %w", err) } - log.Infof("block %d processed with %d events", b.Num, len(b.Events)) + log.Infof("block %d processed with %d events", block.Num, len(block.Events)) return nil } @@ -329,39 +357,6 @@ func (p *processor) getLastIndex(tx db.Querier) (uint32, error) { return lastProcessedIndex, err } -func (p *processor) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - verified := &VerifyBatches{} - err := meddler.QueryRow(p.db, verified, ` - SELECT * FROM verify_batches - WHERE rollup_id = $1 - ORDER BY block_num DESC, block_pos DESC - LIMIT 1; - `, rollupID) - return verified, db.ReturnErrNotFound(err) -} - -func (p *processor) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - verified := &VerifyBatches{} - err := meddler.QueryRow(p.db, verified, ` - SELECT * FROM verify_batches - WHERE rollup_id = $1 - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `, rollupID) - return verified, db.ReturnErrNotFound(err) -} - -func (p *processor) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { - verified := &VerifyBatches{} - err := meddler.QueryRow(p.db, verified, ` - SELECT * FROM verify_batches - WHERE rollup_id = $1 AND block_num >= $2 - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `, rollupID, blockNum) - return verified, db.ReturnErrNotFound(err) -} - func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { info := &L1InfoTreeLeaf{} err := meddler.QueryRow(p.db, info, ` @@ -413,3 +408,10 @@ func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, e `, ger.Hex()) return info, db.ReturnErrNotFound(err) } + +func (p *processor) getDBQuerier(tx db.Txer) db.Querier { + if tx != nil { + return tx + } + return p.db +} diff --git a/l1infotreesync/processor_initl1inforootmap.go b/l1infotreesync/processor_initl1inforootmap.go new file mode 100644 index 00000000..92732cd9 --- /dev/null +++ b/l1infotreesync/processor_initl1inforootmap.go @@ -0,0 +1,37 @@ +package l1infotreesync + +import ( + "database/sql" + "errors" + "fmt" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + "github.com/russross/meddler" +) + +func processEventInitL1InfoRootMap(tx db.Txer, blockNumber uint64, event *InitL1InfoRootMap) error { + if event == nil { + return nil + } + info := &L1InfoTreeInitial{ + BlockNumber: blockNumber, + LeafCount: event.LeafCount, + L1InfoRoot: event.CurrentL1InfoRoot, + } + log.Infof("insert InitL1InfoRootMap %s ", info.String()) + if err := meddler.Insert(tx, "l1info_initial", info); err != nil { + return fmt.Errorf("err: %w", err) + } + return nil +} + +// GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set +func (p *processor) GetInitL1InfoRootMap(tx db.Txer) (*L1InfoTreeInitial, error) { + info := &L1InfoTreeInitial{} + err := meddler.QueryRow(p.getDBQuerier(tx), info, `SELECT block_num, leaf_count,l1_info_root FROM l1info_initial`) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return info, err +} diff --git a/l1infotreesync/processor_initl1inforootmap_test.go b/l1infotreesync/processor_initl1inforootmap_test.go new file mode 100644 index 00000000..753d7a25 --- /dev/null +++ b/l1infotreesync/processor_initl1inforootmap_test.go @@ -0,0 +1,67 @@ +package l1infotreesync + +import ( + "context" + "testing" + + 
"github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestInitL1InfoRootMap(t *testing.T) { + dbPath := "file:TestInitL1InfoRootMap?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.TODO() + event := InitL1InfoRootMap{ + LeafCount: 1, + CurrentL1InfoRoot: common.HexToHash("beef"), + } + block := sync.Block{ + Num: 1, + Events: []interface{}{ + Event{InitL1InfoRootMap: &event}, + }, + } + + err = sut.ProcessBlock(ctx, block) + require.NoError(t, err) + + info, err := sut.GetInitL1InfoRootMap(nil) + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, event.LeafCount, info.LeafCount) + require.Equal(t, event.CurrentL1InfoRoot, info.L1InfoRoot) + require.Equal(t, block.Num, info.BlockNumber) +} + +func TestInitL1InfoRootMapDontAllow2Rows(t *testing.T) { + dbPath := "file:TestInitL1InfoRootMapDontAllow2Rows?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.TODO() + block := sync.Block{ + Num: 1, + Events: []interface{}{ + Event{InitL1InfoRootMap: &InitL1InfoRootMap{ + LeafCount: 1, + CurrentL1InfoRoot: common.HexToHash("beef"), + }}, + }, + } + err = sut.ProcessBlock(ctx, block) + require.NoError(t, err) + block.Num = 2 + err = sut.ProcessBlock(ctx, block) + require.Error(t, err, "should not allow to insert a second row") +} + +func TestGetInitL1InfoRootMap(t *testing.T) { + dbPath := "file:TestGetInitL1InfoRootMap?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + info, err := sut.GetInitL1InfoRootMap(nil) + require.NoError(t, err, "should return no error if no row is present, because it returns data=nil") + require.Nil(t, info, "should return nil if no row is present") +} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 3da02998..b31d2237 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -10,72 +10,6 @@ import ( "golang.org/x/net/context" ) -func TestGetVerifiedBatches(t *testing.T) { - dbPath := "file:TestGetVerifiedBatches?mode=memory&cache=shared" - p, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - - // Test ErrNotFound returned correctly on all methods - _, err = p.GetLastVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetFirstVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetFirstVerifiedBatchesAfterBlock(0, 0) - require.Equal(t, db.ErrNotFound, err) - - // First insert - expected1 := &VerifyBatches{ - RollupID: 420, - NumBatch: 69, - StateRoot: common.HexToHash("5ca1e"), - ExitRoot: common.HexToHash("b455"), - Aggregator: common.HexToAddress("beef"), - } - err = p.ProcessBlock(ctx, sync.Block{ - Num: 1, - Events: []interface{}{ - Event{VerifyBatches: expected1}, - }, - }) - require.NoError(t, err) - _, err = p.GetLastVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - actual, err := p.GetLastVerifiedBatches(420) - require.NoError(t, err) - require.Equal(t, expected1, actual) - actual, err = p.GetFirstVerifiedBatches(420) - require.NoError(t, err) - require.Equal(t, expected1, actual) - - // Second insert - expected2 := &VerifyBatches{ - RollupID: 420, - NumBatch: 690, - StateRoot: common.HexToHash("5ca1e3"), - ExitRoot: common.HexToHash("ba55"), - Aggregator: common.HexToAddress("beef3"), - } - err = p.ProcessBlock(ctx, sync.Block{ - Num: 2, - Events: []interface{}{ - 
Event{VerifyBatches: expected2},
-		},
-	})
-	require.NoError(t, err)
-	_, err = p.GetLastVerifiedBatches(0)
-	require.Equal(t, db.ErrNotFound, err)
-	actual, err = p.GetLastVerifiedBatches(420)
-	require.NoError(t, err)
-	require.Equal(t, expected2, actual)
-	actual, err = p.GetFirstVerifiedBatches(420)
-	require.NoError(t, err)
-	require.Equal(t, expected1, actual)
-	actual, err = p.GetFirstVerifiedBatchesAfterBlock(420, 2)
-	require.NoError(t, err)
-	require.Equal(t, expected2, actual)
-}
-
 func TestGetInfo(t *testing.T) {
 	dbPath := "file:TestGetInfo?mode=memory&cache=shared"
 	p, err := newProcessor(dbPath)
@@ -174,3 +108,16 @@ func TestGetInfo(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, expected2, *actual)
 }
+
+func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) {
+	dbPath := "file:TestGetLatestInfoUntilBlock?mode=memory&cache=shared"
+	sut, err := newProcessor(dbPath)
+	require.NoError(t, err)
+	ctx := context.Background()
+	// Fake block 1
+	_, err = sut.db.Exec(`INSERT INTO block (num) VALUES ($1)`, 1)
+	require.NoError(t, err)
+
+	_, err = sut.GetLatestInfoUntilBlock(ctx, 1)
+	require.Equal(t, db.ErrNotFound, err)
+}
diff --git a/l1infotreesync/processor_verifybatches.go b/l1infotreesync/processor_verifybatches.go
new file mode 100644
index 00000000..9d1d0efb
--- /dev/null
+++ b/l1infotreesync/processor_verifybatches.go
@@ -0,0 +1,104 @@
+package l1infotreesync
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/0xPolygon/cdk/db"
+	"github.com/0xPolygon/cdk/log"
+	treeTypes "github.com/0xPolygon/cdk/tree/types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/russross/meddler"
+)
+
+func (p *processor) processVerifyBatches(tx db.Txer, blockNumber uint64, event *VerifyBatches) error {
+	if event == nil {
+		return fmt.Errorf("processVerifyBatches: event is nil")
+	}
+	if tx == nil {
+		return fmt.Errorf("processVerifyBatches: tx is nil, it is mandatory to pass a tx")
+	}
+	log.Debugf("VerifyBatches: rollupExitTree.UpsertLeaf (blockNumber=%d, event=%s)", blockNumber, event.String())
+	// If ExitRoot is zero: if the leaf doesn't exist, it doesn't change the root of the tree;
+	// if the leaf already exists, it doesn't make sense to 'empty' it, so we keep the previous value
+	if event.ExitRoot == (common.Hash{}) {
+		log.Infof("skipping VerifyBatches event with empty ExitRoot (blockNumber=%d, event=%s)", blockNumber, event.String())
+		return nil
+	}
+	isNewLeaf, err := p.isNewValueForRollupExitTree(tx, event)
+	if err != nil {
+		return fmt.Errorf("isNewValueForRollupExitTree. err: %w", err)
+	}
+	if !isNewLeaf {
+		log.Infof("skipping VerifyBatches event with same ExitRoot (blockNumber=%d, event=%s)", blockNumber, event.String())
+		return nil
+	}
+	log.Infof("UpsertLeaf VerifyBatches event (blockNumber=%d, event=%s)", blockNumber, event.String())
+	newRoot, err := p.rollupExitTree.UpsertLeaf(tx, blockNumber, event.BlockPosition, treeTypes.Leaf{
+		Index: event.RollupID - 1,
+		Hash:  event.ExitRoot,
+	})
+	if err != nil {
+		return fmt.Errorf("error rollupExitTree.UpsertLeaf. err: %w", err)
+	}
+	verifyBatches := event
+	verifyBatches.BlockNumber = blockNumber
+	verifyBatches.RollupExitRoot = newRoot
+	if err = meddler.Insert(tx, "verify_batches", verifyBatches); err != nil {
+		return fmt.Errorf("error inserting verify_batches. 
err: %w", err) + } + return nil +} + +func (p *processor) isNewValueForRollupExitTree(tx db.Querier, event *VerifyBatches) (bool, error) { + currentRoot, err := p.rollupExitTree.GetLastRoot(tx) + if err != nil && errors.Is(err, db.ErrNotFound) { + // The tree is empty, so is a new value for sure + return true, nil + } + if err != nil { + return false, fmt.Errorf("error rollupExitTree.GetLastRoot. err: %w", err) + } + leaf, err := p.rollupExitTree.GetLeaf(tx, event.RollupID-1, currentRoot.Hash) + if err != nil && errors.Is(err, db.ErrNotFound) { + // The leaf doesn't exist, so is a new value + return true, nil + } + if err != nil { + return false, fmt.Errorf("error rollupExitTree.GetLeaf. err: %w", err) + } + return leaf != event.ExitRoot, nil +} + +func (p *processor) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 AND block_num >= $2 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID, blockNum) + return verified, db.ReturnErrNotFound(err) +} diff --git a/l1infotreesync/processor_verifybatches_test.go b/l1infotreesync/processor_verifybatches_test.go new file mode 100644 index 00000000..d943b541 --- /dev/null +++ b/l1infotreesync/processor_verifybatches_test.go @@ -0,0 +1,127 @@ +package l1infotreesync + +import ( + "context" + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestProcessVerifyBatchesNil(t *testing.T) { + dbPath := "file:TestProcessVerifyBatchesNil?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + err = sut.processVerifyBatches(nil, 1, nil) + require.Error(t, err) +} + +func TestProcessVerifyBatchesOK(t *testing.T) { + dbPath := "file:TestProcessVerifyBatchesOK?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + event := VerifyBatches{ + BlockPosition: 1, + RollupID: 1, + NumBatch: 1, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.HexToHash("b455"), + Aggregator: common.HexToAddress("beef"), + RollupExitRoot: common.HexToHash("b455"), + } + ctx := context.TODO() + tx, err := db.NewTx(ctx, sut.db) + require.NoError(t, err) + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 1) + require.NoError(t, err) + err = sut.processVerifyBatches(tx, 1, &event) + require.NoError(t, err) +} + +func TestProcessVerifyBatchesSkip0000(t *testing.T) { + dbPath := "file:TestProcessVerifyBatchesSkip0000?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + event := VerifyBatches{ + BlockPosition: 1, + RollupID: 1, + NumBatch: 1, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.Hash{}, + Aggregator: 
common.HexToAddress("beef"), + RollupExitRoot: common.HexToHash("b455"), + } + ctx := context.TODO() + tx, err := db.NewTx(ctx, sut.db) + require.NoError(t, err) + err = sut.processVerifyBatches(tx, 1, &event) + require.NoError(t, err) +} + +func TestGetVerifiedBatches(t *testing.T) { + dbPath := "file:TestGetVerifiedBatches?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatchesAfterBlock(0, 0) + require.Equal(t, db.ErrNotFound, err) + + // First insert + expected1 := &VerifyBatches{ + RollupID: 420, + NumBatch: 69, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.HexToHash("b455"), + Aggregator: common.HexToAddress("beef"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{VerifyBatches: expected1}, + }, + }) + require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err := p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + + // Second insert + expected2 := &VerifyBatches{ + RollupID: 420, + NumBatch: 690, + StateRoot: common.HexToHash("5ca1e3"), + ExitRoot: common.HexToHash("ba55"), + Aggregator: common.HexToAddress("beef3"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{VerifyBatches: expected2}, + }, + }) + require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err = p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected2, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatchesAfterBlock(420, 2) + require.NoError(t, err) + require.Equal(t, expected2, actual) +} diff --git a/reorgdetector/mock_eth_client.go b/reorgdetector/mock_eth_client.go index a76c62f9..0c561ab3 100644 --- a/reorgdetector/mock_eth_client.go +++ b/reorgdetector/mock_eth_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package reorgdetector diff --git a/scripts/local_config b/scripts/local_config index ed8aaec3..aeb008b0 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -2,6 +2,18 @@ #Include common varaibles source $(dirname $0)/../test/scripts/env.sh +function export_values_of_genesis(){ + local _GENESIS_FILE=$1 + if [ ! -f $_GENESIS_FILE ]; then + echo "Error: genesis file not found: $_GENESIS_FILE" + exit 1 + fi + export l1_chain_id=$(jq -r '.L1Config.chainId' $_GENESIS_FILE | tr -d '"') + export pol_token_address=$(jq -r '.L1Config.polTokenAddress' $_GENESIS_FILE) + export zkevm_rollup_address=$(jq -r '.L1Config.polygonZkEVMAddress' $_GENESIS_FILE) + export zkevm_rollup_manager_address=$(jq -r '.L1Config.polygonRollupManagerAddress' $_GENESIS_FILE) + export zkevm_global_exit_root_address=$(jq -r '.L1Config.polygonZkEVMGlobalExitRootAddress' $_GENESIS_FILE) +} @@ -23,16 +35,16 @@ EOF fi -if [ -z $TMP_CDK_FOLDER -o -z $ENCLAVE ]; then - echo "TMP_CDK_FOLDER or ENCLAVE is not set. 
Must be set on file env.sh" +if [ -z $TMP_CDK_FOLDER -o -z $KURTOSIS_ENCLAVE ]; then + echo "TMP_CDK_FOLDER or KURTOSIS_ENCLAVE is not set. Must be set on file env.sh" exit 1 fi -kurtosis enclave inspect $ENCLAVE > /dev/null +kurtosis enclave inspect $KURTOSIS_ENCLAVE > /dev/null if [ $? -ne 0 ]; then - echo "Error inspecting enclave $ENCLAVE" + echo "Error inspecting enclave $KURTOSIS_ENCLAVE" echo "You must start kurtosis environment before running this script" echo "- start kurtosis:" - echo " kurtosis clean --all; kurtosis run --enclave $ENCLAVE --args-file params.yml --image-download always ." + echo " kurtosis clean --all; kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file params.yml --image-download always ." exit 1 fi @@ -40,34 +52,39 @@ DEST=${TMP_CDK_FOLDER}/local_config [ ! -d ${DEST} ] && mkdir -p ${DEST} rm $DEST/* -kurtosis files download $ENCLAVE genesis $DEST +kurtosis files download $KURTOSIS_ENCLAVE genesis $DEST [ $? -ne 0 ] && echo "Error downloading genesis" && exit 1 export genesis_file=$DEST/genesis.json -kurtosis files download $ENCLAVE sequencer-keystore $DEST +export_values_of_genesis $genesis_file +kurtosis files download $KURTOSIS_ENCLAVE sequencer-keystore $DEST [ $? -ne 0 ] && echo "Error downloading sequencer-keystore" && exit 1 export sequencer_keystore_file=$DEST/sequencer.keystore -l1_rpc_port=$(kurtosis port print $ENCLAVE el-1-geth-lighthouse rpc | cut -f 3 -d ":") +l1_rpc_port=$(kurtosis port print $KURTOSIS_ENCLAVE el-1-geth-lighthouse rpc | cut -f 3 -d ":") [ $? -ne 0 ] && echo "Error getting l1_rpc_port" && exit 1 || export l1_rpc_port && echo "l1_rpc_port=$l1_rpc_port" +l1_rpc_addr=$(kurtosis port print $KURTOSIS_ENCLAVE el-1-geth-lighthouse rpc) +[ $? -ne 0 ] && echo "Error getting l1_rpc_addr" && exit 1 || export l1_rpc_addr && echo "l1_rpc_addr=$l1_rpc_addr" +l2_rpc_addr=$(kurtosis port print $KURTOSIS_ENCLAVE cdk-erigon-node-001 http-rpc) +[ $? -ne 0 ] && echo "Error getting l2_rpc_addr" && exit 1 || export l2_rpc_addr && echo "l2_rpc_addr=$l2_rpc_addr" -zkevm_data_streamer_port=$(kurtosis port print $ENCLAVE cdk-erigon-sequencer-001 data-streamer | cut -f 3 -d ":") +zkevm_data_streamer_port=$(kurtosis port print $KURTOSIS_ENCLAVE cdk-erigon-sequencer-001 data-streamer | cut -f 3 -d ":") [ $? -ne 0 ] && echo "Error getting zkevm_data_streamer_port" && exit 1 || export zkevm_data_streamer_port && echo "zkevm_data_streamer_port=$zkevm_data_streamer_port" -kurtosis files download $ENCLAVE cdk-node-config-artifact $DEST +kurtosis files download $KURTOSIS_ENCLAVE cdk-node-config-artifact $DEST export zkevm_l2_sequencer_address=$(cat $DEST/cdk-node-config.toml |grep L2Coinbase | cut -f 2 -d "="| tr -d '"' | tr -d ' ') export zkevm_l2_keystore_password=$(cat $DEST/cdk-node-config.toml |grep -A1 L2Coinbase | tr ',' '\n' | grep Password | cut -f 2 -d '=' | tr -d '}' | tr -d '"' | tr -d ' ') export l1_chain_id=$(cat $DEST/cdk-node-config.toml | grep L1ChainID | cut -f 2 -d '=' | head -n 1) echo $l1_chain_id export zkevm_is_validium=$(cat $DEST/cdk-node-config.toml | grep IsValidiumMode | cut -f 2 -d '=') - +export zkevm_contract_versions=$(cat $DEST/cdk-node-config.toml | grep ContractVersions | cut -f 2 -d '=' | tr -d '"' | tr -d ' ') if [ "$zkevm_is_validium" == "true" ]; then echo "Validium mode detected... Retrieving the dac_port" - dac_port=$(kurtosis port print $ENCLAVE zkevm-dac-001 dac | cut -f 3 -d ":") + dac_port=$(kurtosis port print $KURTOSIS_ENCLAVE zkevm-dac-001 dac | cut -f 3 -d ":") [ $? 
-ne 0 ] && echo "Error getting dac_port" && exit 1 || export dac_port && echo "dac_port=$dac_port" fi envsubst < test/config/test.kurtosis_template.toml > $DEST/test.kurtosis.toml - +echo "file generated at:" $DEST/test.kurtosis.toml echo "- to restart kurtosis:" echo " kurtosis clean --all; kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always ." echo " " @@ -87,7 +104,7 @@ cat << EOF "run", "-cfg", "$DEST/test.kurtosis.toml", "-components", "sequence-sender,aggregator", - "-custom-network-file", "$DEST/local_config/genesis.json" ] }, EOF + diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 6d191c4a..2868bb4b 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -2,6 +2,7 @@ package txbuilder import ( "context" + "errors" "fmt" "math/big" @@ -27,6 +28,7 @@ type globalExitRootBananaContractor interface { type l1InfoSyncer interface { GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) + GetInitL1InfoRootMap(ctx context.Context) (*l1infotreesync.L1InfoTreeInitial, error) } type l1Client interface { @@ -74,39 +76,90 @@ func (t *TxBuilderBananaBase) NewBatchFromL2Block(l2Block *datastream.L2Block) s return NewBananaBatch(batch) } -func (t *TxBuilderBananaBase) NewSequence( - ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address, -) (seqsendertypes.Sequence, error) { - ethBatches := toEthermanBatches(batches) - sequence := etherman.NewSequenceBanana(ethBatches, coinbase) - var greatestL1Index uint32 - for _, b := range sequence.Batches { - if greatestL1Index < b.L1InfoTreeIndex { - greatestL1Index = b.L1InfoTreeIndex +func getHighestL1InfoIndex(batches []etherman.Batch) uint32 { + var highestL1Index uint32 + for _, b := range batches { + if highestL1Index < b.L1InfoTreeIndex { + highestL1Index = b.L1InfoTreeIndex } } + return highestL1Index +} + +// Returns CounterL1InfoRoot to use for this batch +func (t *TxBuilderBananaBase) GetCounterL1InfoRoot(ctx context.Context, highestL1IndexInBatch uint32) (uint32, error) { header, err := t.ethClient.HeaderByNumber(ctx, t.blockFinality) if err != nil { - return nil, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err) + return 0, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err) } + var resL1InfoCounter uint32 + info, err := t.l1InfoTree.GetLatestInfoUntilBlock(ctx, header.Number.Uint64()) + if err == nil { + resL1InfoCounter = info.L1InfoTreeIndex + 1 + } + if errors.Is(err, l1infotreesync.ErrNotFound) { + // There are no L1 Info tree leaves yet, so we can try to use L1InfoRootMap event + l1infotreeInitial, err := t.l1InfoTree.GetInitL1InfoRootMap(ctx) + if l1infotreeInitial == nil || err != nil { + return 0, fmt.Errorf("error no leaves on L1InfoTree yet and GetInitL1InfoRootMap fails: %w", err) + } + // We use this leaf as first one + resL1InfoCounter = l1infotreeInitial.LeafCount + } else if err != nil { + return 0, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err) + } + // special case: there are no leaves in L1InfoTree yet + if resL1InfoCounter == 0 && highestL1IndexInBatch == 0 { + log.Infof("No L1 Info tree leaves yet, batch use no leaf") + return resL1InfoCounter, nil + } + if resL1InfoCounter > highestL1IndexInBatch { + return resL1InfoCounter, nil + } + + return 0, fmt.Errorf( + "sequence contained an 
L1 Info tree index (%d) that is greater than the one synced with the desired finality (%d)",
+		highestL1IndexInBatch, resL1InfoCounter,
+	)
+}
+
+func (t *TxBuilderBananaBase) CheckL1InfoTreeLeafCounterVsInitL1InfoMap(ctx context.Context, leafCounter uint32) error {
+	l1infotreeInitial, err := t.l1InfoTree.GetInitL1InfoRootMap(ctx)
 	if err != nil {
-		return nil, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err)
-	}
-	if info.L1InfoTreeIndex >= greatestL1Index {
-		sequence.CounterL1InfoRoot = info.L1InfoTreeIndex + 1
-	} else {
-		return nil, fmt.Errorf(
-			"sequence contained an L1 Info tree index (%d) that is greater than the one synced with the desired finality (%d)",
-			greatestL1Index, info.L1InfoTreeIndex,
-		)
+		return fmt.Errorf("l1InfoTree.GetInitL1InfoRootMap fails: %w", err)
 	}
+	if l1infotreeInitial == nil {
+		log.Warnf("No InitL1InfoRootMap found, skipping check")
+		return nil
+	}
+	if leafCounter < l1infotreeInitial.LeafCount {
+		return fmt.Errorf("can't use this leafCounter because it is previous to the first value on the contract map: "+
+			"leafCounter(%d) < l1infotreeInitial.LeafCount(%d)", leafCounter, l1infotreeInitial.LeafCount)
+	}
+	return nil
+}
+func (t *TxBuilderBananaBase) NewSequence(
+	ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address,
+) (seqsendertypes.Sequence, error) {
+	ethBatches := toEthermanBatches(batches)
+	sequence := etherman.NewSequenceBanana(ethBatches, coinbase)
+	greatestL1Index := getHighestL1InfoIndex(sequence.Batches)
+
+	counterL1InfoRoot, err := t.GetCounterL1InfoRoot(ctx, greatestL1Index)
+	if err != nil {
+		return nil, err
+	}
+	sequence.CounterL1InfoRoot = counterL1InfoRoot
 	l1InfoRoot, err := t.getL1InfoRoot(sequence.CounterL1InfoRoot)
 	if err != nil {
 		return nil, err
 	}
-
+	err = t.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(ctx, sequence.CounterL1InfoRoot)
+	if err != nil {
+		return nil, err
+	}
 	sequence.L1InfoRoot = l1InfoRoot
 	accInputHash, err := t.rollupContract.LastAccInputHash(&bind.CallOpts{Pending: false})
diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go
index 3b449084..44d7a7b1 100644
--- a/sequencesender/txbuilder/banana_base_test.go
+++ b/sequencesender/txbuilder/banana_base_test.go
@@ -31,6 +31,7 @@ func TestBananaBaseNewSequenceEmpty(t *testing.T) {
 		Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 69}, nil)
 	lastAcc := common.HexToHash("0x8aca9664752dbae36135fd0956c956fc4a370feeac67485b49bcd4b99608ae41")
 	testData.rollupContract.EXPECT().LastAccInputHash(mock.Anything).Return(lastAcc, nil)
+	testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil)
 	seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{})
 	require.NotNil(t, seq)
 	require.NoError(t, err)
@@ -74,6 +75,8 @@ func TestBananaBaseNewSequenceBatch(t *testing.T) {
 		Coinbase:       []byte{1, 2, 3},
 		GlobalExitRoot: []byte{4, 5, 6},
 	}
+	testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil).Once()
+
 	batch := testData.sut.NewBatchFromL2Block(l2Block)
 	batches := []seqsendertypes.Batch{batch}
 	lastAcc := common.HexToHash("0x8aca9664752dbae36135fd0956c956fc4a370feeac67485b49bcd4b99608ae41")
@@ -124,6 +127,41 @@ func TestBananaSanityCheckNilSeq(t *testing.T) {
 	require.Error(t, err, "nil sequence")
 }
 
+func TestBananaEmptyL1InfoTree(t *testing.T) {
+	testData := newBananaBaseTestData(t)
+
+	testData.l1Client.On("HeaderByNumber", mock.Anything, mock.Anything).
+ Return(&types.Header{Number: big.NewInt(69)}, nil) + testData.l1InfoTreeSync.EXPECT().GetLatestInfoUntilBlock(testData.ctx, uint64(69)).Return(nil, l1infotreesync.ErrNotFound) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(testData.ctx).Return(&l1infotreesync.L1InfoTreeInitial{LeafCount: 10}, nil) + + leafCounter, err := testData.sut.GetCounterL1InfoRoot(testData.ctx, 0) + require.NoError(t, err) + require.Equal(t, uint32(10), leafCounter) +} + +func TestCheckL1InfoTreeLeafCounterVsInitL1InfoMap(t *testing.T) { + testData := newBananaBaseTestData(t) + + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(testData.ctx).Return(&l1infotreesync.L1InfoTreeInitial{LeafCount: 10}, nil) + err := testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 10) + require.NoError(t, err, "10 == 10 so is accepted") + + err = testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 9) + require.Error(t, err, "9 < 10 so is rejected") + + err = testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 11) + require.NoError(t, err, "11 > 10 so is accepted") +} + +func TestCheckL1InfoTreeLeafCounterVsInitL1InfoMapNotFound(t *testing.T) { + testData := newBananaBaseTestData(t) + + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(testData.ctx).Return(nil, nil) + err := testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 10) + require.NoError(t, err, "10 == 10 so is accepted") +} + type testDataBananaBase struct { rollupContract *mocks_txbuilder.RollupBananaBaseContractor getContract *mocks_txbuilder.GlobalExitRootBananaContractor @@ -131,6 +169,7 @@ type testDataBananaBase struct { sut *txbuilder.TxBuilderBananaBase l1InfoTreeSync *mocks_txbuilder.L1InfoSyncer l1Client *mocks_txbuilder.L1Client + ctx context.Context } func newBananaBaseTestData(t *testing.T) *testDataBananaBase { @@ -155,5 +194,6 @@ func newBananaBaseTestData(t *testing.T) *testDataBananaBase { sut: sut, l1InfoTreeSync: l1InfoSyncer, l1Client: l1Client, + ctx: context.TODO(), } } diff --git a/sequencesender/txbuilder/banana_validium_test.go b/sequencesender/txbuilder/banana_validium_test.go index 8f764595..71f059b9 100644 --- a/sequencesender/txbuilder/banana_validium_test.go +++ b/sequencesender/txbuilder/banana_validium_test.go @@ -34,6 +34,8 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceErrorsFromDA(t *testing.T) Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaValidiumForTest(testData) require.NoError(t, err) ctx := context.TODO() @@ -53,6 +55,8 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceDAOk(t *testing.T) { Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). 
Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaValidiumForTest(testData) require.NoError(t, err) ctx := context.TODO() diff --git a/sequencesender/txbuilder/banana_zkevm_test.go b/sequencesender/txbuilder/banana_zkevm_test.go index a4ff4bd7..4570729e 100644 --- a/sequencesender/txbuilder/banana_zkevm_test.go +++ b/sequencesender/txbuilder/banana_zkevm_test.go @@ -40,6 +40,8 @@ func TestBananaZkevmBuildSequenceBatchesTxOk(t *testing.T) { Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaZKEVMForTest(testData) require.NoError(t, err) @@ -61,6 +63,8 @@ func TestBananaZkevmBuildSequenceBatchesTxErr(t *testing.T) { Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaZKEVMForTest(testData) require.NoError(t, err) diff --git a/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go b/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go index 65bf9394..12d641a8 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go +++ b/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go @@ -22,6 +22,64 @@ func (_m *L1InfoSyncer) EXPECT() *L1InfoSyncer_Expecter { return &L1InfoSyncer_Expecter{mock: &_m.Mock} } +// GetInitL1InfoRootMap provides a mock function with given fields: ctx +func (_m *L1InfoSyncer) GetInitL1InfoRootMap(ctx context.Context) (*l1infotreesync.L1InfoTreeInitial, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetInitL1InfoRootMap") + } + + var r0 *l1infotreesync.L1InfoTreeInitial + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*l1infotreesync.L1InfoTreeInitial, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *l1infotreesync.L1InfoTreeInitial); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeInitial) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoSyncer_GetInitL1InfoRootMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInitL1InfoRootMap' +type L1InfoSyncer_GetInitL1InfoRootMap_Call struct { + *mock.Call +} + +// GetInitL1InfoRootMap is a helper method to define mock.On call +// - ctx context.Context +func (_e *L1InfoSyncer_Expecter) GetInitL1InfoRootMap(ctx interface{}) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + return &L1InfoSyncer_GetInitL1InfoRootMap_Call{Call: _e.mock.On("GetInitL1InfoRootMap", ctx)} +} + +func (_c *L1InfoSyncer_GetInitL1InfoRootMap_Call) Run(run func(ctx context.Context)) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L1InfoSyncer_GetInitL1InfoRootMap_Call) Return(_a0 *l1infotreesync.L1InfoTreeInitial, _a1 error) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoSyncer_GetInitL1InfoRootMap_Call) RunAndReturn(run func(context.Context) (*l1infotreesync.L1InfoTreeInitial, error)) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + _c.Call.Return(run) + return _c +} + // GetLatestInfoUntilBlock provides a mock function with given fields: ctx, blockNum func (_m *L1InfoSyncer) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) { ret := _m.Called(ctx, blockNum) diff --git a/sonar-project.properties b/sonar-project.properties index 559f7073..b8f78410 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -7,11 +7,11 @@ sonar.projectName=cdk sonar.organization=0xpolygon sonar.sources=. -sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/* sonar.tests=. sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml +sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/* sonar.issue.enforceSemantic=true # ===================================================== diff --git a/sync/mock_l2_test.go b/sync/mock_l2_test.go index 78d75191..7a4bae36 100644 --- a/sync/mock_l2_test.go +++ b/sync/mock_l2_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package sync diff --git a/sync/mock_processor_test.go b/sync/mock_processor_test.go index 8e562e9b..afbb34cb 100644 --- a/sync/mock_processor_test.go +++ b/sync/mock_processor_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package sync diff --git a/sync/mock_reorgdetector_test.go b/sync/mock_reorgdetector_test.go index 52cd0cd0..9689f7e7 100644 --- a/sync/mock_reorgdetector_test.go +++ b/sync/mock_reorgdetector_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. 
package sync diff --git a/test/Makefile b/test/Makefile index 0864b8d2..4833f214 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,5 +1,5 @@ .PHONY: generate-mocks -generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers generate-mocks-sync +generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers generate-mocks-sync generate-mocks-l1infotreesync .PHONY: generate-mocks-bridgesync @@ -26,6 +26,25 @@ generate-mocks-rpc: ## Generates mocks for rpc, using mockery tool rm -Rf ../rpc/mocks export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../rpc --output ../rpc/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} +.PHONY: generate-mocks-l1infotreesync +generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mockery tool + rm -Rf ../l1infotreesync/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../l1infotreesync --output ../l1infotreesync/mocks --outpkg mocks_l1infotreesync ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go + + + +.PHONY: generate-mocks-aggoracle +generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=./helpers --outpkg=helpers --structname=EthTxManagerMock --filename=mock_ethtxmanager.go + +.PHONY: generate-mocks-sync +generate-mocks-sync: ## Generates mocks for sync, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go + .PHONY: test-e2e-elderberry-validium test-e2e-elderberry-validium: stop ## Runs e2e tests checking elderberry/validium @@ -52,17 +71,3 @@ help: ## Prints this help @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) \ | sort \ | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' -.PHONY: generate-mocks-l1infotreesync -generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync , using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go - -.PHONY: generate-mocks-aggoracle -generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool - export "GOROOT=$$(go env 
GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=./helpers --outpkg=helpers --structname=EthTxManagerMock --filename=mock_ethtxmanager.go - -.PHONY: generate-mocks-sync -generate-mocks-sync: ## Generates mocks for sync, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go diff --git a/test/config/test.kurtosis_template.toml b/test/config/test.kurtosis_template.toml index 66471c6a..c065cd6d 100644 --- a/test/config/test.kurtosis_template.toml +++ b/test/config/test.kurtosis_template.toml @@ -1,9 +1,12 @@ ForkUpgradeBatchNumber = 0 ForkUpgradeNewForkId = 0 +[Etherman] + URL = "http://127.0.0.1:${l1_rpc_port}" + [Common] IsValidiumMode = ${zkevm_is_validium} -ContractVersions = "elderberry" +ContractVersions = "${zkevm_contract_versions}" [Common.Translator] FullMatchRules = [ {Old="http://zkevm-dac-001:8484", New="http://127.0.0.1:${dac_port}"}, @@ -11,7 +14,7 @@ ContractVersions = "elderberry" [Log] Environment = "development" # "production" or "development" -Level = "info" +Level = "debug" Outputs = ["stderr"] [SequenceSender] @@ -27,7 +30,7 @@ SequencesTxFileName = "sequencesender.json" GasOffset = 80000 WaitPeriodPurgeTxFile = "15m" MaxPendingTx = 1 -SanityCheckRPCURL = "http://127.0.0.1:8123" +SanityCheckRPCURL = "${l2_rpc_addr}" [SequenceSender.StreamClient] Server = "127.0.0.1:${zkevm_data_streamer_port}" [SequenceSender.EthTxManager] @@ -124,3 +127,23 @@ SequencerPrivateKey = {} [Aggregator.Synchronizer.Etherman] [Aggregator.Synchronizer.Etherman.Validium] Enabled = ${zkevm_is_validium} + + +[L1InfoTreeSync] +DBPath = "/tmp/L1InfoTreeSync.sqlite" +GlobalExitRootAddr="${zkevm_global_exit_root_address}" +RollupManagerAddr="${zkevm_rollup_manager_address}" +SyncBlockChunkSize=100 +BlockFinality="LatestBlock" +# http://el-1-geth-lighthouse:8545 +URLRPCL1="${l1_rpc_addr}" +WaitForNewBlocksPeriod="100ms" +InitialBlock=0 + + +[NetworkConfig.L1] +L1ChainID = ${l1_chain_id} +PolAddr = "${pol_token_address}" +ZkEVMAddr = "${zkevm_rollup_address}" +RollupManagerAddr = "${zkevm_rollup_manager_address}" +GlobalExitRootManagerAddr = "${zkevm_global_exit_root_address}" diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go index 8b5073fb..125d73cf 100644 --- a/test/helpers/aggoracle_e2e.go +++ b/test/helpers/aggoracle_e2e.go @@ -117,7 +117,8 @@ func CommonSetup(t *testing.T) ( require.NoError(t, err) // Syncer dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, syncBlockChunkSize, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, periodRetry, retries) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, 
common.Address{}, syncBlockChunkSize, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, periodRetry, retries, + l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) diff --git a/test/helpers/mock_ethtxmanager.go b/test/helpers/mock_ethtxmanager.go index 848992f4..4dd103af 100644 --- a/test/helpers/mock_ethtxmanager.go +++ b/test/helpers/mock_ethtxmanager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package helpers diff --git a/tree/tree.go b/tree/tree.go index 5d307e8a..0e3a0c69 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -172,8 +172,11 @@ func (t *Tree) storeRoot(tx db.Txer, root types.Root) error { } // GetLastRoot returns the last processed root -func (t *Tree) GetLastRoot(ctx context.Context) (types.Root, error) { - return t.getLastRootWithTx(t.db) +func (t *Tree) GetLastRoot(tx db.Querier) (types.Root, error) { + if tx == nil { + tx = t.db + } + return t.getLastRootWithTx(tx) } func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) { @@ -223,10 +226,10 @@ func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (*types.Root return root, nil } -func (t *Tree) GetLeaf(ctx context.Context, index uint32, root common.Hash) (common.Hash, error) { +func (t *Tree) GetLeaf(tx db.Querier, index uint32, root common.Hash) (common.Hash, error) { currentNodeHash := root for h := int(types.DefaultHeight - 1); h >= 0; h-- { - currentNode, err := t.getRHTNode(t.db, currentNodeHash) + currentNode, err := t.getRHTNode(tx, currentNodeHash) if err != nil { return common.Hash{}, err } diff --git a/tree/tree_test.go b/tree/tree_test.go index dc2cfc9e..c2748856 100644 --- a/tree/tree_test.go +++ b/tree/tree_test.go @@ -55,7 +55,7 @@ func TestCheckExpectedRoot(t *testing.T) { addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) - expectedRoot, err := merkleTree.GetLastRoot(context.Background()) + expectedRoot, err := merkleTree.GetLastRoot(nil) require.NoError(t, err) addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) @@ -74,7 +74,7 @@ func TestCheckExpectedRoot(t *testing.T) { addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) - expectedRoot, err := merkleTree.GetLastRoot(context.Background()) + expectedRoot, err := merkleTree.GetLastRoot(nil) require.NoError(t, err) addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) @@ -134,7 +134,7 @@ func TestMTAddLeaf(t *testing.T) { } require.NoError(t, tx.Commit()) if len(testVector.ExistingLeaves) > 0 { - root, err := merkletree.GetLastRoot(ctx) + root, err := merkletree.GetLastRoot(nil) require.NoError(t, err) require.Equal(t, common.HexToHash(testVector.CurrentRoot), root.Hash) } @@ -149,7 +149,7 @@ func TestMTAddLeaf(t *testing.T) { require.NoError(t, err) require.NoError(t, tx.Commit()) - root, err := merkletree.GetLastRoot(ctx) + root, err := merkletree.GetLastRoot(nil) require.NoError(t, err) require.Equal(t, common.HexToHash(testVector.NewRoot), root.Hash) }) @@ -185,7 +185,7 @@ func TestMTGetProof(t *testing.T) { } require.NoError(t, tx.Commit()) - root, err := tre.GetLastRoot(ctx) + root, err := tre.GetLastRoot(nil) require.NoError(t, err) expectedRoot := common.HexToHash(testVector.ExpectedRoot) require.Equal(t, expectedRoot, root.Hash) @@ -198,3 +198,13 @@ func TestMTGetProof(t *testing.T) { }) } } + +func createTreeDBForTest(t *testing.T) *sql.DB { + t.Helper() + dbPath := "file::memory:?cache=shared" + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + 
treeDB, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + return treeDB +} diff --git a/tree/updatabletree_test.go b/tree/updatabletree_test.go new file mode 100644 index 00000000..a684fd0e --- /dev/null +++ b/tree/updatabletree_test.go @@ -0,0 +1,49 @@ +package tree_test + +import ( + "context" + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/tree" + "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestUpdatableTreeExploratory(t *testing.T) { + treeDB := createTreeDBForTest(t) + sut := tree.NewUpdatableTree(treeDB, "") + blockNum := uint64(1) + blockPosition := uint64(1) + leaf1 := types.Leaf{ + Index: 10, + Hash: common.HexToHash("0x123456"), + } + leaf2 := types.Leaf{ + Index: 1, + Hash: common.HexToHash("0x123478"), + } + ctx := context.TODO() + + tx, err := db.NewTx(ctx, treeDB) + require.NoError(t, err) + _, err = sut.UpsertLeaf(tx, blockNum, blockPosition, leaf1) + require.NoError(t, err) + + root2, err := sut.UpsertLeaf(tx, blockNum, blockPosition, leaf2) + require.NoError(t, err) + leaf1get, err := sut.GetLeaf(tx, leaf1.Index, root2) + require.NoError(t, err) + require.Equal(t, leaf1.Hash, leaf1get) + // If a leaf dont exist return 'not found' error + _, err = sut.GetLeaf(tx, 99, root2) + require.ErrorIs(t, err, db.ErrNotFound) + leaf99 := types.Leaf{ + Index: 99, + Hash: common.Hash{}, // 0x00000 + } + + _, err = sut.UpsertLeaf(tx, blockNum, blockPosition, leaf99) + require.Error(t, err, "insert 0x000 doesnt change root and return UNIQUE constraint failed: root.hash") +} From 473cc880b1f4bc6f863e8a1c7f68ff4982a180fd Mon Sep 17 00:00:00 2001 From: Goran Rojovic <100121253+goran-ethernal@users.noreply.github.com> Date: Mon, 30 Sep 2024 20:05:55 +0200 Subject: [PATCH 15/53] feat: New `zkevm-ethtx-manager` version (#98) * fix: update zkevm-ethtx-manager version * fix: UTs --- aggoracle/chaingersender/evm.go | 3 +-- aggregator/aggregator.go | 2 +- bridgesync/mock_l2_test.go | 2 +- claimsponsor/evmclaimsponsor.go | 4 ++-- go.mod | 2 +- go.sum | 2 ++ reorgdetector/types_test.go | 9 +++++++-- sequencesender/sequencesender.go | 19 +------------------ test/helpers/ethtxmanmock_e2e.go | 4 ++-- test/helpers/mock_ethtxmanager.go | 20 ++++++++++---------- 10 files changed, 28 insertions(+), 39 deletions(-) diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go index ee02771e..4ad053c4 100644 --- a/aggoracle/chaingersender/evm.go +++ b/aggoracle/chaingersender/evm.go @@ -31,7 +31,6 @@ type EthTxManager interface { Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) Add(ctx context.Context, to *common.Address, - forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, @@ -103,7 +102,7 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com if err != nil { return err } - id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), data, c.gasOffset, nil) + id, err := c.ethTxMan.Add(ctx, &c.gerAddr, big.NewInt(0), data, c.gasOffset, nil) if err != nil { return err } diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index a97d72f9..249f260a 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -978,7 +978,7 @@ func (a *Aggregator) settleDirect( return false } - monitoredTxID, err := a.ethTxManager.Add(ctx, to, nil, big.NewInt(0), data, a.cfg.GasOffset, nil) + monitoredTxID, err := a.ethTxManager.Add(ctx, to, big.NewInt(0), data, a.cfg.GasOffset, nil) if err 
!= nil { a.logger.Errorf("Error Adding TX to ethTxManager: %v", err) mTxLogger := ethtxmanager.CreateLogger(monitoredTxID, sender, to) diff --git a/bridgesync/mock_l2_test.go b/bridgesync/mock_l2_test.go index adbff51f..ef842d18 100644 --- a/bridgesync/mock_l2_test.go +++ b/bridgesync/mock_l2_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. package bridgesync diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go index 540f3203..5f394b14 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -36,7 +36,7 @@ type EthTxManager interface { ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus, ) ([]ethtxmanager.MonitoredTxResult, error) Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) - Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, + Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) } @@ -152,7 +152,7 @@ func (c *EVMClaimSponsor) sendClaim(ctx context.Context, claim *Claim) (string, if err != nil { return "", err } - id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, nil, big.NewInt(0), data, c.gasOffest, nil) + id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, big.NewInt(0), data, c.gasOffest, nil) if err != nil { return "", err } diff --git a/go.mod b/go.mod index aeaae312..6396bd68 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/0xPolygon/cdk-data-availability v0.0.9 github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 - github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 + github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 diff --git a/go.sum b/go.sum index e3544380..d20624ff 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUx github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234/go.mod h1:zBZWxwOHKlw+ghd9roQLgIkDZWA7e7qO3EsfQQT/+oQ= +github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447 h1:8nZjZrHZo+P9hTkhwtQ4J6eh9v4MTMtVb9jRDra8h0s= +github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447/go.mod h1:4iWpcwMOZJPapUzFB/HjTAM0X/gltHSEzQHE0lOt+eY= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 h1:8GbJBNsYO4zrqiBX++et8eQrJDEWEZuo3Ch3M416YnI= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= diff --git a/reorgdetector/types_test.go b/reorgdetector/types_test.go index 9e20e363..42f7f61c 100644 --- a/reorgdetector/types_test.go +++ b/reorgdetector/types_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" ) func 
TestBlockMap(t *testing.T) { @@ -50,8 +51,12 @@ func TestBlockMap(t *testing.T) { t.Parallel() copiedBm := bm.copy() - if !reflect.DeepEqual(bm, copiedBm) { - t.Errorf("add() returned incorrect result, expected: %v, got: %v", bm, copiedBm) + for i, header := range bm.headers { + copiedHeader, exists := copiedBm.headers[i] + require.True(t, exists) + if !reflect.DeepEqual(header, copiedHeader) { + t.Errorf("copy() returned incorrect result, expected: %v, got: %v", header, copiedHeader) + } } }) diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 1d76d3c0..363b5a10 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -32,8 +32,6 @@ type SequenceSender struct { logger *log.Logger ethTxManager *ethtxmanager.Client etherman *etherman.Client - currentNonce uint64 - nonceMutex sync.Mutex latestVirtualBatch uint64 // Latest virtualized batch obtained from L1 latestVirtualTime time.Time // Latest virtual batch timestamp latestSentToL1Batch uint64 // Latest batch sent to L1 @@ -141,14 +139,6 @@ func (s *SequenceSender) Start(ctx context.Context) { // Get current nonce var err error - s.nonceMutex.Lock() - s.currentNonce, err = s.etherman.CurrentNonce(ctx, s.cfg.L2Coinbase) - if err != nil { - s.logger.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err) - } else { - s.logger.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce) - } - s.nonceMutex.Unlock() // Get latest virtual state batch from L1 err = s.updateLatestVirtualBatch() @@ -601,18 +591,12 @@ func (s *SequenceSender) sendTx( ) error { // Params if new tx to send or resend a previous tx var paramTo *common.Address - var paramNonce *uint64 var paramData []byte var valueFromBatch uint64 var valueToBatch uint64 var valueToAddress common.Address if !resend { - s.nonceMutex.Lock() - nonce := s.currentNonce - s.currentNonce++ - s.nonceMutex.Unlock() - paramNonce = &nonce paramTo = to paramData = data valueFromBatch = fromBatch @@ -623,7 +607,6 @@ func (s *SequenceSender) sendTx( return errors.New("resend tx with nil hash monitor id") } paramTo = &s.ethTransactions[*txOldHash].To - paramNonce = &s.ethTransactions[*txOldHash].Nonce paramData = s.ethTxData[*txOldHash] valueFromBatch = s.ethTransactions[*txOldHash].FromBatch valueToBatch = s.ethTransactions[*txOldHash].ToBatch @@ -633,7 +616,7 @@ func (s *SequenceSender) sendTx( } // Add sequence tx - txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, paramNonce, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) + txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) if err != nil { s.logger.Errorf("error adding sequence to ethtxmanager: %v", err) return err diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go index b6753c22..ebc3513f 100644 --- a/test/helpers/ethtxmanmock_e2e.go +++ b/test/helpers/ethtxmanmock_e2e.go @@ -24,13 +24,13 @@ func NewEthTxManMock( const ( ArgToIndex = 1 - ArgDataIndex = 4 + ArgDataIndex = 3 ZeroValue = 0 ) ethTxMock := NewEthTxManagerMock(t) ethTxMock.On( - "Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + "Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Run(func(args mock.Arguments) { ctx := context.Background() nonce, err := client.Client().PendingNonceAt(ctx, auth.From) diff --git a/test/helpers/mock_ethtxmanager.go b/test/helpers/mock_ethtxmanager.go index 4dd103af..a75f57e9 100644 --- a/test/helpers/mock_ethtxmanager.go +++ b/test/helpers/mock_ethtxmanager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. package helpers @@ -21,9 +21,9 @@ type EthTxManagerMock struct { mock.Mock } -// Add provides a mock function with given fields: ctx, to, forcedNonce, value, data, gasOffset, sidecar -func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { - ret := _m.Called(ctx, to, forcedNonce, value, data, gasOffset, sidecar) +// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar +func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) if len(ret) == 0 { panic("no return value specified for Add") @@ -31,19 +31,19 @@ func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, forcedN var r0 common.Hash var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { - return rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar) } - if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { - r0 = rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(common.Hash) } } - if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { - r1 = rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar) } else { r1 = ret.Error(1) } From ba2b0778b930a720a69e9379c4edf2669e2805e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Tue, 1 Oct 2024 09:12:35 +0200 Subject: [PATCH 16/53] feat: remove data stream from sequence sender (#63) * feat: implement batch interface * feat: update sync lib * feat: enable test * feat: update config files * feat: fix test * feat: fix test * feat: batch string method * feat: update kurtosis commit * fix: comments * fix: comments * fix: linter * fix: vars * fix: error return * fix: regex * fix: hex to to bytes * feat: update kurtosis template --- .github/workflows/test-resequence.yml | 9 +- aggregator/aggregator.go | 8 +- config/default.go | 4 +- sequencesender.json | 1 + sequencesender/config.go | 10 +- sequencesender/ethtx.go | 388 ++++++ sequencesender/rpc.go | 95 ++ 
.../seqsendertypes/rpcbatch/rpcbatch.go | 132 ++ sequencesender/sequencesender.go | 1065 ++--------------- sequencesender/sequencesender_test.go | 117 -- test/config/test.config.toml | 3 +- test/config/test.kurtosis_template.toml | 3 +- 12 files changed, 752 insertions(+), 1083 deletions(-) create mode 100644 sequencesender.json create mode 100644 sequencesender/ethtx.go create mode 100644 sequencesender/rpc.go create mode 100644 sequencesender/seqsendertypes/rpcbatch/rpcbatch.go delete mode 100644 sequencesender/sequencesender_test.go diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 47bcaa37..71ebc7d7 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -3,7 +3,7 @@ on: push: branches: # Disable test for the moment as it takes too long - - "this-test-is-disabled" + - "test_disabled" concurrency: @@ -34,7 +34,7 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: 3debe0a4dd000e02f7e6bde3247432211bf0336f + ref: a7a80b7b5d98a69a23415ab0018e556257a6dfb6 path: kurtosis-cdk - name: Install Kurtosis CDK tools @@ -72,8 +72,11 @@ jobs: - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "jerrycgh/cdk-erigon:d5d04906f723f3f1d8c43c9e6baf3e18c27ff348"' params.yml /usr/local/bin/yq -i '.args.cdk_node_image = "cdk:local"' params.yml + /usr/local/bin/yq -i '.args.zkevm_rollup_fork_id = "12"' params.yml + /usr/local/bin/yq -i '.args.zkevm_prover_image = "hermeznetwork/zkevm-prover:v8.0.0-RC5-fork.12"' params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "jerrycgh/cdk-erigon:d5d04906f723f3f1d8c43c9e6baf3e18c27ff348"' params.yml + - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 249f260a..359021eb 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -1875,13 +1875,7 @@ func (a *Aggregator) getWitness(batchNumber uint64, url string, fullWitness bool return nil, err } - witnessString := strings.TrimLeft(witness, "0x") - if len(witnessString)%2 != 0 { - witnessString = "0" + witnessString - } - bytes := common.Hex2Bytes(witnessString) - - return bytes, nil + return common.FromHex(witness), nil } func printInputProver(logger *log.Logger, inputProver *prover.StatelessInputProver) { diff --git a/config/default.go b/config/default.go index e02a37ac..d10bf1f8 100644 --- a/config/default.go +++ b/config/default.go @@ -40,8 +40,8 @@ WaitPeriodPurgeTxFile = "15m" MaxPendingTx = 1 MaxBatchesForL1 = 300 BlockFinality = "FinalizedBlock" - [SequenceSender.StreamClient] - Server = "127.0.0.1:6900" +RPCURL = "" +GetBatchWaitInterval = "10s" [SequenceSender.EthTxManager] FrequencyToMonitorTxs = "1s" WaitTxToBeMined = "2m" diff --git a/sequencesender.json b/sequencesender.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/sequencesender.json @@ -0,0 +1 @@ +{} diff --git a/sequencesender/config.go b/sequencesender/config.go index 7b7aada0..3e138e49 100644 --- a/sequencesender/config.go +++ b/sequencesender/config.go @@ -54,9 +54,6 @@ type Config struct { // MaxPendingTx is the maximum number of pending transactions (those that are not in a final state) MaxPendingTx uint64 - // StreamClient is the config for the stream client - StreamClient StreamClientCfg `mapstructure:"StreamClient"` - // EthTxManager is the config for the ethtxmanager EthTxManager ethtxmanager.Config 
`mapstructure:"EthTxManager"` @@ -68,8 +65,11 @@ type Config struct { // BlockFinality indicates the status of the blocks that will be queried in order to sync BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - // SanityCheckRPCURL is the URL of the RPC server to perform sanity check regarding the number of blocks in a batch - SanityCheckRPCURL string `mapstructure:"SanityCheckRPCURL"` + // RPCURL is the URL of the RPC server + RPCURL string `mapstructure:"RPCURL"` + + // GetBatchWaitInterval is the time to wait to query for a new batch when there are no more batches available + GetBatchWaitInterval types.Duration `mapstructure:"GetBatchWaitInterval"` } // StreamClientCfg contains the data streamer's configuration properties diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go new file mode 100644 index 00000000..1898a8e8 --- /dev/null +++ b/sequencesender/ethtx.go @@ -0,0 +1,388 @@ +package sequencesender + +import ( + "context" + "encoding/json" + "errors" + "math" + "math/big" + "os" + "strings" + "time" + + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/ethereum/go-ethereum/common" +) + +type ethTxData struct { + Nonce uint64 `json:"nonce"` + Status string `json:"status"` + SentL1Timestamp time.Time `json:"sentL1Timestamp"` + StatusTimestamp time.Time `json:"statusTimestamp"` + FromBatch uint64 `json:"fromBatch"` + ToBatch uint64 `json:"toBatch"` + MinedAtBlock big.Int `json:"minedAtBlock"` + OnMonitor bool `json:"onMonitor"` + To common.Address `json:"to"` + StateHistory []string `json:"stateHistory"` + Txs map[common.Hash]ethTxAdditionalData `json:"txs"` + Gas uint64 `json:"gas"` +} + +type ethTxAdditionalData struct { + GasPrice *big.Int `json:"gasPrice,omitempty"` + RevertMessage string `json:"revertMessage,omitempty"` +} + +// sendTx adds transaction to the ethTxManager to send it to L1 +func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address, + fromBatch uint64, toBatch uint64, data []byte, gas uint64) error { + // Params if new tx to send or resend a previous tx + var ( + paramTo *common.Address + paramData []byte + valueFromBatch uint64 + valueToBatch uint64 + valueToAddress common.Address + ) + + if !resend { + paramTo = to + paramData = data + valueFromBatch = fromBatch + valueToBatch = toBatch + } else { + if txOldHash == nil { + log.Errorf("trying to resend a tx with nil hash") + return errors.New("resend tx with nil hash monitor id") + } + oldEthTx := s.ethTransactions[*txOldHash] + paramTo = &oldEthTx.To + paramData = s.ethTxData[*txOldHash] + valueFromBatch = oldEthTx.FromBatch + valueToBatch = oldEthTx.ToBatch + } + if paramTo != nil { + valueToAddress = *paramTo + } + + // Add sequence tx + txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) + if err != nil { + log.Errorf("error adding sequence to ethtxmanager: %v", err) + return err + } + + // Add new eth tx + txData := ethTxData{ + SentL1Timestamp: time.Now(), + StatusTimestamp: time.Now(), + Status: "*new", + FromBatch: valueFromBatch, + ToBatch: valueToBatch, + OnMonitor: true, + To: valueToAddress, + Gas: gas, + } + + // Add tx to internal structure + s.mutexEthTx.Lock() + s.ethTransactions[txHash] = &txData + txResults := make(map[common.Hash]ethtxmanager.TxResult, 0) + s.copyTxData(txHash, paramData, txResults) + 
err = s.getResultAndUpdateEthTx(ctx, txHash) + if err != nil { + log.Errorf("error getting result for tx %v: %v", txHash, err) + } + if !resend { + s.latestSentToL1Batch = valueToBatch + } else { + s.ethTransactions[*txOldHash].Status = "*resent" + } + s.mutexEthTx.Unlock() + + // Save sent sequences + err = s.saveSentSequencesTransactions(ctx) + if err != nil { + log.Errorf("error saving tx sequence sent, error: %v", err) + } + return nil +} + +// purgeEthTx purges transactions from memory structures +func (s *SequenceSender) purgeEthTx(ctx context.Context) { + // If sequence sending is stopped, do not purge + if s.seqSendingStopped { + return + } + + // Purge old transactions that are finalized + s.mutexEthTx.Lock() + timePurge := time.Now().Add(-s.cfg.WaitPeriodPurgeTxFile.Duration) + toPurge := make([]common.Hash, 0) + for hash, data := range s.ethTransactions { + if !data.StatusTimestamp.Before(timePurge) { + continue + } + + if !data.OnMonitor || data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { + toPurge = append(toPurge, hash) + + // Remove from tx monitor + if data.OnMonitor { + err := s.ethTxManager.Remove(ctx, hash) + if err != nil { + log.Warnf("error removing monitor tx %v from ethtxmanager: %v", hash, err) + } else { + log.Infof("removed monitor tx %v from ethtxmanager", hash) + } + } + } + } + + if len(toPurge) > 0 { + var firstPurged uint64 = math.MaxUint64 + var lastPurged uint64 + for i := 0; i < len(toPurge); i++ { + if s.ethTransactions[toPurge[i]].Nonce < firstPurged { + firstPurged = s.ethTransactions[toPurge[i]].Nonce + } + if s.ethTransactions[toPurge[i]].Nonce > lastPurged { + lastPurged = s.ethTransactions[toPurge[i]].Nonce + } + delete(s.ethTransactions, toPurge[i]) + delete(s.ethTxData, toPurge[i]) + } + log.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) + } + s.mutexEthTx.Unlock() +} + +// syncEthTxResults syncs results from L1 for transactions in the memory structure +func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam + s.mutexEthTx.Lock() + var ( + txPending uint64 + txSync uint64 + ) + for hash, data := range s.ethTransactions { + if data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { + continue + } + + err := s.getResultAndUpdateEthTx(ctx, hash) + if err != nil { + log.Errorf("error getting result for tx %v: %v", hash, err) + } + txSync++ + txStatus := s.ethTransactions[hash].Status + // Count if it is not in a final state + if s.ethTransactions[hash].OnMonitor && + txStatus != ethtxmanager.MonitoredTxStatusFailed.String() && + txStatus != ethtxmanager.MonitoredTxStatusSafe.String() && + txStatus != ethtxmanager.MonitoredTxStatusFinalized.String() { + txPending++ + } + } + s.mutexEthTx.Unlock() + + // Save updated sequences transactions + err := s.saveSentSequencesTransactions(ctx) + if err != nil { + log.Errorf("error saving tx sequence, error: %v", err) + } + + log.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) + return txPending, nil +} + +// syncAllEthTxResults syncs all tx results from L1 +func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { + // Get all results + results, err := s.ethTxManager.ResultsByStatus(ctx, nil) + if err != nil { + log.Warnf("error getting results for all tx: %v", err) + return err + } + + // Check and update tx status + numResults := len(results) + s.mutexEthTx.Lock() + for _, result := range results { + txSequence, exists := 
s.ethTransactions[result.ID] + if !exists { + log.Debugf("transaction %v missing in memory structure. Adding it", result.ID) + // No info: from/to batch and the sent timestamp + s.ethTransactions[result.ID] = ðTxData{ + SentL1Timestamp: time.Time{}, + StatusTimestamp: time.Now(), + OnMonitor: true, + Status: "*missing", + } + txSequence = s.ethTransactions[result.ID] + } + + s.updateEthTxResult(txSequence, result) + } + s.mutexEthTx.Unlock() + + // Save updated sequences transactions + err = s.saveSentSequencesTransactions(ctx) + if err != nil { + log.Errorf("error saving tx sequence, error: %v", err) + } + + log.Infof("%d tx results synchronized", numResults) + return nil +} + +// copyTxData copies tx data in the internal structure +func (s *SequenceSender) copyTxData( + txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult, +) { + s.ethTxData[txHash] = make([]byte, len(txData)) + copy(s.ethTxData[txHash], txData) + + s.ethTransactions[txHash].Txs = make(map[common.Hash]ethTxAdditionalData, 0) + for hash, result := range txsResults { + var gasPrice *big.Int + if result.Tx != nil { + gasPrice = result.Tx.GasPrice() + } + + add := ethTxAdditionalData{ + GasPrice: gasPrice, + RevertMessage: result.RevertMessage, + } + s.ethTransactions[txHash].Txs[hash] = add + } +} + +// updateEthTxResult handles updating transaction state +func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmanager.MonitoredTxResult) { + if txData.Status != txResult.Status.String() { + log.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) + txData.StatusTimestamp = time.Now() + stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + + ", " + txData.Status + ", " + txResult.Status.String() + txData.Status = txResult.Status.String() + txData.StateHistory = append(txData.StateHistory, stTrans) + + // Manage according to the state + statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() || + txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String() + if txData.Status == ethtxmanager.MonitoredTxStatusFailed.String() { + s.logFatalf("transaction %v result failed!") + } else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatchNumber { + s.latestVirtualTime = txData.StatusTimestamp + } + } + + // Update info received from L1 + txData.Nonce = txResult.Nonce + if txResult.To != nil { + txData.To = *txResult.To + } + if txResult.MinedAtBlockNumber != nil { + txData.MinedAtBlock = *txResult.MinedAtBlockNumber + } + s.copyTxData(txResult.ID, txResult.Data, txResult.Txs) +} + +// getResultAndUpdateEthTx updates the tx status from the ethTxManager +func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash common.Hash) error { + txData, exists := s.ethTransactions[txHash] + if !exists { + s.logger.Errorf("transaction %v not found in memory", txHash) + return errors.New("transaction not found in memory structure") + } + + txResult, err := s.ethTxManager.Result(ctx, txHash) + switch { + case errors.Is(err, ethtxmanager.ErrNotFound): + s.logger.Infof("transaction %v does not exist in ethtxmanager. 
Marking it", txHash) + txData.OnMonitor = false + // Resend tx + errSend := s.sendTx(ctx, true, &txHash, nil, 0, 0, nil, txData.Gas) + if errSend == nil { + txData.OnMonitor = false + } + + case err != nil: + s.logger.Errorf("error getting result for tx %v: %v", txHash, err) + return err + + default: + s.updateEthTxResult(txData, txResult) + } + + return nil +} + +// loadSentSequencesTransactions loads the file into the memory structure +func (s *SequenceSender) loadSentSequencesTransactions() error { + // Check if file exists + if _, err := os.Stat(s.cfg.SequencesTxFileName); os.IsNotExist(err) { + log.Infof("file not found %s: %v", s.cfg.SequencesTxFileName, err) + return nil + } else if err != nil { + log.Errorf("error opening file %s: %v", s.cfg.SequencesTxFileName, err) + return err + } + + // Read file + data, err := os.ReadFile(s.cfg.SequencesTxFileName) + if err != nil { + log.Errorf("error reading file %s: %v", s.cfg.SequencesTxFileName, err) + return err + } + + // Restore memory structure + s.mutexEthTx.Lock() + err = json.Unmarshal(data, &s.ethTransactions) + s.mutexEthTx.Unlock() + if err != nil { + log.Errorf("error decoding data from %s: %v", s.cfg.SequencesTxFileName, err) + return err + } + + return nil +} + +// saveSentSequencesTransactions saves memory structure into persistent file +func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) error { + var err error + + // Purge tx + s.purgeEthTx(ctx) + + // Create file + fileName := s.cfg.SequencesTxFileName[0:strings.IndexRune(s.cfg.SequencesTxFileName, '.')] + ".tmp" + s.sequencesTxFile, err = os.Create(fileName) + if err != nil { + log.Errorf("error creating file %s: %v", fileName, err) + return err + } + defer s.sequencesTxFile.Close() + + // Write data JSON encoded + encoder := json.NewEncoder(s.sequencesTxFile) + encoder.SetIndent("", " ") + s.mutexEthTx.Lock() + err = encoder.Encode(s.ethTransactions) + s.mutexEthTx.Unlock() + if err != nil { + log.Errorf("error writing file %s: %v", fileName, err) + return err + } + + // Rename the new file + err = os.Rename(fileName, s.cfg.SequencesTxFileName) + if err != nil { + log.Errorf("error renaming file %s to %s: %v", fileName, s.cfg.SequencesTxFileName, err) + return err + } + + return nil +} diff --git a/sequencesender/rpc.go b/sequencesender/rpc.go new file mode 100644 index 00000000..a70f5c64 --- /dev/null +++ b/sequencesender/rpc.go @@ -0,0 +1,95 @@ +package sequencesender + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" + "github.com/0xPolygon/cdk/state" + "github.com/ethereum/go-ethereum/common" +) + +func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (*rpcbatch.RPCBatch, error) { + type zkEVMBatch struct { + Blocks []string `json:"blocks"` + BatchL2Data string `json:"batchL2Data"` + Coinbase string `json:"coinbase"` + GlobalExitRoot string `json:"globalExitRoot"` + Closed bool `json:"closed"` + Timestamp string `json:"timestamp"` + } + + zkEVMBatchData := zkEVMBatch{} + + log.Infof("Getting batch %d from RPC", batchNumber) + + response, err := rpc.JSONRPCCall(s.cfg.RPCURL, "zkevm_getBatchByNumber", batchNumber) + if err != nil { + return nil, err + } + + // Check if the response is nil + if response.Result == nil { + return nil, state.ErrNotFound + } + + // Check if the response is an error + if response.Error != nil { + return nil, fmt.Errorf("error in the response calling zkevm_getBatchByNumber: 
%v", response.Error) + } + + // Get the batch number from the response hex string + err = json.Unmarshal(response.Result, &zkEVMBatchData) + if err != nil { + return nil, fmt.Errorf("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: %w", err) + } + + rpcBatch, err := rpcbatch.New(batchNumber, zkEVMBatchData.Blocks, common.FromHex(zkEVMBatchData.BatchL2Data), + common.HexToHash(zkEVMBatchData.GlobalExitRoot), common.HexToAddress(zkEVMBatchData.Coinbase), zkEVMBatchData.Closed) + if err != nil { + return nil, fmt.Errorf("error creating the rpc batch: %w", err) + } + + if len(zkEVMBatchData.Blocks) > 0 { + lastL2BlockTimestamp, err := s.getL2BlockTimestampFromRPC(zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1]) + if err != nil { + return nil, fmt.Errorf("error getting the last l2 block timestamp from the rpc: %w", err) + } + rpcBatch.SetLastL2BLockTimestamp(lastL2BlockTimestamp) + } else { + log.Infof("No blocks in the batch, setting the last l2 block timestamp from the batch data") + rpcBatch.SetLastL2BLockTimestamp(new(big.Int).SetBytes(common.FromHex(zkEVMBatchData.Timestamp)).Uint64()) + } + + return rpcBatch, nil +} + +func (s *SequenceSender) getL2BlockTimestampFromRPC(blockHash string) (uint64, error) { + type zkeEVML2Block struct { + Timestamp string `json:"timestamp"` + } + + log.Infof("Getting l2 block timestamp from RPC. Block hash: %s", blockHash) + + response, err := rpc.JSONRPCCall(s.cfg.RPCURL, "eth_getBlockByHash", blockHash, false) + if err != nil { + return 0, err + } + + // Check if the response is an error + if response.Error != nil { + return 0, fmt.Errorf("error in the response calling eth_getBlockByHash: %v", response.Error) + } + + // Get the l2 block from the response + l2Block := zkeEVML2Block{} + err = json.Unmarshal(response.Result, &l2Block) + if err != nil { + return 0, fmt.Errorf("error unmarshalling the l2 block from the response calling eth_getBlockByHash: %w", err) + } + + return new(big.Int).SetBytes(common.FromHex(l2Block.Timestamp)).Uint64(), nil +} diff --git a/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go b/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go new file mode 100644 index 00000000..fafc1841 --- /dev/null +++ b/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go @@ -0,0 +1,132 @@ +package rpcbatch + +import ( + "fmt" + + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/ethereum/go-ethereum/common" +) + +type RPCBatch struct { + batchNumber uint64 `json:"batchNumber"` + blockHashes []string `json:"blocks"` + batchL2Data []byte `json:"batchL2Data"` + globalExitRoot common.Hash `json:"globalExitRoot"` + coinbase common.Address `json:"coinbase"` + closed bool `json:"closed"` + lastL2BlockTimestamp uint64 `json:"lastL2BlockTimestamp"` + l1InfoTreeIndex uint32 `json:"l1InfoTreeIndex"` +} + +func New(batchNumber uint64, blockHashes []string, batchL2Data []byte, globalExitRoot common.Hash, + coinbase common.Address, closed bool) (*RPCBatch, error) { + return &RPCBatch{ + batchNumber: batchNumber, + blockHashes: blockHashes, + batchL2Data: batchL2Data, + globalExitRoot: globalExitRoot, + coinbase: coinbase, + closed: closed, + }, nil +} + +// DeepCopy +func (b *RPCBatch) DeepCopy() seqsendertypes.Batch { + return &RPCBatch{ + batchNumber: b.batchNumber, + blockHashes: b.blockHashes, + batchL2Data: b.batchL2Data, + globalExitRoot: b.globalExitRoot, + coinbase: b.coinbase, + closed: b.closed, + lastL2BlockTimestamp: b.lastL2BlockTimestamp, + l1InfoTreeIndex: b.l1InfoTreeIndex, + } +} + +// 
LastCoinbase +func (b *RPCBatch) LastCoinbase() common.Address { + return b.coinbase +} + +// ForcedBatchTimestamp +func (b *RPCBatch) ForcedBatchTimestamp() uint64 { + return 0 +} + +// ForcedGlobalExitRoot +func (b *RPCBatch) ForcedGlobalExitRoot() common.Hash { + return common.Hash{} +} + +// ForcedBlockHashL1 +func (b *RPCBatch) ForcedBlockHashL1() common.Hash { + return common.Hash{} +} + +// L2Data +func (b *RPCBatch) L2Data() []byte { + return b.batchL2Data +} + +// LastL2BLockTimestamp +func (b *RPCBatch) LastL2BLockTimestamp() uint64 { + return b.lastL2BlockTimestamp +} + +// BatchNumber +func (b *RPCBatch) BatchNumber() uint64 { + return b.batchNumber +} + +// GlobalExitRoot +func (b *RPCBatch) GlobalExitRoot() common.Hash { + return b.globalExitRoot +} + +// L1InfoTreeIndex +func (b *RPCBatch) L1InfoTreeIndex() uint32 { + return b.l1InfoTreeIndex +} + +// SetL2Data +func (b *RPCBatch) SetL2Data(data []byte) { + b.batchL2Data = data +} + +// SetLastCoinbase +func (b *RPCBatch) SetLastCoinbase(address common.Address) { + b.coinbase = address +} + +// SetLastL2BLockTimestamp +func (b *RPCBatch) SetLastL2BLockTimestamp(ts uint64) { + b.lastL2BlockTimestamp = ts +} + +// SetL1InfoTreeIndex +func (b *RPCBatch) SetL1InfoTreeIndex(index uint32) { + b.l1InfoTreeIndex = index +} + +// String +func (b *RPCBatch) String() string { + return fmt.Sprintf( + "Batch/RPC: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, ForcedBlockHashL1: %x"+ + ", L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, GlobalExitRoot: %x, L1InfoTreeIndex: %d", + b.LastCoinbase().String(), + b.ForcedBatchTimestamp(), + b.ForcedGlobalExitRoot().String(), + b.ForcedBlockHashL1().String(), + b.L2Data(), + b.LastL2BLockTimestamp(), + b.BatchNumber(), + b.GlobalExitRoot().String(), + b.L1InfoTreeIndex(), + ) +} + +// IsClosed +func (b *RPCBatch) IsClosed() bool { + return b.closed +} diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 363b5a10..509b1a60 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -2,82 +2,51 @@ package sequencesender import ( "context" - "encoding/json" "errors" "fmt" - "math" - "math/big" "os" - "strings" "sync" "time" - "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" - "github.com/0xPolygon/cdk/state/datastream" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" "github.com/ethereum/go-ethereum/common" - "google.golang.org/protobuf/proto" ) +const ten = 10 + // SequenceSender represents a sequence sender type SequenceSender struct { - cfg Config - logger *log.Logger - ethTxManager *ethtxmanager.Client - etherman *etherman.Client - latestVirtualBatch uint64 // Latest virtualized batch obtained from L1 - latestVirtualTime time.Time // Latest virtual batch timestamp - latestSentToL1Batch uint64 // Latest batch sent to L1 - wipBatch uint64 // Work in progress batch - sequenceList []uint64 // Sequence of batch number to be send to L1 - sequenceData map[uint64]*sequenceData // All the batch data indexed by batch number - mutexSequence sync.Mutex // Mutex to access sequenceData and sequenceList - ethTransactions 
map[common.Hash]*ethTxData // All the eth tx sent to L1 indexed by hash
-	ethTxData               map[common.Hash][]byte     // Tx data send to or received from L1
-	mutexEthTx              sync.Mutex                 // Mutex to access ethTransactions
-	sequencesTxFile         *os.File                   // Persistence of sent transactions
-	validStream             bool                       // Not valid while receiving data before the desired batch
-	fromStreamBatch         uint64                     // Initial batch to connect to the streaming
-	latestStreamBatch       uint64                     // Latest batch received by the streaming
-	seqSendingStopped       bool                       // If there is a critical error
-	prevStreamEntry         *datastreamer.FileEntry
-	streamClient            *datastreamer.StreamClient
-	TxBuilder               txbuilder.TxBuilder
-	latestVirtualBatchLock  sync.Mutex
+	cfg                      Config
+	logger                   *log.Logger
+	ethTxManager             *ethtxmanager.Client
+	etherman                 *etherman.Client
+	latestVirtualBatchNumber uint64                     // Latest virtualized batch obtained from L1
+	latestVirtualTime        time.Time                  // Latest virtual batch timestamp
+	latestSentToL1Batch      uint64                     // Latest batch sent to L1
+	sequenceList             []uint64                   // Sequence of batch numbers to be sent to L1
+	sequenceData             map[uint64]*sequenceData   // All the batch data indexed by batch number
+	mutexSequence            sync.Mutex                 // Mutex to access sequenceData and sequenceList
+	ethTransactions          map[common.Hash]*ethTxData // All the eth tx sent to L1 indexed by hash
+	ethTxData                map[common.Hash][]byte     // Tx data sent to or received from L1
+	mutexEthTx               sync.Mutex                 // Mutex to access ethTransactions
+	sequencesTxFile          *os.File                   // Persistence of sent transactions
+	validStream              bool                       // Not valid while receiving data before the desired batch
+	seqSendingStopped        bool                       // If there is a critical error
+	TxBuilder                txbuilder.TxBuilder
+	latestVirtualBatchLock   sync.Mutex
 }
 
 type sequenceData struct {
 	batchClosed bool
 	batch       seqsendertypes.Batch
 	batchRaw    *state.BatchRawV2
-	batchType   datastream.BatchType
-}
-
-type ethTxData struct {
-	Nonce           uint64                              `json:"nonce"`
-	Status          string                              `json:"status"`
-	SentL1Timestamp time.Time                           `json:"sentL1Timestamp"`
-	StatusTimestamp time.Time                           `json:"statusTimestamp"`
-	FromBatch       uint64                              `json:"fromBatch"`
-	ToBatch         uint64                              `json:"toBatch"`
-	MinedAtBlock    big.Int                             `json:"minedAtBlock"`
-	OnMonitor       bool                                `json:"onMonitor"`
-	To              common.Address                      `json:"to"`
-	StateHistory    []string                            `json:"stateHistory"`
-	Txs             map[common.Hash]ethTxAdditionalData `json:"txs"`
-	Gas             uint64                              `json:"gas"`
-}
-
-type ethTxAdditionalData struct {
-	GasPrice      *big.Int `json:"gasPrice,omitempty"`
-	RevertMessage string   `json:"revertMessage,omitempty"`
 }
 
 // New inits sequence sender
@@ -92,7 +61,6 @@ func New(cfg Config, logger *log.Logger,
 		ethTxData:         make(map[common.Hash][]byte),
 		sequenceData:      make(map[uint64]*sequenceData),
 		validStream:       false,
-		latestStreamBatch: 0,
 		seqSendingStopped: false,
 		TxBuilder:         txBuilder,
 	}
@@ -119,16 +87,6 @@ func New(cfg Config, logger *log.Logger,
 		return nil, err
 	}
 
-	// Create datastream client
-	s.streamClient, err = datastreamer.NewClient(s.cfg.StreamClient.Server, 1)
-	if err != nil {
-		s.logger.Fatalf("failed to create stream client, error: %v", err)
-	} else {
-		s.logger.Infof("new stream client")
-	}
-	// Set func to handle the streaming
-	s.streamClient.SetProcessEntryFunc(s.handleReceivedDataStream)
-
 	return &s, nil
 }
 
@@ -141,7 +99,7 @@ func (s *SequenceSender) Start(ctx context.Context) {
 	var err error
 
 	// Get latest virtual state batch from L1
-	err = s.updateLatestVirtualBatch()
+	err = s.getLatestVirtualBatch()
 	if err != nil {
 		s.logger.Fatalf("error getting latest sequenced batch, error: %v", err)
 	}
@@ -152,39 +110,87 @@ func (s *SequenceSender)
Start(ctx context.Context) { s.logger.Fatalf("failed to sync monitored tx results, error: %v", err) } - // Start datastream client - err = s.streamClient.Start() - if err != nil { - s.logger.Fatalf("failed to start stream client, error: %v", err) - } + // Current batch to sequence + s.latestSentToL1Batch = s.latestVirtualBatchNumber - // Set starting point of the streaming - s.fromStreamBatch = s.latestVirtualBatch + // Start retrieving batches from RPC + go func() { + err := s.batchRetrieval(ctx) + if err != nil { + s.logFatalf("error retrieving batches from RPC: %v", err) + } + }() - bookmark := &datastream.BookMark{ - Type: datastream.BookmarkType_BOOKMARK_TYPE_BATCH, - Value: s.fromStreamBatch, - } + // Start sequence sending + go s.sequenceSending(ctx) +} - marshalledBookmark, err := proto.Marshal(bookmark) - if err != nil { - s.logger.Fatalf("failed to marshal bookmark, error: %v", err) - } +// batchRetrieval keeps reading batches from the RPC +func (s *SequenceSender) batchRetrieval(ctx context.Context) error { + ticker := time.NewTicker(s.cfg.GetBatchWaitInterval.Duration) + defer ticker.Stop() + + currentBatchNumber := s.latestVirtualBatchNumber + 1 + for { + select { + case <-ctx.Done(): + s.logger.Info("context cancelled, stopping batch retrieval") + return ctx.Err() + default: + // Try to retrieve batch from RPC + rpcBatch, err := s.getBatchFromRPC(currentBatchNumber) + if err != nil { + if errors.Is(err, ethtxmanager.ErrNotFound) { + s.logger.Infof("batch %d not found in RPC", currentBatchNumber) + } else { + s.logger.Errorf("error getting batch %d from RPC: %v", currentBatchNumber, err) + } + <-ticker.C + continue + } - s.logger.Infof("stream client from bookmark %v", bookmark) + // Check if the batch is closed + if !rpcBatch.IsClosed() { + s.logger.Infof("batch %d is not closed yet", currentBatchNumber) + <-ticker.C + continue + } - // Current batch to sequence - s.wipBatch = s.latestVirtualBatch + 1 - s.latestSentToL1Batch = s.latestVirtualBatch + // Process and decode the batch + if err := s.populateSequenceData(rpcBatch, currentBatchNumber); err != nil { + return err + } - // Start sequence sending - go s.sequenceSending(ctx) + // Increment the batch number for the next iteration + currentBatchNumber++ + } + } +} + +func (s *SequenceSender) populateSequenceData(rpcBatch *rpcbatch.RPCBatch, batchNumber uint64) error { + s.mutexSequence.Lock() + defer s.mutexSequence.Unlock() + + s.sequenceList = append(s.sequenceList, batchNumber) - // Start receiving the streaming - err = s.streamClient.ExecCommandStartBookmark(marshalledBookmark) + // Decode batch to retrieve the l1 info tree index + batchRaw, err := state.DecodeBatchV2(rpcBatch.L2Data()) if err != nil { - s.logger.Fatalf("failed to connect to the streaming: %v", err) + s.logger.Errorf("Failed to decode batch data for batch %d, err: %v", batchNumber, err) + return err + } + + if len(batchRaw.Blocks) > 0 { + rpcBatch.SetL1InfoTreeIndex(batchRaw.Blocks[len(batchRaw.Blocks)-1].IndexL1InfoTree) + } + + s.sequenceData[batchNumber] = &sequenceData{ + batchClosed: rpcBatch.IsClosed(), + batch: rpcBatch, + batchRaw: batchRaw, } + + return nil } // sequenceSending starts loop to check if there are sequences to send and sends them if it's convenient @@ -209,7 +215,7 @@ func (s *SequenceSender) purgeSequences() { toPurge := make([]uint64, 0) for i := 0; i < len(s.sequenceList); i++ { batchNumber := s.sequenceList[i] - if batchNumber <= s.latestVirtualBatch { + if batchNumber <= s.latestVirtualBatchNumber { truncateUntil = i + 1 
toPurge = append(toPurge, batchNumber) } @@ -233,218 +239,11 @@ func (s *SequenceSender) purgeSequences() { } } -// purgeEthTx purges transactions from memory structures -func (s *SequenceSender) purgeEthTx(ctx context.Context) { - // If sequence sending is stopped, do not purge - if s.seqSendingStopped { - return - } - - // Purge old transactions that are finalized - s.mutexEthTx.Lock() - defer s.mutexEthTx.Unlock() - timePurge := time.Now().Add(-s.cfg.WaitPeriodPurgeTxFile.Duration) - toPurge := make([]common.Hash, 0) - for hash, data := range s.ethTransactions { - if !data.StatusTimestamp.Before(timePurge) { - continue - } - - if !data.OnMonitor || data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { - toPurge = append(toPurge, hash) - - // Remove from tx monitor - if data.OnMonitor { - err := s.ethTxManager.Remove(ctx, hash) - if err != nil { - s.logger.Warnf("error removing monitor tx %v from ethtxmanager: %v", hash, err) - } else { - s.logger.Infof("removed monitor tx %v from ethtxmanager", hash) - } - } - } - } - - if len(toPurge) > 0 { - var firstPurged uint64 = math.MaxUint64 - var lastPurged uint64 - for i := 0; i < len(toPurge); i++ { - if s.ethTransactions[toPurge[i]].Nonce < firstPurged { - firstPurged = s.ethTransactions[toPurge[i]].Nonce - } - if s.ethTransactions[toPurge[i]].Nonce > lastPurged { - lastPurged = s.ethTransactions[toPurge[i]].Nonce - } - delete(s.ethTransactions, toPurge[i]) - delete(s.ethTxData, toPurge[i]) - } - s.logger.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) - } -} - -// syncEthTxResults syncs results from L1 for transactions in the memory structure -func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam - s.mutexEthTx.Lock() - var txPending uint64 - var txSync uint64 - for hash, data := range s.ethTransactions { - if data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { - continue - } - - _ = s.getResultAndUpdateEthTx(ctx, hash) - txSync++ - txStatus := s.ethTransactions[hash].Status - // Count if it is not in a final state - if s.ethTransactions[hash].OnMonitor && - txStatus != ethtxmanager.MonitoredTxStatusFailed.String() && - txStatus != ethtxmanager.MonitoredTxStatusSafe.String() && - txStatus != ethtxmanager.MonitoredTxStatusFinalized.String() { - txPending++ - } - } - s.mutexEthTx.Unlock() - - // Save updated sequences transactions - err := s.saveSentSequencesTransactions(ctx) - if err != nil { - s.logger.Errorf("error saving tx sequence, error: %v", err) - } - - s.logger.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) - return txPending, nil -} - -// syncAllEthTxResults syncs all tx results from L1 -func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { - // Get all results - results, err := s.ethTxManager.ResultsByStatus(ctx, nil) - if err != nil { - s.logger.Warnf("error getting results for all tx: %v", err) - return err - } - - // Check and update tx status - numResults := len(results) - s.mutexEthTx.Lock() - for _, result := range results { - txSequence, exists := s.ethTransactions[result.ID] - if !exists { - s.logger.Infof("transaction %v missing in memory structure. 
Adding it", result.ID) - // No info: from/to batch and the sent timestamp - s.ethTransactions[result.ID] = ðTxData{ - SentL1Timestamp: time.Time{}, - StatusTimestamp: time.Now(), - OnMonitor: true, - Status: "*missing", - } - txSequence = s.ethTransactions[result.ID] - } - - s.updateEthTxResult(txSequence, result) - } - s.mutexEthTx.Unlock() - - // Save updated sequences transactions - err = s.saveSentSequencesTransactions(ctx) - if err != nil { - s.logger.Errorf("error saving tx sequence, error: %v", err) - } - - s.logger.Infof("%d tx results synchronized", numResults) - return nil -} - -// copyTxData copies tx data in the internal structure -func (s *SequenceSender) copyTxData( - txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult, -) { - s.ethTxData[txHash] = make([]byte, len(txData)) - copy(s.ethTxData[txHash], txData) - - s.ethTransactions[txHash].Txs = make(map[common.Hash]ethTxAdditionalData, 0) - for hash, result := range txsResults { - var gasPrice *big.Int - if result.Tx != nil { - gasPrice = result.Tx.GasPrice() - } - - add := ethTxAdditionalData{ - GasPrice: gasPrice, - RevertMessage: result.RevertMessage, - } - s.ethTransactions[txHash].Txs[hash] = add - } -} - -// updateEthTxResult handles updating transaction state -func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmanager.MonitoredTxResult) { - if txData.Status != txResult.Status.String() { - s.logger.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) - txData.StatusTimestamp = time.Now() - stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " + - txData.Status + ", " + txResult.Status.String() - - txData.Status = txResult.Status.String() - txData.StateHistory = append(txData.StateHistory, stTrans) - - // Manage according to the state - statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() || - txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String() - - if txData.Status == ethtxmanager.MonitoredTxStatusFailed.String() { - s.logFatalf("transaction %v result failed!") - } else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatch { - s.latestVirtualTime = txData.StatusTimestamp - } - } - - // Update info received from L1 - txData.Nonce = txResult.Nonce - if txResult.To != nil { - txData.To = *txResult.To - } - if txResult.MinedAtBlockNumber != nil { - txData.MinedAtBlock = *txResult.MinedAtBlockNumber - } - s.copyTxData(txResult.ID, txResult.Data, txResult.Txs) -} - -// getResultAndUpdateEthTx updates the tx status from the ethTxManager -func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash common.Hash) error { - txData, exists := s.ethTransactions[txHash] - if !exists { - s.logger.Errorf("transaction %v not found in memory", txHash) - return errors.New("transaction not found in memory structure") - } - - txResult, err := s.ethTxManager.Result(ctx, txHash) - switch { - case errors.Is(err, ethtxmanager.ErrNotFound): - s.logger.Infof("transaction %v does not exist in ethtxmanager. 
Marking it", txHash) - txData.OnMonitor = false - // Resend tx - errSend := s.sendTx(ctx, true, &txHash, nil, 0, 0, nil, txData.Gas) - if errSend == nil { - txData.OnMonitor = false - } - - case err != nil: - s.logger.Errorf("error getting result for tx %v: %v", txHash, err) - return err - - default: - s.updateEthTxResult(txData, txResult) - } - - return nil -} - // tryToSendSequence checks if there is a sequence and it's worth it to send to L1 func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Update latest virtual batch s.logger.Infof("updating virtual batch") - err := s.updateLatestVirtualBatch() + err := s.getLatestVirtualBatch() if err != nil { return } @@ -479,13 +278,12 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Send sequences to L1 - firstSequence := sequence.FirstBatch() - lastSequence := sequence.LastBatch() - lastL2BlockTimestamp := lastSequence.LastL2BLockTimestamp() + firstBatch := sequence.FirstBatch() + lastBatch := sequence.LastBatch() + lastL2BlockTimestamp := lastBatch.LastL2BLockTimestamp() s.logger.Debugf(sequence.String()) - s.logger.Infof("sending sequences to L1. From batch %d to batch %d", - firstSequence.BatchNumber(), lastSequence.BatchNumber()) + s.logger.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) // Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp // of the last L2 block in the sequence @@ -504,7 +302,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+ "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, - lastSequence.BatchNumber(), lastL2BlockTimestamp, timeMargin, + lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin, ) time.Sleep(time.Duration(waitTime) * time.Second) } else { @@ -512,7 +310,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { "in the sequence is greater than %d seconds", lastL1BlockHeader.Number, lastL1BlockHeader.Time, - lastSequence.BatchNumber, + lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin, ) @@ -531,22 +329,19 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { if !elapsed { s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+ "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", - waitTime, currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin) + waitTime, currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) time.Sleep(time.Duration(waitTime) * time.Second) } else { s.logger.Infof("sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+ "in the sequence is also greater than %d seconds", - currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin) + currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) break } } // Send sequences to L1 s.logger.Debugf(sequence.String()) - s.logger.Infof( - "sending sequences to L1. From batch %d to batch %d", - firstSequence.BatchNumber(), lastSequence.BatchNumber(), - ) + s.logger.Infof("sending sequences to L1. 
From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) tx, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { @@ -555,12 +350,12 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Get latest virtual state batch from L1 - err = s.updateLatestVirtualBatch() + err = s.getLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } - sequence.SetLastVirtualBatchNumber(s.latestVirtualBatch) + sequence.SetLastVirtualBatchNumber(s.latestVirtualBatchNumber) txToEstimateGas, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { @@ -575,7 +370,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Add sequence tx - err = s.sendTx(ctx, false, nil, tx.To(), firstSequence.BatchNumber(), lastSequence.BatchNumber(), tx.Data(), gas) + err = s.sendTx(ctx, false, nil, tx.To(), firstBatch.BatchNumber(), lastBatch.BatchNumber(), tx.Data(), gas) if err != nil { return } @@ -584,79 +379,6 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { s.purgeSequences() } -// sendTx adds transaction to the ethTxManager to send it to L1 -func (s *SequenceSender) sendTx( - ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address, - fromBatch uint64, toBatch uint64, data []byte, gas uint64, -) error { - // Params if new tx to send or resend a previous tx - var paramTo *common.Address - var paramData []byte - var valueFromBatch uint64 - var valueToBatch uint64 - var valueToAddress common.Address - - if !resend { - paramTo = to - paramData = data - valueFromBatch = fromBatch - valueToBatch = toBatch - } else { - if txOldHash == nil { - s.logger.Errorf("trying to resend a tx with nil hash") - return errors.New("resend tx with nil hash monitor id") - } - paramTo = &s.ethTransactions[*txOldHash].To - paramData = s.ethTxData[*txOldHash] - valueFromBatch = s.ethTransactions[*txOldHash].FromBatch - valueToBatch = s.ethTransactions[*txOldHash].ToBatch - } - if paramTo != nil { - valueToAddress = *paramTo - } - - // Add sequence tx - txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) - if err != nil { - s.logger.Errorf("error adding sequence to ethtxmanager: %v", err) - return err - } - - // Add new eth tx - txData := ethTxData{ - SentL1Timestamp: time.Now(), - StatusTimestamp: time.Now(), - Status: "*new", - FromBatch: valueFromBatch, - ToBatch: valueToBatch, - OnMonitor: true, - To: valueToAddress, - Gas: gas, - } - - // Add tx to internal structure - s.mutexEthTx.Lock() - s.ethTransactions[txHash] = &txData - txResults := make(map[common.Hash]ethtxmanager.TxResult, 0) - s.copyTxData(txHash, paramData, txResults) - _ = s.getResultAndUpdateEthTx(ctx, txHash) - if !resend { - s.latestSentToL1Batch = valueToBatch - } else { - s.ethTransactions[*txOldHash].Status = "*resent" - } - s.mutexEthTx.Unlock() - - // Save sent sequences - err = s.saveSentSequencesTransactions(ctx) - if err != nil { - s.logger.Errorf("error saving tx sequence sent, error: %v", err) - } - return nil -} - -// getSequencesToSend generates sequences to be sent to L1. 
-// Empty array means there are no sequences to send or it's not worth sending func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes.Sequence, error) { // Add sequences until too big for a single L1 tx or last batch is reached s.mutexSequence.Lock() @@ -665,7 +387,7 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes sequenceBatches := make([]seqsendertypes.Batch, 0) for i := 0; i < len(s.sequenceList); i++ { batchNumber := s.sequenceList[i] - if batchNumber <= s.latestVirtualBatch || batchNumber <= s.latestSentToL1Batch { + if batchNumber <= s.latestVirtualBatchNumber || batchNumber <= s.latestSentToL1Batch { continue } @@ -678,12 +400,6 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes ) } - // Check if batch is closed - if !s.sequenceData[batchNumber].batchClosed { - // Reached current wip batch - break - } - // New potential batch to add to the sequence batch := s.sequenceData[batchNumber].batch.DeepCopy() @@ -734,524 +450,22 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes return nil, nil } -// loadSentSequencesTransactions loads the file into the memory structure -func (s *SequenceSender) loadSentSequencesTransactions() error { - // Check if file exists - if _, err := os.Stat(s.cfg.SequencesTxFileName); os.IsNotExist(err) { - s.logger.Infof("file not found %s: %v", s.cfg.SequencesTxFileName, err) - return nil - } else if err != nil { - s.logger.Errorf("error opening file %s: %v", s.cfg.SequencesTxFileName, err) - return err - } - - // Read file - data, err := os.ReadFile(s.cfg.SequencesTxFileName) - if err != nil { - s.logger.Errorf("error reading file %s: %v", s.cfg.SequencesTxFileName, err) - return err - } - - // Restore memory structure - s.mutexEthTx.Lock() - err = json.Unmarshal(data, &s.ethTransactions) - s.mutexEthTx.Unlock() - if err != nil { - s.logger.Errorf("error decoding data from %s: %v", s.cfg.SequencesTxFileName, err) - return err - } - - return nil -} - -// saveSentSequencesTransactions saves memory structure into persistent file -func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) error { - var err error - - // Purge tx - s.purgeEthTx(ctx) - - // Ceate file - fileName := s.cfg.SequencesTxFileName[0:strings.IndexRune(s.cfg.SequencesTxFileName, '.')] + ".tmp" - s.sequencesTxFile, err = os.Create(fileName) - if err != nil { - s.logger.Errorf("error creating file %s: %v", fileName, err) - return err - } - defer s.sequencesTxFile.Close() - - // Write data JSON encoded - encoder := json.NewEncoder(s.sequencesTxFile) - encoder.SetIndent("", " ") - s.mutexEthTx.Lock() - err = encoder.Encode(s.ethTransactions) - s.mutexEthTx.Unlock() - if err != nil { - s.logger.Errorf("error writing file %s: %v", fileName, err) - return err - } - - // Rename the new file - err = os.Rename(fileName, s.cfg.SequencesTxFileName) - if err != nil { - s.logger.Errorf("error renaming file %s to %s: %v", fileName, s.cfg.SequencesTxFileName, err) - return err - } - - return nil -} - -func (s *SequenceSender) entryTypeToString(entryType datastream.EntryType) string { - switch entryType { - case datastream.EntryType_ENTRY_TYPE_BATCH_START: - return "BatchStart" - case datastream.EntryType_ENTRY_TYPE_L2_BLOCK: - return "L2Block" - case datastream.EntryType_ENTRY_TYPE_TRANSACTION: - return "Transaction" - case datastream.EntryType_ENTRY_TYPE_BATCH_END: - return "BatchEnd" - default: - return fmt.Sprintf("%d", entryType) - } -} - -// 
handleReceivedDataStream manages the events received by the streaming -func (s *SequenceSender) handleReceivedDataStream( - entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer, -) error { - dsType := datastream.EntryType(entry.Type) - - var prevEntryType datastream.EntryType - if s.prevStreamEntry != nil { - prevEntryType = datastream.EntryType(s.prevStreamEntry.Type) - } - - switch dsType { - case datastream.EntryType_ENTRY_TYPE_L2_BLOCK: - // Handle stream entry: L2Block - l2Block := &datastream.L2Block{} - - err := proto.Unmarshal(entry.Data, l2Block) - if err != nil { - s.logger.Errorf("error unmarshalling L2Block: %v", err) - return err - } - - s.logger.Infof("received L2Block entry, l2Block.Number: %d, l2Block.BatchNumber: %d, entry.Number: %d", - l2Block.Number, l2Block.BatchNumber, entry.Number, - ) - - // Sanity checks - if s.prevStreamEntry != nil && - !(prevEntryType == datastream.EntryType_ENTRY_TYPE_BATCH_START || - prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || - prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - s.logger.Fatalf("unexpected L2Block entry received, entry.Number: %d, l2Block.Number: %d, "+ - "prevEntry: %s, prevEntry.Number: %d", - entry.Number, - l2Block.Number, - s.entryTypeToString(prevEntryType), - s.prevStreamEntry.Number, - ) - } else if prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK { - prevL2Block := &datastream.L2Block{} - - err := proto.Unmarshal(s.prevStreamEntry.Data, prevL2Block) - if err != nil { - s.logger.Errorf("error unmarshalling prevL2Block: %v", err) - return err - } - if l2Block.Number != prevL2Block.Number+1 { - s.logger.Fatalf("unexpected L2Block number %d received, it should be %d, entry.Number: %d, prevEntry.Number: %d", - l2Block.Number, prevL2Block.Number+1, entry.Number, s.prevStreamEntry.Number) - } - } - - switch { - case l2Block.BatchNumber <= s.fromStreamBatch: - // Already virtualized - if l2Block.BatchNumber != s.latestStreamBatch { - s.logger.Infof("skipped! 
batch already virtualized, number %d", l2Block.BatchNumber) - } - - case !s.validStream && l2Block.BatchNumber == s.fromStreamBatch+1: - // Initial case after startup - s.addNewSequenceBatch(l2Block) - s.validStream = true - - case l2Block.BatchNumber > s.wipBatch: - // Handle whether it's only a new block or also a new batch - // Create new sequential batch - s.addNewSequenceBatch(l2Block) - } - - // Latest stream batch - s.latestStreamBatch = l2Block.BatchNumber - if !s.validStream { - return nil - } - - // Add L2 block - s.addNewBatchL2Block(l2Block) - - s.prevStreamEntry = entry - - case datastream.EntryType_ENTRY_TYPE_TRANSACTION: - // Handle stream entry: Transaction - if !s.validStream { - return nil - } - - l2Tx := &datastream.Transaction{} - err := proto.Unmarshal(entry.Data, l2Tx) - if err != nil { - s.logger.Errorf("error unmarshalling Transaction: %v", err) - return err - } - - s.logger.Debugf( - "received Transaction entry, tx.L2BlockNumber: %d, tx.Index: %d, entry.Number: %d", - l2Tx.L2BlockNumber, l2Tx.Index, entry.Number, - ) - - // Sanity checks - if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || - prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - s.logger.Fatalf("unexpected Transaction entry received, entry.Number: %d, transaction.L2BlockNumber: %d, "+ - "transaction.Index: %d, prevEntry: %s, prevEntry.Number: %d", - entry.Number, l2Tx.L2BlockNumber, l2Tx.Index, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number) - } - - // Sanity check: tx should be decodable - _, err = state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) - if err != nil { - s.logger.Fatalf("error decoding tx during sanity check: %v", err) - } - - // Add tx data - s.addNewBlockTx(l2Tx) - - s.prevStreamEntry = entry - - case datastream.EntryType_ENTRY_TYPE_BATCH_START: - // Handle stream entry: BatchStart - if !s.validStream { - return nil - } - - batch := &datastream.BatchStart{} - err := proto.Unmarshal(entry.Data, batch) - if err != nil { - s.logger.Errorf("error unmarshalling BatchStart: %v", err) - return err - } - - s.logger.Infof("received BatchStart entry, batchStart.Number: %d, entry.Number: %d", batch.Number, entry.Number) - - // Add batch start data - s.addInfoSequenceBatchStart(batch) - - s.prevStreamEntry = entry - - case datastream.EntryType_ENTRY_TYPE_BATCH_END: - // Handle stream entry: BatchEnd - if !s.validStream { - return nil - } - - batch := &datastream.BatchEnd{} - err := proto.Unmarshal(entry.Data, batch) - if err != nil { - s.logger.Errorf("error unmarshalling BatchEnd: %v", err) - return err - } - - s.logger.Infof("received BatchEnd entry, batchEnd.Number: %d, entry.Number: %d", batch.Number, entry.Number) - - // Sanity checks - if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || - prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - s.logger.Fatalf( - "unexpected BatchEnd entry received, entry.Number: %d, batchEnd.Number: %d, "+ - "prevEntry.Type: %s, prevEntry.Number: %d", - entry.Number, batch.Number, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number) - } - - // Add batch end data - s.addInfoSequenceBatchEnd(batch) - - // Close current wip batch - err = s.closeSequenceBatch() - if err != nil { - s.logger.Fatalf("error closing wip batch") - return err - } - - s.prevStreamEntry = entry - } - - return nil -} - -// closeSequenceBatch closes the current batch -func (s *SequenceSender) closeSequenceBatch() error { - s.mutexSequence.Lock() - defer s.mutexSequence.Unlock() - - s.logger.Infof("closing batch %d", 
s.wipBatch) - - data := s.sequenceData[s.wipBatch] - if data != nil { - data.batchClosed = true - - batchL2Data, err := state.EncodeBatchV2(data.batchRaw) - if err != nil { - s.logger.Errorf("error closing and encoding the batch %d: %v", s.wipBatch, err) - return err - } - - data.batch.SetL2Data(batchL2Data) - } else { - s.logger.Fatalf("wipBatch %d not found in sequenceData slice", s.wipBatch) - } - - // Sanity Check - if s.cfg.SanityCheckRPCURL != "" { - rpcNumberOfBlocks, batchL2Data, err := s.getBatchFromRPC(s.wipBatch) - if err != nil { - s.logger.Fatalf("error getting batch number from RPC while trying to perform sanity check: %v", err) - } else { - dsNumberOfBlocks := len(s.sequenceData[s.wipBatch].batchRaw.Blocks) - if rpcNumberOfBlocks != dsNumberOfBlocks { - s.logger.Fatalf( - "number of blocks in batch %d (%d) does not match the number of blocks in the batch from the RPC (%d)", - s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks, - ) - } - - if data.batchType == datastream.BatchType_BATCH_TYPE_REGULAR && - common.Bytes2Hex(data.batch.L2Data()) != batchL2Data { - s.logger.Infof("datastream batchL2Data: %s", common.Bytes2Hex(data.batch.L2Data())) - s.logger.Infof("RPC batchL2Data: %s", batchL2Data) - s.logger.Fatalf("batchL2Data in batch %d does not match batchL2Data from the RPC (%d)", s.wipBatch) - } - - s.logger.Infof("sanity check of batch %d against RPC successful", s.wipBatch) - } - } else { - s.logger.Warnf("config param SanityCheckRPCURL not set, sanity check with RPC can't be done") - } - - return nil -} - -func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (int, string, error) { - type zkEVMBatch struct { - Blocks []string `mapstructure:"blocks"` - BatchL2Data string `mapstructure:"batchL2Data"` - } - - zkEVMBatchData := zkEVMBatch{} - - response, err := rpc.JSONRPCCall(s.cfg.SanityCheckRPCURL, "zkevm_getBatchByNumber", batchNumber) - if err != nil { - return 0, "", err - } - - // Check if the response is an error - if response.Error != nil { - return 0, "", fmt.Errorf("error in the response calling zkevm_getBatchByNumber: %v", response.Error) - } - - // Get the batch number from the response hex string - err = json.Unmarshal(response.Result, &zkEVMBatchData) - if err != nil { - return 0, "", fmt.Errorf( - "error unmarshalling the batch number from the response calling zkevm_getBatchByNumber: %w", - err, - ) - } - - return len(zkEVMBatchData.Blocks), zkEVMBatchData.BatchL2Data, nil -} - -// addNewSequenceBatch adds a new batch to the sequence -func (s *SequenceSender) addNewSequenceBatch(l2Block *datastream.L2Block) { - s.mutexSequence.Lock() - s.logger.Infof("...new batch, number %d", l2Block.BatchNumber) - - if l2Block.BatchNumber > s.wipBatch+1 { - s.logFatalf("new batch number (%d) is not consecutive to the current one (%d)", l2Block.BatchNumber, s.wipBatch) - } else if l2Block.BatchNumber < s.wipBatch { - s.logFatalf("new batch number (%d) is lower than the current one (%d)", l2Block.BatchNumber, s.wipBatch) - } - - batch := s.TxBuilder.NewBatchFromL2Block(l2Block) - - // Add to the list - s.sequenceList = append(s.sequenceList, l2Block.BatchNumber) - - // Create initial data - batchRaw := state.BatchRawV2{} - data := sequenceData{ - batchClosed: false, - batch: batch, - batchRaw: &batchRaw, - } - s.sequenceData[l2Block.BatchNumber] = &data - - // Update wip batch - s.wipBatch = l2Block.BatchNumber - s.mutexSequence.Unlock() -} - -// addInfoSequenceBatchStart adds info from the batch start -func (s *SequenceSender) addInfoSequenceBatchStart(batch 
*datastream.BatchStart) { - s.mutexSequence.Lock() - s.logger.Infof( - "batch %d (%s) Start: type %d forkId %d chainId %d", - batch.Number, datastream.BatchType_name[int32(batch.Type)], batch.Type, batch.ForkId, batch.ChainId, - ) - - // Current batch - data := s.sequenceData[s.wipBatch] - if data != nil { - wipBatch := data.batch - if wipBatch.BatchNumber()+1 != batch.Number { - s.logFatalf( - "batch start number (%d) does not match the current consecutive one (%d)", - batch.Number, wipBatch.BatchNumber, - ) - } - data.batchType = batch.Type - } - - s.mutexSequence.Unlock() -} - -// addInfoSequenceBatchEnd adds info from the batch end -func (s *SequenceSender) addInfoSequenceBatchEnd(batch *datastream.BatchEnd) { - s.mutexSequence.Lock() - - // Current batch - data := s.sequenceData[s.wipBatch] - if data != nil { - wipBatch := data.batch - if wipBatch.BatchNumber() == batch.Number { - // wipBatch.StateRoot = common.BytesToHash(batch) TODO: check if this is needed - } else { - s.logFatalf("batch end number (%d) does not match the current one (%d)", batch.Number, wipBatch.BatchNumber) - } - } - - s.mutexSequence.Unlock() -} - -// addNewBatchL2Block adds a new L2 block to the work in progress batch -func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { - s.mutexSequence.Lock() - defer s.mutexSequence.Unlock() - s.logger.Infof(".....new L2 block, number %d (batch %d) l1infotree %d", - l2Block.Number, l2Block.BatchNumber, l2Block.L1InfotreeIndex) - - // Current batch - data := s.sequenceData[s.wipBatch] - if data != nil { - wipBatchRaw := data.batchRaw - data.batch.SetLastL2BLockTimestamp(l2Block.Timestamp) - // Sanity check: should be the same coinbase within the batch - if common.BytesToAddress(l2Block.Coinbase) != data.batch.LastCoinbase() { - s.logFatalf( - "coinbase changed within the batch! 
(Previous %v, Current %v)", - data.batch.LastCoinbase, common.BytesToAddress(l2Block.Coinbase), - ) - } - data.batch.SetLastCoinbase(common.BytesToAddress(l2Block.Coinbase)) - if l2Block.L1InfotreeIndex != 0 { - data.batch.SetL1InfoTreeIndex(l2Block.L1InfotreeIndex) - } else { - s.logger.Warnf("L2 Block L1InfotreeIndex is 0, we don't change batch L1InfotreeIndex (%d)", - data.batch.L1InfoTreeIndex()) - } - // New L2 block raw - newBlockRaw := state.L2BlockRaw{} - - // Add L2 block - wipBatchRaw.Blocks = append(wipBatchRaw.Blocks, newBlockRaw) - // Get current L2 block - _, blockRaw := s.getWipL2Block() - if blockRaw == nil { - s.logger.Debugf("wip block %d not found!") - return - } - - // Fill in data - blockRaw.DeltaTimestamp = l2Block.DeltaTimestamp - blockRaw.IndexL1InfoTree = l2Block.L1InfotreeIndex - } -} - -// addNewBlockTx adds a new Tx to the current L2 block -func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) { - s.mutexSequence.Lock() - defer s.mutexSequence.Unlock() - s.logger.Debugf("........new tx, length %d EGP %d SR %x..", - len(l2Tx.Encoded), l2Tx.EffectiveGasPricePercentage, l2Tx.ImStateRoot[:8], - ) - - // Current L2 block - _, blockRaw := s.getWipL2Block() - - // New Tx raw - tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) - if err != nil { - s.logger.Fatalf("error decoding tx: %v", err) - return - } - - l2TxRaw := state.L2TxRaw{ - EfficiencyPercentage: uint8(l2Tx.EffectiveGasPricePercentage), - TxAlreadyEncoded: false, - Tx: tx, - } - - // Add Tx - blockRaw.Transactions = append(blockRaw.Transactions, l2TxRaw) -} - -// getWipL2Block returns index of the array and pointer to the current L2 block (helper func) -func (s *SequenceSender) getWipL2Block() (uint64, *state.L2BlockRaw) { //nolint:unparam - // Current batch - var wipBatchRaw *state.BatchRawV2 - if s.sequenceData[s.wipBatch] != nil { - wipBatchRaw = s.sequenceData[s.wipBatch].batchRaw - } - - // Current wip block - if len(wipBatchRaw.Blocks) > 0 { - blockIndex := uint64(len(wipBatchRaw.Blocks)) - 1 - return blockIndex, &wipBatchRaw.Blocks[blockIndex] - } else { - return 0, nil - } -} - -// updateLatestVirtualBatch queries the value in L1 and updates the latest virtual batch field -func (s *SequenceSender) updateLatestVirtualBatch() error { +// getLatestVirtualBatch queries the value in L1 and updates the latest virtual batch field +func (s *SequenceSender) getLatestVirtualBatch() error { s.latestVirtualBatchLock.Lock() defer s.latestVirtualBatchLock.Unlock() // Get latest virtual state batch from L1 var err error - s.latestVirtualBatch, err = s.etherman.GetLatestBatchNumber() + s.latestVirtualBatchNumber, err = s.etherman.GetLatestBatchNumber() if err != nil { s.logger.Errorf("error getting latest virtual batch, error: %v", err) return errors.New("fail to get latest virtual batch") - } else { - s.logger.Infof("latest virtual batch is %d", s.latestVirtualBatch) } + + s.logger.Infof("latest virtual batch is %d", s.latestVirtualBatchNumber) + return nil } @@ -1286,52 +500,9 @@ func (s *SequenceSender) marginTimeElapsed( // logFatalf logs error, activates flag to stop sequencing, and remains in an infinite loop func (s *SequenceSender) logFatalf(template string, args ...interface{}) { s.seqSendingStopped = true - s.logger.Errorf(template, args...) 
- s.logger.Errorf("sequence sending stopped.") for { - time.Sleep(1 * time.Second) - } -} - -// printBatch prints data from batch raw V2 -func printBatch(raw *state.BatchRawV2, showBlock bool, showTx bool) { - // Total amount of L2 tx in the batch - totalL2Txs := 0 - for k := 0; k < len(raw.Blocks); k++ { - totalL2Txs += len(raw.Blocks[k].Transactions) - } - - log.Debugf("// #blocks: %d, #L2txs: %d", len(raw.Blocks), totalL2Txs) - - // Blocks info - if showBlock { - numBlocks := len(raw.Blocks) - var firstBlock *state.L2BlockRaw - var lastBlock *state.L2BlockRaw - if numBlocks > 0 { - firstBlock = &raw.Blocks[0] - } - if numBlocks > 1 { - lastBlock = &raw.Blocks[numBlocks-1] - } - if firstBlock != nil { - log.Debugf("// block first (indL1info: %d, delta-timestamp: %d, #L2txs: %d)", - firstBlock.IndexL1InfoTree, firstBlock.DeltaTimestamp, len(firstBlock.Transactions), - ) - // Tx info - if showTx { - for iTx, tx := range firstBlock.Transactions { - v, r, s := tx.Tx.RawSignatureValues() - log.Debugf("// tx(%d) effPct: %d, encoded: %t, v: %v, r: %v, s: %v", - iTx, tx.EfficiencyPercentage, tx.TxAlreadyEncoded, v, r, s, - ) - } - } - } - if lastBlock != nil { - log.Debugf("// block last (indL1info: %d, delta-timestamp: %d, #L2txs: %d)", - lastBlock.DeltaTimestamp, lastBlock.DeltaTimestamp, len(lastBlock.Transactions), - ) - } + s.logger.Errorf(template, args...) + s.logger.Errorf("sequence sending stopped.") + time.Sleep(ten * time.Second) } } diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go deleted file mode 100644 index f839cfca..00000000 --- a/sequencesender/sequencesender_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package sequencesender - -import ( - "testing" - - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sequencesender/txbuilder" - "github.com/0xPolygon/cdk/state" - "github.com/0xPolygon/cdk/state/datastream" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -const ( - txStreamEncoded1 = "f86508843b9aca0082520894617b3a3528f9cdd6630fd3301b9c8911f7bf063d0a808207f5a0579b72a1c1ffdd845fba45317540982109298e2ec8d67ddf2cdaf22e80903677a01831e9a01291c7ea246742a5b5a543ca6938bfc3f6958c22be06fad99274e4ac" - txStreamEncoded2 = "f86509843b9aca0082520894617b3a3528f9cdd6630fd3301b9c8911f7bf063d0a808207f5a0908a522075e09485166ffa7630cd2b7013897fa1f1238013677d6f0a86efb3d2a0068b12435fcdc8ee254f3b1df8c5b29ed691eeee6065704f061130935976ca99" - txStreamEncoded3 = "b8b402f8b101268505d21dba0085076c363d8982dc60941929761e87667283f087ea9ab8370c174681b4e980b844095ea7b300000000000000000000000080a64c6d7f12c47b7c66c5b4e20e72bc1fcd5d9effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc001a0dd4db494969139a120e8721842455ec13f82757a4fc49b66d447c7d32d095a1da06ef54068a9aa67ecc4f52d885299a04feb6f3531cdfc771f1412cd3331d1ba4c" -) - -func TestStreamTx(t *testing.T) { - tx1, err := state.DecodeTx(txStreamEncoded1) - require.NoError(t, err) - tx2, err := state.DecodeTx(txStreamEncoded2) - require.NoError(t, err) - tx3, err := state.DecodeTx(txStreamEncoded3) - require.NoError(t, err) - - txTest := state.L2TxRaw{ - EfficiencyPercentage: 129, - TxAlreadyEncoded: false, - Tx: tx1, - } - txTestEncoded := make([]byte, 0) - txTestEncoded, err = txTest.Encode(txTestEncoded) - require.NoError(t, err) - log.Debugf("%s", common.Bytes2Hex(txTestEncoded)) - - batch := state.BatchRawV2{ - Blocks: []state.L2BlockRaw{ - { - ChangeL2BlockHeader: state.ChangeL2BlockHeader{ - DeltaTimestamp: 
3633752, - IndexL1InfoTree: 0, - }, - Transactions: []state.L2TxRaw{ - { - EfficiencyPercentage: 129, - TxAlreadyEncoded: false, - Tx: tx1, - }, - { - EfficiencyPercentage: 97, - TxAlreadyEncoded: false, - Tx: tx2, - }, - { - EfficiencyPercentage: 97, - TxAlreadyEncoded: false, - Tx: tx3, - }, - }, - }, - }, - } - - printBatch(&batch, true, true) - - encodedBatch, err := state.EncodeBatchV2(&batch) - require.NoError(t, err) - - decodedBatch, err := state.DecodeBatchV2(encodedBatch) - require.NoError(t, err) - - printBatch(decodedBatch, true, true) -} - -func TestAddNewBatchL2Block(t *testing.T) { - logger := log.GetDefaultLogger() - txBuilder := txbuilder.NewTxBuilderBananaZKEVM(logger, nil, nil, bind.TransactOpts{}, 100, nil, nil, nil) - sut := SequenceSender{ - logger: logger, - cfg: Config{}, - ethTransactions: make(map[common.Hash]*ethTxData), - ethTxData: make(map[common.Hash][]byte), - sequenceData: make(map[uint64]*sequenceData), - validStream: false, - latestStreamBatch: 0, - seqSendingStopped: false, - TxBuilder: txBuilder, - } - - l2Block := datastream.L2Block{ - Number: 1, - BatchNumber: 1, - L1InfotreeIndex: 1, - } - sut.addNewSequenceBatch(&l2Block) - l2Block = datastream.L2Block{ - Number: 2, - BatchNumber: 1, - L1InfotreeIndex: 0, - } - sut.addNewBatchL2Block(&l2Block) - data := sut.sequenceData[sut.wipBatch] - // L1InfotreeIndex 0 is ignored - require.Equal(t, uint32(1), data.batch.L1InfoTreeIndex(), "new block have index=0 and is ignored") - - l2Block = datastream.L2Block{ - Number: 2, - BatchNumber: 1, - L1InfotreeIndex: 5, - } - sut.addNewBatchL2Block(&l2Block) - data = sut.sequenceData[sut.wipBatch] - require.Equal(t, uint32(5), data.batch.L1InfoTreeIndex(), "new block have index=5 and is set") -} diff --git a/test/config/test.config.toml b/test/config/test.config.toml index 92707e39..e5d4382c 100644 --- a/test/config/test.config.toml +++ b/test/config/test.config.toml @@ -13,7 +13,8 @@ SequencesTxFileName = "sequencesender.json" GasOffset = 80000 WaitPeriodPurgeTxFile = "60m" MaxPendingTx = 1 -SanityCheckRPCURL = "http://127.0.0.1:8123" +RPCURL = "http://127.0.0.1:8123" +GetBatchWaitInterval = "10s" [SequenceSender.StreamClient] Server = "127.0.0.1:6900" [SequenceSender.EthTxManager] diff --git a/test/config/test.kurtosis_template.toml b/test/config/test.kurtosis_template.toml index c065cd6d..aec3a3b6 100644 --- a/test/config/test.kurtosis_template.toml +++ b/test/config/test.kurtosis_template.toml @@ -30,7 +30,8 @@ SequencesTxFileName = "sequencesender.json" GasOffset = 80000 WaitPeriodPurgeTxFile = "15m" MaxPendingTx = 1 -SanityCheckRPCURL = "${l2_rpc_addr}" +RPCURL = "${l2_rpc_addr}" +GetBatchWaitInterval = "10s" [SequenceSender.StreamClient] Server = "127.0.0.1:${zkevm_data_streamer_port}" [SequenceSender.EthTxManager] From 8167397a1cce9129444135f88d42676da28ddb74 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 1 Oct 2024 19:02:07 +0200 Subject: [PATCH 17/53] feat: use config-file on CDK for kurtosis e2e tests (#99) - The e2e test for CDK use a local configuration template instead of the one on kurtosis branch: to allow to develop changes on config file is more easy if we are able to use the config file on CDK repo. 
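For illustration, a minimal sketch of the template-rendering approach (the actual implementation is scripts/run_template.go, added below; the names in this sketch are illustrative, not the exact code): environment variables exported by scripts/local_config become the data map of a Go text/template, so a placeholder such as {{.l1_rpc_url}} is filled from the exported l1_rpc_url value.

package main

import (
	"log"
	"os"
	"strings"
	"text/template"
)

// renderConfig fills a TOML template using the current environment variables
// as template data, e.g. {{.l1_rpc_url}} resolves to the exported l1_rpc_url.
func renderConfig(templatePath string) error {
	content, err := os.ReadFile(templatePath)
	if err != nil {
		return err
	}
	// Expose every environment variable as a template key.
	data := map[string]any{}
	for _, kv := range os.Environ() {
		if k, v, ok := strings.Cut(kv, "="); ok {
			data[k] = v
		}
	}
	tmpl, err := template.New("cfg").Parse(string(content))
	if err != nil {
		return err
	}
	// The rendered configuration is written to stdout, to be redirected into
	// the destination config file by the calling script.
	return tmpl.Execute(os.Stdout, data)
}

func main() {
	if err := renderConfig(os.Args[1]); err != nil {
		log.Fatalf("rendering template: %v", err)
	}
}
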
After that we can update config file and cdk image on kurtosis repo - The local configuration for run on vscode use the same template (`test/config/kurtosis-cdk-node-config.toml.template`) as the e2e test - Fix error on CI `test-e2e.yml` with the duplicated `ref` key --- .github/workflows/test-e2e.yml | 1 - scripts/local_config | 284 ++++++++++++++---- scripts/run_template.go | 63 ++++ sonar-project.properties | 2 +- .../kurtosis-cdk-node-config.toml.template | 174 +++++++++++ test/config/test.kurtosis_template.toml | 150 --------- test/run-e2e.sh | 2 + 7 files changed, 467 insertions(+), 209 deletions(-) create mode 100644 scripts/run_template.go create mode 100644 test/config/kurtosis-cdk-node-config.toml.template delete mode 100644 test/config/test.kurtosis_template.toml diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index c89275c7..721cbf09 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -65,7 +65,6 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: fix/missing_cdk_config_rollupmanager path: "kurtosis-cdk" ref: "v0.2.11" diff --git a/scripts/local_config b/scripts/local_config index aeb008b0..7303b1fd 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -1,30 +1,180 @@ #!/bin/bash #Include common varaibles source $(dirname $0)/../test/scripts/env.sh +############################################################################### +function log_debug() { + echo -e "\033[0;30mDebug: $*" "\033[0m" +} +############################################################################### +function log_error() { + echo -e "\033[0;31mError: $*" "\033[0m" +} +############################################################################### +function log_fatal() { + log_error $* + exit 1 +} +############################################################################### +function ok_or_fatal(){ + if [ $? -ne 0 ]; then + log_fatal $* + fi +} + +############################################################################### +function get_value_from_toml_file(){ + local _FILE="$1" + # KEY = . 
+ local _SECTION="$2" + local _KEY="$3" + local _LINE + local _inside_section=0 + while read -r _LINE; do + # Clean up line from spaces and tabs + _LINE=$(echo $_LINE | tr -d '[:space:]') + #echo $_LINE + if [ $_inside_section -eq 1 ]; then + if [[ "$_LINE" == [* ]]; then + return 1 + fi + #local _key_splitted=(${_LINE//=/ }) + local _key_name=$(echo $_LINE | cut -f 1 -d "=") + local _key_value=$(echo $_LINE | cut -f 2- -d "=") + if [ "$_key_name" == "$_KEY" ]; then + echo $_key_value + return 0 + fi + elif [ "$_LINE" == "[${_SECTION}]" ]; then + _inside_section=1 + fi + + + done < "$_FILE" + return 2 + +} +############################################################################### +function export_key_from_toml_file_or_fatal(){ + local _EXPORTED_VAR_NAME="$1" + local _FILE="$2" + local _SECTION="$3" + local _KEY="$4" + local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) + if [ -z "$_VALUE" ]; then + log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION" + fi + export $_EXPORTED_VAR_NAME="$_VALUE" + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY" +} + +############################################################################### +function export_obj_key_from_toml_file_or_fatal(){ + local _EXPORTED_VAR_NAME="$1" + local _FILE="$2" + local _SECTION="$3" + local _KEY="$4" + local _OBJ_KEY="$5" + local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) + if [ -z "$_VALUE" ]; then + log_fatal "$FUNCNAME: obj_key $_KEY not found in section $_SECTION" + fi + local _CLEAN_VALUE=$(echo $_VALUE | tr -d '{' | tr -d '}' | tr ',' '\n') + while read -r _LINE; do + local _key_splitted=(${_LINE//=/ }) + if [ "${_key_splitted[0]}" == "$_OBJ_KEY" ]; then + export $_EXPORTED_VAR_NAME=${_key_splitted[1]} + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY obj_key:$_OBJ_KEY" + return 0 + fi + done <<< "$_CLEAN_VALUE" + log_fatal "obj_key $_OBJ_KEY not found in section $_SECTION/ $_KEY = $_VALUE" +} +############################################################################### function export_values_of_genesis(){ local _GENESIS_FILE=$1 if [ ! 
-f $_GENESIS_FILE ]; then - echo "Error: genesis file not found: $_GENESIS_FILE" - exit 1 + log_fatal "Error: genesis file not found: $_GENESIS_FILE" fi export l1_chain_id=$(jq -r '.L1Config.chainId' $_GENESIS_FILE | tr -d '"') export pol_token_address=$(jq -r '.L1Config.polTokenAddress' $_GENESIS_FILE) export zkevm_rollup_address=$(jq -r '.L1Config.polygonZkEVMAddress' $_GENESIS_FILE) export zkevm_rollup_manager_address=$(jq -r '.L1Config.polygonRollupManagerAddress' $_GENESIS_FILE) export zkevm_global_exit_root_address=$(jq -r '.L1Config.polygonZkEVMGlobalExitRootAddress' $_GENESIS_FILE) + export zkevm_rollup_manager_block_number=$(jq -r '.rollupManagerCreationBlockNumber' $_GENESIS_FILE) } +############################################################################### +function export_values_of_cdk_node_config(){ + local _CDK_CONFIG_FILE=$1 + export_key_from_toml_file_or_fatal zkevm_l2_sequencer_address $_CDK_CONFIG_FILE SequenceSender L2Coinbase + export_obj_key_from_toml_file_or_fatal zkevm_l2_keystore_password $_CDK_CONFIG_FILE SequenceSender PrivateKey Password + export_key_from_toml_file_or_fatal l1_chain_id $_CDK_CONFIG_FILE SequenceSender.EthTxManager.Etherman L1ChainID + export_key_from_toml_file_or_fatal zkevm_is_validium $_CDK_CONFIG_FILE Common IsValidiumMode + export_key_from_toml_file_or_fatal zkevm_contract_versions $_CDK_CONFIG_FILE Common ContractVersions + export_key_from_toml_file_or_fatal l2_chain_id $_CDK_CONFIG_FILE Aggregator ChainID + export_key_from_toml_file_or_fatal zkevm_aggregator_port $_CDK_CONFIG_FILE Aggregator Port + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_address $_CDK_CONFIG_FILE Aggregator SenderAddress + export_key_from_toml_file_or_fatal aggregator_db_name $_CDK_CONFIG_FILE Aggregator.DB Name + export_key_from_toml_file_or_fatal aggregator_db_user $_CDK_CONFIG_FILE Aggregator.DB User + export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password + export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + export is_cdk_validium=$zkevm_is_validium + export zkevm_rollup_chain_id=$l2_chain_id - + if [ "$zkevm_is_validium" == "true" ]; then + log_debug "Validium mode detected... 
Retrieving the dac_port" + export_value_from_kurtosis_or_fail dac_port zkevm-dac-001 dac + fi +} ############################################################################### -# MAIN +function export_value_from_kurtosis_or_fail(){ + local _EXPORTED_VAR_NAME="$1" + local _SERVICE="$2" + local _END_POINT="$3" + export $_EXPORTED_VAR_NAME=$(kurtosis port print $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT) + if [ -z $_EXPORTED_VAR_NAME ]; then + log_fatal "Error getting kurtosis port: $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" + fi + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# Kurtosis $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" +} ############################################################################### -set -o pipefail # enable strict command pipe error detection +function export_portnum_from_kurtosis_or_fail(){ + local _EXPORTED_VAR_NAME="$1" + export_value_from_kurtosis_or_fail "$1" "$2" "$3" > /dev/null + local _VALUE + eval "_VALUE=\$$1" + local _PORT=$(echo "$_VALUE" | cut -f 3 -d ":") + if [ -z $_PORT ]; then + log_fatal "Error getting port number from kurtosis: $2 $3 -> $_VALUE" + fi + export $_EXPORTED_VAR_NAME=$_PORT + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# Kurtosis $KURTOSIS_ENCLAVE $2 $3" +} +############################################################################### +function export_ports_from_kurtosis(){ + export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc + export_value_from_kurtosis_or_fail l1_rpc_url el-1-geth-lighthouse rpc + export_value_from_kurtosis_or_fail l2_rpc_url cdk-erigon-node-001 http-rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc + export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer + export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres + export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer + export aggregator_db_hostname="127.0.0.1" +} -which kurtosis > /dev/null -if [ $? -ne 0 ]; then - echo "kurtosis is not installed. Please install it:" +############################################################################### +function export_forced_values(){ + export global_log_level="debug" + export l2_rpc_name="localhost" + export sequencer_name="localhost" + export deployment_suffix="" +} +############################################################################### +function check_requirements(){ + which kurtosis > /dev/null + if [ $? -ne 0 ]; then + log_error "kurtosis is not installed. Please install it:" cat << EOF echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list @@ -33,58 +183,78 @@ if [ $? -ne 0 ]; then EOF exit 1 -fi + fi + if [ -z $TMP_CDK_FOLDER -o -z $KURTOSIS_ENCLAVE ]; then + log_fatal "TMP_CDK_FOLDER or KURTOSIS_ENCLAVE is not set. Must be set on file env.sh" + fi + kurtosis enclave inspect $KURTOSIS_ENCLAVE > /dev/null + if [ $? -ne 0 ]; then + log_error "Error inspecting enclave $KURTOSIS_ENCLAVE" + echo "You must start kurtosis environment before running this script" + echo "- start kurtosis:" + echo " kurtosis clean --all; kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file params.yml --image-download always ." -if [ -z $TMP_CDK_FOLDER -o -z $KURTOSIS_ENCLAVE ]; then - echo "TMP_CDK_FOLDER or KURTOSIS_ENCLAVE is not set. 
Must be set on file env.sh" - exit 1 -fi -kurtosis enclave inspect $KURTOSIS_ENCLAVE > /dev/null -if [ $? -ne 0 ]; then - echo "Error inspecting enclave $KURTOSIS_ENCLAVE" - echo "You must start kurtosis environment before running this script" - echo "- start kurtosis:" - echo " kurtosis clean --all; kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file params.yml --image-download always ." + exit 1 + fi +} +############################################################################### +function create_dest_folder(){ + export DEST=${TMP_CDK_FOLDER}/local_config + [ ! -d ${DEST} ] && mkdir -p ${DEST} + rm $DEST/* +} +############################################################################### +function download_kurtosis_artifacts(){ + kurtosis files download $KURTOSIS_ENCLAVE genesis $DEST + ok_or_fatal "Error downloading kurtosis artifact genesis to $DEST" + export genesis_file=$DEST/genesis.json + + kurtosis files download $KURTOSIS_ENCLAVE sequencer-keystore $DEST + ok_or_fatal "Error downloading kurtosis artifact sequencer-keystore to $DEST" + export sequencer_keystore_file=$DEST/sequencer.keystore + + kurtosis files download $KURTOSIS_ENCLAVE cdk-node-config-artifact $DEST + ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" +} +############################################################################### +function check_generated_config_file(){ + grep "" $DEST_TEMPLATE_FILE > /dev/null + if [ $? -ne 1 ]; then + log_error "some values are not set, check $ORIG_TEMPLATE_FILE" + echo "" + echo "missing keys in rendered template: $DEST_TEMPLATE_FILE" + echo " " + grep "" $DEST_TEMPLATE_FILE + exit 1 + fi +} +############################################################################### +# MAIN +############################################################################### +set -o pipefail # enable strict command pipe error detection +check_requirements +create_dest_folder + +download_kurtosis_artifacts - exit 1 -fi -DEST=${TMP_CDK_FOLDER}/local_config - -[ ! -d ${DEST} ] && mkdir -p ${DEST} -rm $DEST/* -kurtosis files download $KURTOSIS_ENCLAVE genesis $DEST -[ $? -ne 0 ] && echo "Error downloading genesis" && exit 1 -export genesis_file=$DEST/genesis.json export_values_of_genesis $genesis_file -kurtosis files download $KURTOSIS_ENCLAVE sequencer-keystore $DEST -[ $? -ne 0 ] && echo "Error downloading sequencer-keystore" && exit 1 -export sequencer_keystore_file=$DEST/sequencer.keystore - -l1_rpc_port=$(kurtosis port print $KURTOSIS_ENCLAVE el-1-geth-lighthouse rpc | cut -f 3 -d ":") -[ $? -ne 0 ] && echo "Error getting l1_rpc_port" && exit 1 || export l1_rpc_port && echo "l1_rpc_port=$l1_rpc_port" -l1_rpc_addr=$(kurtosis port print $KURTOSIS_ENCLAVE el-1-geth-lighthouse rpc) -[ $? -ne 0 ] && echo "Error getting l1_rpc_addr" && exit 1 || export l1_rpc_addr && echo "l1_rpc_addr=$l1_rpc_addr" -l2_rpc_addr=$(kurtosis port print $KURTOSIS_ENCLAVE cdk-erigon-node-001 http-rpc) -[ $? -ne 0 ] && echo "Error getting l2_rpc_addr" && exit 1 || export l2_rpc_addr && echo "l2_rpc_addr=$l2_rpc_addr" - -zkevm_data_streamer_port=$(kurtosis port print $KURTOSIS_ENCLAVE cdk-erigon-sequencer-001 data-streamer | cut -f 3 -d ":") -[ $? 
-ne 0 ] && echo "Error getting zkevm_data_streamer_port" && exit 1 || export zkevm_data_streamer_port && echo "zkevm_data_streamer_port=$zkevm_data_streamer_port" - -kurtosis files download $KURTOSIS_ENCLAVE cdk-node-config-artifact $DEST -export zkevm_l2_sequencer_address=$(cat $DEST/cdk-node-config.toml |grep L2Coinbase | cut -f 2 -d "="| tr -d '"' | tr -d ' ') -export zkevm_l2_keystore_password=$(cat $DEST/cdk-node-config.toml |grep -A1 L2Coinbase | tr ',' '\n' | grep Password | cut -f 2 -d '=' | tr -d '}' | tr -d '"' | tr -d ' ') -export l1_chain_id=$(cat $DEST/cdk-node-config.toml | grep L1ChainID | cut -f 2 -d '=' | head -n 1) -echo $l1_chain_id -export zkevm_is_validium=$(cat $DEST/cdk-node-config.toml | grep IsValidiumMode | cut -f 2 -d '=') -export zkevm_contract_versions=$(cat $DEST/cdk-node-config.toml | grep ContractVersions | cut -f 2 -d '=' | tr -d '"' | tr -d ' ') -if [ "$zkevm_is_validium" == "true" ]; then - echo "Validium mode detected... Retrieving the dac_port" - dac_port=$(kurtosis port print $KURTOSIS_ENCLAVE zkevm-dac-001 dac | cut -f 3 -d ":") - [ $? -ne 0 ] && echo "Error getting dac_port" && exit 1 || export dac_port && echo "dac_port=$dac_port" -fi - -envsubst < test/config/test.kurtosis_template.toml > $DEST/test.kurtosis.toml +export_ports_from_kurtosis +export_values_of_cdk_node_config $DEST/cdk-node-config.toml +export_forced_values + +ORIG_TEMPLATE_FILE=test/config/kurtosis-cdk-node-config.toml.template +DEST_TEMPLATE_FILE=$DEST/test.kurtosis.toml + +# Generate config file +go run scripts/run_template.go $ORIG_TEMPLATE_FILE > $DEST_TEMPLATE_FILE +ok_or_fatal "Error generating template" + +check_generated_config_file + + +echo " " echo "file generated at:" $DEST/test.kurtosis.toml + echo "- to restart kurtosis:" echo " kurtosis clean --all; kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always ." echo " " @@ -102,7 +272,7 @@ cat << EOF "cwd": "\${workspaceFolder}", "args":[ "run", - "-cfg", "$DEST/test.kurtosis.toml", + "-cfg", "$DEST_TEMPLATE_FILE", "-components", "sequence-sender,aggregator", ] }, diff --git a/scripts/run_template.go b/scripts/run_template.go new file mode 100644 index 00000000..b629de5a --- /dev/null +++ b/scripts/run_template.go @@ -0,0 +1,63 @@ +package main + +import ( + "fmt" + "log" + "os" + "regexp" + "strings" + "text/template" +) + +func main() { + tmpl := template.New("t1") + content, err := readFile(os.Args[1]) + if err != nil { + log.Fatalf("Error loading template: %v", err) + } + content = replaceDotsInTemplateVariables(content) + tmpl = template.Must(tmpl.Parse(content)) + + if err := tmpl.Execute(os.Stdout, environmentToMap()); err != nil { + log.Fatalf("Error executing template: %v", err) + } +} +func replaceDotsInTemplateVariables(template string) string { + re := regexp.MustCompile(`{{\s*\.([^{}]*)\s*}}`) + result := re.ReplaceAllStringFunc(template, func(match string) string { + match = strings.ReplaceAll(match[3:], ".", "_") + return "{{." 
+ match + }) + return result +} + +func readFile(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return string(content), nil +} + +func environmentToMap() map[string]any { + envVars := make(map[string]any) + for _, e := range os.Environ() { + pair := splitAtFirst(e, '=') + fmt.Printf("zzzz env=%s pair=%v\n", e, pair) + envVars[pair[0]] = pair[1] + } + envVars["aggregator_db"] = map[string]string{ + "user": "user", + "name": "Name", + } + return envVars +} + +func splitAtFirst(s string, sep rune) [2]string { + for i, c := range s { + if c == sep { + return [2]string{s[:i], s[i+1:]} + } + } + return [2]string{s, ""} +} diff --git a/sonar-project.properties b/sonar-project.properties index b8f78410..815d53a8 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -7,7 +7,7 @@ sonar.projectName=cdk sonar.organization=0xpolygon sonar.sources=. -sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/* +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*, scripts/** sonar.tests=. sonar.test.inclusions=**/*_test.go diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template new file mode 100644 index 00000000..91ddb819 --- /dev/null +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -0,0 +1,174 @@ +ForkUpgradeBatchNumber = 0 +ForkUpgradeNewForkId = 0 + +[Common] +IsValidiumMode = {{.is_cdk_validium}} + +{{if eq .zkevm_rollup_fork_id "12"}} +ContractVersions = "banana" +{{else}} +ContractVersions = "elderberry" +{{end}} + +[Etherman] +URL = "{{.l1_rpc_url}}" + +[Log] +Environment = "development" # "production" or "development" +Level = "{{.global_log_level}}" +Outputs = ["stderr"] + +[SequenceSender] +WaitPeriodSendSequence = "15s" +LastBatchVirtualizationTimeMaxWaitPeriod = "10s" +MaxTxSizeForL1 = 131072 +L2Coinbase = "{{.zkevm_l2_sequencer_address}}" +PrivateKey = {Path = "/etc/cdk/sequencer.keystore", Password = "{{.zkevm_l2_keystore_password}}"} +SequencesTxFileName = "/data/sequencesender.json" +GasOffset = 80000 +WaitPeriodPurgeTxFile = "15m" +MaxPendingTx = 1 +{{if eq .zkevm_rollup_fork_id "12"}} +MaxBatchesForL1 = 300 +BlockFinality="FinalizedBlock" +RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" +GetBatchWaitInterval = "10s" +{{end}} + [SequenceSender.StreamClient] + Server = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" + [SequenceSender.EthTxManager] + FrequencyToMonitorTxs = "1s" + WaitTxToBeMined = "2m" + ConsolidationL1ConfirmationBlocks = 5 + {{if eq .zkevm_rollup_fork_id "12"}} + FinalizedStatusL1NumberOfBlocks = 10 + WaitReceiptMaxTime = "250ms" + WaitReceiptCheckInterval = "8s" + {{else}} + FinalizationL1ConfirmationBlocks = 10 + WaitReceiptToBeGenerated = "8s" + {{end}} + PrivateKeys = [ + {Path = "/etc/cdk/sequencer.keystore", Password = "{{.zkevm_l2_keystore_password}}"}, + ] + ForcedGas = 0 + GasPriceMarginFactor = 1 + MaxGasPriceLimit = 0 + PersistenceFilename = "/data/ethtxmanager.json" + [SequenceSender.EthTxManager.Etherman] + URL = "{{.l1_rpc_url}}" + L1ChainID = {{.l1_chain_id}} + HTTPHeaders = [] + +[Aggregator] + FinalProofSanityCheckEnabled = false + Host = "0.0.0.0" + Port = "{{.zkevm_aggregator_port}}" + RetryTime = "30s" + VerifyProofInterval = "30s" + ProofStatePollingInterval = "5s" 
+ TxProfitabilityCheckerType = "acceptall" + TxProfitabilityMinReward = "1.1" + IntervalAfterWhichBatchConsolidateAnyway = "0s" + ChainID = "{{.zkevm_rollup_chain_id}}" + ForkId = {{.zkevm_rollup_fork_id}} + CleanupLockedProofsInterval = "2m0s" + GeneratingProofCleanupThreshold = "10m" + GasOffset = 150000 + UpgradeEtrogBatchNumber = "{{.zkevm_rollup_manager_block_number}}" + WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" + {{if .is_cdk_validium}} + SenderAddress = "{{.zkevm_l2_agglayer_address}}" + SettlementBackend = "agglayer" + AggLayerTxTimeout = "600s" + AggLayerURL = "http://agglayer:{{.agglayer_port}}" + {{else}} + SenderAddress = "{{.zkevm_l2_aggregator_address}}" + {{end}} + + {{if eq .zkevm_rollup_fork_id "12"}} + UseL1BatchData = true + UseFullWitness = false + MaxWitnessRetrievalWorkers = 2 + SyncModeOnlyEnabled = false + {{end}} + + [Aggregator.SequencerPrivateKey] + Path = "/etc/cdk/sequencer.keystore" + Password = "{{.zkevm_l2_keystore_password}}" + [Aggregator.DB] + Name = "{{.aggregator_db.name}}" + User = "{{.aggregator_db.user}}" + Password = "{{.aggregator_db.password}}" + Host = "{{.aggregator_db.hostname}}" + Port = "{{.aggregator_db.port}}" + EnableLog = false + MaxConns = 200 + [Aggregator.Log] + Environment = "development" # "production" or "development" + Level = "{{.global_log_level}}" + Outputs = ["stderr"] + [Aggregator.StreamClient] + Server = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" + [Aggregator.EthTxManager] + FrequencyToMonitorTxs = "1s" + WaitTxToBeMined = "2m" + + {{if eq .zkevm_rollup_fork_id "12"}} + WaitReceiptMaxTime = "250ms" + WaitReceiptCheckInterval = "1s" + {{else}} + GetReceiptMaxTime = "250ms" + GetReceiptWaitInterval = "1s" + {{end}} + + PrivateKeys = [ + {Path = "/etc/cdk/aggregator.keystore", Password = "{{.zkevm_l2_keystore_password}}"}, + ] + ForcedGas = 0 + GasPriceMarginFactor = 1 + MaxGasPriceLimit = 0 + PersistenceFilename = "" + ReadPendingL1Txs = false + SafeStatusL1NumberOfBlocks = 0 + FinalizedStatusL1NumberOfBlocks = 0 + [Aggregator.EthTxManager.Etherman] + URL = "{{.l1_rpc_url}}" + L1ChainID = {{.l1_chain_id}} + HTTPHeaders = [] + [Aggregator.Synchronizer] + [Aggregator.Synchronizer.SQLDB] + DriverName = "sqlite3" + DataSource = "file:/data/aggregator_sync_db.sqlite" + [Aggregator.Synchronizer.Synchronizer] + SyncInterval = "10s" + SyncChunkSize = 1000 + GenesisBlockNumber = "{{.zkevm_rollup_manager_block_number}}" + SyncUpToBlock = "latest" + BlockFinality = "latest" + OverrideStorageCheck = false + [Aggregator.Synchronizer.Etherman] + [Aggregator.Synchronizer.Etherman.Validium] + Enabled = {{.is_cdk_validium}} + + +[L1InfoTreeSync] +DBPath = "/tmp/L1InfoTreeSync" # TODO: put a more realisitic path here +GlobalExitRootAddr = "{{.zkevm_global_exit_root_address}}" +RollupManagerAddr = "{{.zkevm_rollup_manager_address}}" +SyncBlockChunkSize = 10 +BlockFinality = "LatestBlock" +URLRPCL1 = "{{.l1_rpc_url}}" +WaitForNewBlocksPeriod = "1s" +InitialBlock = "{{.zkevm_rollup_manager_block_number}}" + +[NetworkConfig.L1] +{{if eq .zkevm_rollup_fork_id "12"}} +L1ChainID = "{{.l1_chain_id}}" +{{else}} +ChainID = "{{.l1_chain_id}}" +{{end}} +PolAddr = "{{.pol_token_address}}" +ZkEVMAddr = "{{.zkevm_rollup_address}}" +RollupManagerAddr = "{{.zkevm_rollup_manager_address}}" +GlobalExitRootManagerAddr = "{{.zkevm_global_exit_root_address}}" diff --git a/test/config/test.kurtosis_template.toml b/test/config/test.kurtosis_template.toml deleted file mode 100644 index 
aec3a3b6..00000000 --- a/test/config/test.kurtosis_template.toml +++ /dev/null @@ -1,150 +0,0 @@ -ForkUpgradeBatchNumber = 0 -ForkUpgradeNewForkId = 0 - -[Etherman] - URL = "http://127.0.0.1:${l1_rpc_port}" - -[Common] -IsValidiumMode = ${zkevm_is_validium} -ContractVersions = "${zkevm_contract_versions}" -[Common.Translator] - FullMatchRules = [ - {Old="http://zkevm-dac-001:8484", New="http://127.0.0.1:${dac_port}"}, - ] - -[Log] -Environment = "development" # "production" or "development" -Level = "debug" -Outputs = ["stderr"] - -[SequenceSender] -WaitPeriodSendSequence = "15s" -LastBatchVirtualizationTimeMaxWaitPeriod = "10s" -L1BlockTimestampMargin = "30s" -MaxTxSizeForL1 = 131072 -MaxBatchesForL1 = 2 -L2Coinbase = "${zkevm_l2_sequencer_address}" -PrivateKey = {Path = "${sequencer_keystore_file}", Password = "${zkevm_l2_keystore_password}"} - -SequencesTxFileName = "sequencesender.json" -GasOffset = 80000 -WaitPeriodPurgeTxFile = "15m" -MaxPendingTx = 1 -RPCURL = "${l2_rpc_addr}" -GetBatchWaitInterval = "10s" - [SequenceSender.StreamClient] - Server = "127.0.0.1:${zkevm_data_streamer_port}" - [SequenceSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "${sequencer_keystore_file}", Password = "${zkevm_l2_keystore_password}"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [SequenceSender.EthTxManager.Etherman] - URL = "http://127.0.0.1:${l1_rpc_port}" - MultiGasProvider = false - L1ChainID = ${l1_chain_id} -[Aggregator] -Host = "0.0.0.0" -Port = 50081 -RetryTime = "5s" -VerifyProofInterval = "10s" -TxProfitabilityCheckerType = "acceptall" -TxProfitabilityMinReward = "1.1" -ProofStatePollingInterval = "5s" -SenderAddress = "" -CleanupLockedProofsInterval = "2m" -GeneratingProofCleanupThreshold = "10m" -BatchProofSanityCheckEnabled = true -ForkId = 9 -GasOffset = 0 -WitnessURL = "localhost:8123" -UseL1BatchData = true -UseFullWitness = false -SettlementBackend = "l1" -AggLayerTxTimeout = "5m" -AggLayerURL = "" -MaxWitnessRetrievalWorkers = 2 -SyncModeOnlyEnabled = false -SequencerPrivateKey = {} - [Aggregator.DB] - Name = "aggregator_db" - User = "aggregator_user" - Password = "aggregator_password" - Host = "cdk-aggregator-db" - Port = "5432" - EnableLog = false - MaxConns = 200 - [Aggregator.Log] - Environment = "development" # "production" or "development" - Level = "info" - Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "localhost:6900" - [Aggregator.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/pk/aggregator.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [Aggregator.EthTxManager.Etherman] - URL = "" - L1ChainID = ${l1_chain_id} - HTTPHeaders = [] - [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" - Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 - [Aggregator.Synchronizer.Synchronizer] - SyncInterval = "10s" - SyncChunkSize = 1000 - GenesisBlockNumber = 5511080 - SyncUpToBlock = "finalized" - 
BlockFinality = "finalized" - OverrideStorageCheck = false - [Aggregator.Synchronizer.Etherman] - [Aggregator.Synchronizer.Etherman.Validium] - Enabled = ${zkevm_is_validium} - - -[L1InfoTreeSync] -DBPath = "/tmp/L1InfoTreeSync.sqlite" -GlobalExitRootAddr="${zkevm_global_exit_root_address}" -RollupManagerAddr="${zkevm_rollup_manager_address}" -SyncBlockChunkSize=100 -BlockFinality="LatestBlock" -# http://el-1-geth-lighthouse:8545 -URLRPCL1="${l1_rpc_addr}" -WaitForNewBlocksPeriod="100ms" -InitialBlock=0 - - -[NetworkConfig.L1] -L1ChainID = ${l1_chain_id} -PolAddr = "${pol_token_address}" -ZkEVMAddr = "${zkevm_rollup_address}" -RollupManagerAddr = "${zkevm_rollup_manager_address}" -GlobalExitRootManagerAddr = "${zkevm_global_exit_root_address}" diff --git a/test/run-e2e.sh b/test/run-e2e.sh index 6a29e416..809f06e6 100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -22,4 +22,6 @@ $BASE_FOLDER/scripts/kurtosis_prepare_params_yml.sh "$KURTOSIS_FOLDER" $DATA_AVA [ $? -ne 0 ] && echo "Error preparing params.yml" && exit 1 kurtosis clean --all +echo "Override cdk config file" +cp $BASE_FOLDER/config/kurtosis-cdk-node-config.toml.template $KURTOSIS_FOLDER/templates/trusted-node/cdk-node-config.toml kurtosis run --enclave cdk-v1 --args-file $DEST_KURTOSIS_PARAMS_YML --image-download always $KURTOSIS_FOLDER From ac5c3187746b101bf00030a9808699125e536304 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 2 Oct 2024 10:25:04 +0200 Subject: [PATCH 18/53] feat: update zkevm-synchronizer-l1 to v1.0.2 (#102) - update zkevm-synchronizer-l1 from [v1.0.1](https://github.com/0xPolygonHermez/zkevm-synchronizer-l1/releases/tag/v1.0.1) to [v1.0.2](https://github.com/0xPolygonHermez/zkevm-synchronizer-l1/releases/tag/v1.0.2): - fix: [#119](https://github.com/0xPolygonHermez/zkevm-synchronizer-l1/issues/119), fails if there are multiple sequencedBatch in same bock because a SQL have wrong order by - feat: add check to DB configuration - fix: downgrade migrations sql lite, remove scheme prefix from tables names --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 6396bd68..06afc08d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 diff --git a/go.sum b/go.sum index d20624ff..c1685ecf 100644 --- a/go.sum +++ b/go.sum @@ -6,12 +6,10 @@ github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8Puddd github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= -github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234/go.mod h1:zBZWxwOHKlw+ghd9roQLgIkDZWA7e7qO3EsfQQT/+oQ= 
github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447 h1:8nZjZrHZo+P9hTkhwtQ4J6eh9v4MTMtVb9jRDra8h0s= github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447/go.mod h1:4iWpcwMOZJPapUzFB/HjTAM0X/gltHSEzQHE0lOt+eY= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1 h1:8GbJBNsYO4zrqiBX++et8eQrJDEWEZuo3Ch3M416YnI= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.1/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 h1:DYioOpHDcn7rtojInDTEv7vmnhs8HP6zOSSXSGENM7s= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= From 749ceb8eff9c0f213d161f9cb59493c450327946 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Wed, 2 Oct 2024 18:27:03 +0200 Subject: [PATCH 19/53] refactor: simplify running Kurtosis combinations (#101) Using a set of pre-defined combinations of components, we're going to test the versions we're interested in. --- .github/workflows/lint.yml | 4 +-- .github/workflows/test-e2e.yml | 8 ++++-- scripts/local_config | 2 +- test/Makefile | 22 +++++++++++---- test/bridge-e2e.bats | 4 --- test/combinations/fork12-cdk-validium.yml | 9 ++++++ test/combinations/fork12-rollup.yml | 9 ++++++ test/combinations/fork9-cdk-validium.yml | 12 ++++++++ test/combinations/fork9-rollup.yml | 12 ++++++++ test/run-e2e.sh | 17 ++++++----- test/scripts/kurtosis_prepare_params_yml.sh | 31 --------------------- 11 files changed, 77 insertions(+), 53 deletions(-) create mode 100644 test/combinations/fork12-cdk-validium.yml create mode 100644 test/combinations/fork12-rollup.yml create mode 100644 test/combinations/fork9-cdk-validium.yml create mode 100644 test/combinations/fork9-rollup.yml delete mode 100755 test/scripts/kurtosis_prepare_params_yml.sh diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index bdad36b2..77255d39 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -12,11 +12,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: 1.21.x - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 721cbf09..647c5daa 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -13,7 +13,11 @@ jobs: matrix: go-version: [ 1.22.x ] goarch: [ "amd64" ] - e2e-group: [ "elderberry-validium", "elderberry-rollup" ] + e2e-group: + - "fork9-validium" + - "fork9-rollup" + - "fork12-validium" + - "fork12-rollup" runs-on: ubuntu-latest steps: - name: Checkout code @@ -35,7 +39,7 @@ jobs: run: | echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install kurtosis-cli=0.90.1 + sudo apt install kurtosis-cli=1.3.0 kurtosis version - name: Disable kurtosis analytics diff --git a/scripts/local_config b/scripts/local_config index 7303b1fd..e821f8b7 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -178,7 +178,7 @@ function check_requirements(){ cat << EOF echo "deb [trusted=yes] 
https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt install kurtosis-cli=0.90.1 + sudo apt install kurtosis-cli kurtosis version EOF exit 1 diff --git a/test/Makefile b/test/Makefile index 4833f214..a6ab9467 100644 --- a/test/Makefile +++ b/test/Makefile @@ -46,14 +46,24 @@ generate-mocks-sync: ## Generates mocks for sync, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go -.PHONY: test-e2e-elderberry-validium -test-e2e-elderberry-validium: stop ## Runs e2e tests checking elderberry/validium - ./run-e2e.sh cdk-validium +.PHONY: test-e2e-fork9-validium +test-e2e-fork9-validium: stop + ./run-e2e.sh fork9 cdk-validium bats . -.PHONY: test-e2e-elderberry-rollup -test-e2e-elderberry-rollup: stop ## Runs e2e tests checking elderberry/rollup - ./run-e2e.sh rollup +.PHONY: test-e2e-fork9-rollup +test-e2e-fork9-rollup: stop + ./run-e2e.sh fork9 rollup + bats . + +.PHONY: test-e2e-fork12-validium +test-e2e-fork12-validium: stop + ./run-e2e.sh fork12 cdk-validium + bats . + +.PHONY: test-e2e-fork12-rollup +test-e2e-fork12-rollup: stop + ./run-e2e.sh fork12 rollup bats . .PHONY: stop diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index 842d87e9..fcea86d9 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -4,10 +4,6 @@ setup() { load 'helpers/common' load 'helpers/lxly-bridge-test' - readonly data_availability_mode=${DATA_AVAILABILITY_MODE:-"cdk-validium"} - $PROJECT_ROOT/test/scripts/kurtosis_prepare_params_yml.sh ../kurtosis-cdk $data_availability_mode - [ $? 
-ne 0 ] && echo "Error preparing params.yml" && exit 1 - if [ -z "$BRIDGE_ADDRESS" ]; then local combined_json_file="/opt/zkevm/combined.json" echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml new file mode 100644 index 00000000..7772a677 --- /dev/null +++ b/test/combinations/fork12-cdk-validium.yml @@ -0,0 +1,9 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 + zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - tx_spammer + data_availability_mode: cdk-validium + sequencer_type: erigon diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml new file mode 100644 index 00000000..05e1f51f --- /dev/null +++ b/test/combinations/fork12-rollup.yml @@ -0,0 +1,9 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 + zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - tx_spammer + data_availability_mode: rollup + sequencer_type: erigon diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml new file mode 100644 index 00000000..21a20b58 --- /dev/null +++ b/test/combinations/fork9-cdk-validium.yml @@ -0,0 +1,12 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 + zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 + zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 + cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - pless_zkevm_node + - tx_spammer + data_availability_mode: cdk-validium + sequencer_type: erigon diff --git a/test/combinations/fork9-rollup.yml b/test/combinations/fork9-rollup.yml new file mode 100644 index 00000000..a17daa2a --- /dev/null +++ b/test/combinations/fork9-rollup.yml @@ -0,0 +1,12 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 + zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 + zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 + cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - pless_zkevm_node + - tx_spammer + data_availability_mode: rollup + sequencer_type: erigon diff --git a/test/run-e2e.sh b/test/run-e2e.sh index 809f06e6..d6a27a19 100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -1,13 +1,19 @@ #!/bin/bash source $(dirname $0)/scripts/env.sh -FORK=elderberry -DATA_AVAILABILITY_MODE=$1 + +FORK=$1 +if [ -z $FORK ]; then + echo "Missing FORK: ['fork9', 'fork12']" + exit 1 +fi + +DATA_AVAILABILITY_MODE=$2 if [ -z $DATA_AVAILABILITY_MODE ]; then echo "Missing DATA_AVAILABILITY_MODE: ['rollup', 'cdk-validium']" exit 1 fi -BASE_FOLDER=$(dirname $0) +BASE_FOLDER=$(dirname $0) docker images -q cdk:latest > /dev/null if [ $? -ne 0 ] ; then echo "Building cdk:latest" @@ -18,10 +24,7 @@ else echo "docker cdk:latest already exists" fi -$BASE_FOLDER/scripts/kurtosis_prepare_params_yml.sh "$KURTOSIS_FOLDER" $DATA_AVAILABILITY_MODE -[ $? 
-ne 0 ] && echo "Error preparing params.yml" && exit 1 - kurtosis clean --all echo "Override cdk config file" cp $BASE_FOLDER/config/kurtosis-cdk-node-config.toml.template $KURTOSIS_FOLDER/templates/trusted-node/cdk-node-config.toml -kurtosis run --enclave cdk-v1 --args-file $DEST_KURTOSIS_PARAMS_YML --image-download always $KURTOSIS_FOLDER +kurtosis run --enclave cdk-v1 --args-file "combinations/$FORK-$DATA_AVAILABILITY_MODE.yml" --image-download always $KURTOSIS_FOLDER diff --git a/test/scripts/kurtosis_prepare_params_yml.sh b/test/scripts/kurtosis_prepare_params_yml.sh deleted file mode 100755 index 38f44d51..00000000 --- a/test/scripts/kurtosis_prepare_params_yml.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -source $(dirname $0)/env.sh - -if [ -z $DEST_KURTOSIS_PARAMS_YML ]; then - echo "DEST_KURTOSIS_PARAMS_YML is not set. Must be set on file env.sh" - exit 1 -fi - -# Check if the destination params file exists and don't do nothing -if [ -f $DEST_KURTOSIS_PARAMS_YML ]; then - echo "Destination params file already exists" - exit 0 -fi - -KURTOSIS_FOLDER=$1 -if [ -z $KURTOSIS_FOLDER ]; then - echo "Missing param Kurtosis Folder" - exit 1 -fi - -DATA_AVAILABILITY_MODE=$2 -if [ -z $DATA_AVAILABILITY_MODE ]; then - echo "Missing param Data Availability Mode : [rollup, cdk-validium]" - exit 1 -fi - -mkdir -p $(dirname $DEST_KURTOSIS_PARAMS_YML) -cp $KURTOSIS_FOLDER/params.yml $DEST_KURTOSIS_PARAMS_YML -yq -Y --in-place ".args.cdk_node_image = \"cdk\"" $DEST_KURTOSIS_PARAMS_YML -yq -Y --in-place ".args.data_availability_mode = \"$DATA_AVAILABILITY_MODE\"" $DEST_KURTOSIS_PARAMS_YML -yq -Y --in-place ".args.zkevm_use_gas_token_contract = $USE_L1_GAS_TOKEN_CONTRACT" $DEST_KURTOSIS_PARAMS_YML From 2908a2011d36f8393a45c5338d4a30ed9f7046a8 Mon Sep 17 00:00:00 2001 From: Daniel Jones <105369507+djpolygon@users.noreply.github.com> Date: Thu, 3 Oct 2024 08:56:47 -0500 Subject: [PATCH 20/53] feat: Initial for packagers (#90) * Initial for packagers * Adding BUILD_SCRIPT_DISABLED=1 to debian packagers --- .github/workflows/arm_deb_packager.yml | 89 +++++++++++++++++++++ .github/workflows/arm_rpm_packager.yml | 103 +++++++++++++++++++++++++ .github/workflows/x86_deb_packager.yml | 89 +++++++++++++++++++++ .github/workflows/x86_rpm_packager.yml | 102 ++++++++++++++++++++++++ .gitignore | 4 +- packaging/deb/cdk/DEBIAN/postinst | 12 +++ packaging/deb/cdk/DEBIAN/postrm | 8 ++ packaging/systemd/cdk.service.example | 16 ++++ 8 files changed, 422 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/arm_deb_packager.yml create mode 100644 .github/workflows/arm_rpm_packager.yml create mode 100644 .github/workflows/x86_deb_packager.yml create mode 100644 .github/workflows/x86_rpm_packager.yml create mode 100755 packaging/deb/cdk/DEBIAN/postinst create mode 100755 packaging/deb/cdk/DEBIAN/postrm create mode 100644 packaging/systemd/cdk.service.example diff --git a/.github/workflows/arm_deb_packager.yml b/.github/workflows/arm_deb_packager.yml new file mode 100644 index 00000000..64d451c6 --- /dev/null +++ b/.github/workflows/arm_deb_packager.yml @@ -0,0 +1,89 @@ +name: arm_deb_packager + + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: + labels: arm-runner-2204 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + # Variables + - name: Adding TAG to ENV + run: echo 
"GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + - name: adding version + run: | + NUMERIC_VERSION=$( echo ${{ env.GIT_TAG }} | sed 's/[^0-9.]//g' ) + echo "VERSION=$NUMERIC_VERSION" >> $GITHUB_ENV + + - name: go mod download + run: go mod download + + - name: Build the binary + run: make build + + - name: Build the rust binary + run: | + BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: making directory structure + run: mkdir -p packaging/deb/cdk/usr/bin/ + - name: copying necessary binary for arm64 + run: cp -rp target/cdk-node packaging/deb/cdk/usr/bin/cdk-node + - name: copying rust binary for arm64 + run: cp -rp target/release/cdk packaging/deb/cdk/usr/bin/cdk + + # Control file creation + - name: Create control file + run: | + echo "Package: cdk" >> packaging/deb/cdk/DEBIAN/control + echo "Version: ${{ env.VERSION }}" >> packaging/deb/cdk/DEBIAN/control + echo "Section: base" >> packaging/deb/cdk/DEBIAN/control + echo "Priority: optional" >> packaging/deb/cdk/DEBIAN/control + echo "Architecture: arm64" >> packaging/deb/cdk/DEBIAN/control + echo "Maintainer: devops@polygon.technology" >> packaging/deb/cdk/DEBIAN/control + echo "Description: cdk binary package" >> packaging/deb/cdk/DEBIAN/control + + - name: Creating package for binary for cdk ${{ env.ARCH }} + run: cp -rp packaging/deb/cdk packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + + - name: create checksum for the arm64 package + run: cd packaging/deb/ && sha256sum cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb > cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb.checksum + env: + ARCH: arm64 + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/deb/cdk**.deb + packaging/deb/cdk**.deb.checksum diff --git a/.github/workflows/arm_rpm_packager.yml b/.github/workflows/arm_rpm_packager.yml new file mode 100644 index 00000000..614b80f2 --- /dev/null +++ b/.github/workflows/arm_rpm_packager.yml @@ -0,0 +1,103 @@ +name: arm_rpm_packager + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: + labels: arm-runner-2204 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + + - name: Adding a TAG.1 to the env + run: echo "GIT_TAG1=`echo $(git describe --tags --abbrev=0)`" | sed 's/-/./g' >> $GITHUB_ENV + + - name: Download deps for project + run: go mod download + + - name: Building cdk-node for amd64 + run: make build + + - name: Building the cdk + run: | + BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: Installing some dependencies + run: sudo apt-get update && sudo apt-get install -y rpm + + - name: Setup rpm package for binary + run: | + mkdir -p packaging/rpm/SPECS + mkdir -p packaging/rpm/BUILD + mkdir -p packaging/rpm/RPMS + mkdir -p packaging/rpm/SRPMS + + touch packaging/rpm/cdk.spec + echo "Name: cdk" >> packaging/rpm/SPECS/cdk.spec + echo "Version: ${{ env.GIT_TAG1 }}" >> packaging/rpm/SPECS/cdk.spec + echo "Release: 1%{?dist}" >> packaging/rpm/SPECS/cdk.spec + echo "License: 
GPL/AGPL" >> packaging/rpm/SPECS/cdk.spec + echo "BuildArch: aarch64" >> packaging/rpm/SPECS/cdk.spec + echo "Summary: cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%description" >> packaging/rpm/SPECS/cdk.spec + echo "cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%pre" >> packaging/rpm/SPECS/cdk.spec + echo "getent group cdk >/dev/null || groupadd -r cdk" >> packaging/rpm/SPECS/cdk.spec + echo "getent passwd cdk >/dev/null || useradd -s /bin/false -d /opt/cdk -r cdk -g cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%install" >> packaging/rpm/SPECS/cdk.spec + echo "mkdir -p %{buildroot}/usr/bin" >> packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/cdk-node %{buildroot}/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/release/cdk %{buildroot}/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%files" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + + + - name: Construct rpm package + run: | + rpmbuild --define "_topdir /home/runner/work/cdk/cdk/packaging/rpm_build" \ + --define "_builddir %{_topdir}/BUILD" \ + --define "_rpmdir %{_topdir}/RPMS" \ + --define "_srcrpmdir %{_topdir}/SRPMS" \ + --define "__spec_install_post /bin/true" \ + -bb packaging/rpm/SPECS/cdk.spec + + - name: Rename file for post rpm build and for checksum + run: mv /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}-1.aarch64.rpm /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}.aarch64.rpm + + - name: Checksum for the rpm package + run: sha256sum /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}.aarch64.rpm > /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}.aarch64.rpm.checksum + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/rpm_build/RPMS/aarch64/cdk-**.rpm + packaging/rpm_build/RPMS/aarch64/cdk-**.rpm.checksum diff --git a/.github/workflows/x86_deb_packager.yml b/.github/workflows/x86_deb_packager.yml new file mode 100644 index 00000000..2568861a --- /dev/null +++ b/.github/workflows/x86_deb_packager.yml @@ -0,0 +1,89 @@ +name: x86_deb_packager + + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + # Variables + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + - name: adding version + run: | + NUMERIC_VERSION=$( echo ${{ env.GIT_TAG }} | sed 's/[^0-9.]//g' ) + echo "VERSION=$NUMERIC_VERSION" >> $GITHUB_ENV + + - name: go mod download + run: go mod download + + - name: Build the binary + run: make build + + - name: Build the rust binary + run: | + BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: making directory structure + run: mkdir -p packaging/deb/cdk/usr/bin/ + - name: copying necessary binary for amd64 + run: cp -rp target/cdk-node packaging/deb/cdk/usr/bin/cdk-node + - name: copying rust binary for amd64 + run: cp -rp target/release/cdk packaging/deb/cdk/usr/bin/cdk + + # Control file creation + - name: Create control 
file + run: | + echo "Package: cdk" >> packaging/deb/cdk/DEBIAN/control + echo "Version: ${{ env.VERSION }}" >> packaging/deb/cdk/DEBIAN/control + echo "Section: base" >> packaging/deb/cdk/DEBIAN/control + echo "Priority: optional" >> packaging/deb/cdk/DEBIAN/control + echo "Architecture: amd64" >> packaging/deb/cdk/DEBIAN/control + echo "Maintainer: devops@polygon.technology" >> packaging/deb/cdk/DEBIAN/control + echo "Description: cdk binary package" >> packaging/deb/cdk/DEBIAN/control + + - name: Creating package for binary for cdk ${{ env.ARCH }} + run: cp -rp packaging/deb/cdk packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + + - name: Create checksum for the amd64 package + run: cd packaging/deb/ && sha256sum cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb > cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb.checksum + env: + ARCH: amd64 + + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/deb/cdk**.deb + packaging/deb/cdk**.deb.checksum diff --git a/.github/workflows/x86_rpm_packager.yml b/.github/workflows/x86_rpm_packager.yml new file mode 100644 index 00000000..9f06fb64 --- /dev/null +++ b/.github/workflows/x86_rpm_packager.yml @@ -0,0 +1,102 @@ +name: x86_rpm_packager + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + + - name: Adding a TAG.1 to the env + run: echo "GIT_TAG1=`echo $(git describe --tags --abbrev=0)`" | sed 's/-/./g' >> $GITHUB_ENV + + - name: Download deps for project + run: go mod download + + - name: Building cdk-node for amd64 + run: make build + + - name: Building the cdk + run: | + BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: Installing some dependencies + run: sudo apt-get update && sudo apt-get install -y rpm + + - name: Setup rpm package for binary + run: | + mkdir -p packaging/rpm/SPECS + mkdir -p packaging/rpm/BUILD + mkdir -p packaging/rpm/RPMS + mkdir -p packaging/rpm/SRPMS + + touch packaging/rpm/cdk.spec + echo "Name: cdk" >> packaging/rpm/SPECS/cdk.spec + echo "Version: ${{ env.GIT_TAG1 }}" >> packaging/rpm/SPECS/cdk.spec + echo "Release: 1%{?dist}" >> packaging/rpm/SPECS/cdk.spec + echo "License: GPL/AGPL" >> packaging/rpm/SPECS/cdk.spec + echo "BuildArch: x86_64" >> packaging/rpm/SPECS/cdk.spec + echo "Summary: cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%description" >> packaging/rpm/SPECS/cdk.spec + echo "cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%pre" >> packaging/rpm/SPECS/cdk.spec + echo "getent group cdk >/dev/null || groupadd -r cdk" >> packaging/rpm/SPECS/cdk.spec + echo "getent passwd cdk >/dev/null || useradd -s /bin/false -d /opt/cdk -r cdk -g cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%install" >> packaging/rpm/SPECS/cdk.spec + echo "mkdir -p %{buildroot}/usr/bin" >> packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/cdk-node %{buildroot}/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + echo 
"cp /home/runner/work/cdk/cdk/target/release/cdk %{buildroot}/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%files" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + + + - name: Construct rpm package + run: | + rpmbuild --define "_topdir /home/runner/work/cdk/cdk/packaging/rpm_build" \ + --define "_builddir %{_topdir}/BUILD" \ + --define "_rpmdir %{_topdir}/RPMS" \ + --define "_srcrpmdir %{_topdir}/SRPMS" \ + --define "__spec_install_post /bin/true" \ + -bb packaging/rpm/SPECS/cdk.spec + + - name: Rename file for post rpm build and for checksum + run: mv /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}-1.x86_64.rpm /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}.x86_64.rpm + + - name: Checksum for the rpm package + run: sha256sum /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}.x86_64.rpm > /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}.x86_64.rpm.checksum + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/rpm_build/RPMS/x86_64/cdk-**.rpm + packaging/rpm_build/RPMS/x86_64/cdk-**.rpm.checksum diff --git a/.gitignore b/.gitignore index 958ed6ff..abfa990f 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,6 @@ target/ book/ index.html tmp -.vscode \ No newline at end of file +.vscode +.idea +.idea/* diff --git a/packaging/deb/cdk/DEBIAN/postinst b/packaging/deb/cdk/DEBIAN/postinst new file mode 100755 index 00000000..e5765a67 --- /dev/null +++ b/packaging/deb/cdk/DEBIAN/postinst @@ -0,0 +1,12 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent cdk +if [ -d "/opt/cdk" ] +then + echo "Directory /opt/cdk exists." 
+else + mkdir -p /opt/cdk + chown -R cdk /opt/cdk +fi +systemctl daemon-reload diff --git a/packaging/deb/cdk/DEBIAN/postrm b/packaging/deb/cdk/DEBIAN/postrm new file mode 100755 index 00000000..a2ea87a6 --- /dev/null +++ b/packaging/deb/cdk/DEBIAN/postrm @@ -0,0 +1,8 @@ +#!/bin/bash +# +############### +# Remove cdk-node installs +############## +#rm -rf /lib/systemd/system/cdk-node.service +deluser cdk +#systemctl daemon-reload diff --git a/packaging/systemd/cdk.service.example b/packaging/systemd/cdk.service.example new file mode 100644 index 00000000..d427e775 --- /dev/null +++ b/packaging/systemd/cdk.service.example @@ -0,0 +1,16 @@ +[Unit] + Description=cdk + StartLimitIntervalSec=500 + StartLimitBurst=5 + +[Service] + Restart=on-failure + RestartSec=5s + ExecStart=/usr/bin/cdk --config $config --chain $chain node + Type=simple + KillSignal=SIGINT + User=cdk + TimeoutStopSec=120 + +[Install] + WantedBy=multi-user.target From 7550434f40a1e6855c320d5803a381b9520434ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= <93934272+Stefan-Ethernal@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:31:39 +0200 Subject: [PATCH 21/53] feat: Integrate ethtxmanager with SQLite storage (#97) * chore: fix comments * feat: integrate the v0.2.0 eth tx manager spec * fix: rename DBPath to StoragePath in the ethtxmanager config * chore: rename according to golang guidelines * chore: deprecate the PersistenceFilename config param * test: unit test for the deprecated PersistenceFilename configuration parameter --- aggoracle/chaingersender/evm.go | 21 ++++----- aggregator/aggregator.go | 11 ++--- aggregator/config.go | 2 +- claimsponsor/evmclaimsponsor.go | 20 ++++----- cmd/run.go | 8 ++-- config/config.go | 15 ++++++- config/config_test.go | 41 ++++++++++++------ config/default.go | 8 ++-- config/example-config.toml | 4 +- db/meddler.go | 28 ++++++------ etherman/config/config.go | 2 +- go.mod | 32 +++++++------- go.sum | 71 ++++++++++++++++--------------- sequencesender/config.go | 2 +- sequencesender/ethtx.go | 25 +++++------ sequencesender/sequencesender.go | 9 ++-- test/config/test.config.toml | 4 +- test/helpers/aggoracle_e2e.go | 58 ++++++++++++++++++------- test/helpers/ethtxmanmock_e2e.go | 4 +- test/helpers/mock_ethtxmanager.go | 24 +++++------ 20 files changed, 222 insertions(+), 167 deletions(-) diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go index 4ad053c4..3659eb3f 100644 --- a/aggoracle/chaingersender/evm.go +++ b/aggoracle/chaingersender/evm.go @@ -9,7 +9,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot" cfgTypes "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -26,9 +27,9 @@ type EthClienter interface { type EthTxManager interface { Remove(ctx context.Context, id common.Hash) error ResultsByStatus(ctx context.Context, - statuses []ethtxmanager.MonitoredTxStatus, - ) ([]ethtxmanager.MonitoredTxResult, error) - Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) + statuses []ethtxtypes.MonitoredTxStatus, + ) ([]ethtxtypes.MonitoredTxResult, error) + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error)
Add(ctx context.Context, to *common.Address, value *big.Int, @@ -114,14 +115,14 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com c.logger.Error("error calling ethTxMan.Result: ", err) } switch res.Status { - case ethtxmanager.MonitoredTxStatusCreated, - ethtxmanager.MonitoredTxStatusSent: + case ethtxtypes.MonitoredTxStatusCreated, + ethtxtypes.MonitoredTxStatusSent: continue - case ethtxmanager.MonitoredTxStatusFailed: + case ethtxtypes.MonitoredTxStatusFailed: return fmt.Errorf("tx %s failed", res.ID) - case ethtxmanager.MonitoredTxStatusMined, - ethtxmanager.MonitoredTxStatusSafe, - ethtxmanager.MonitoredTxStatusFinalized: + case ethtxtypes.MonitoredTxStatusMined, + ethtxtypes.MonitoredTxStatusSafe, + ethtxtypes.MonitoredTxStatusFinalized: return nil default: c.logger.Error("unexpected tx status: ", res.Status) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 359021eb..4f8536cb 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -24,10 +24,11 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/datastream" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" streamlog "github.com/0xPolygonHermez/zkevm-data-streamer/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" synclog "github.com/0xPolygonHermez/zkevm-synchronizer-l1/log" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" @@ -989,7 +990,7 @@ func (a *Aggregator) settleDirect( } // process monitored batch verifications before starting a next cycle - a.ethTxManager.ProcessPendingMonitoredTxs(ctx, func(result ethtxmanager.MonitoredTxResult) { + a.ethTxManager.ProcessPendingMonitoredTxs(ctx, func(result ethtxtypes.MonitoredTxResult) { a.handleMonitoredTxResult(result) }) @@ -1928,9 +1929,9 @@ func (hc *healthChecker) Watch(req *grpchealth.HealthCheckRequest, server grpche }) } -func (a *Aggregator) handleMonitoredTxResult(result ethtxmanager.MonitoredTxResult) { +func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult) { mTxResultLogger := ethtxmanager.CreateMonitoredTxResultLogger(result) - if result.Status == ethtxmanager.MonitoredTxStatusFailed { + if result.Status == ethtxtypes.MonitoredTxStatusFailed { mTxResultLogger.Fatal("failed to send batch verification, TODO: review this fatal and define what to do in this case") } diff --git a/aggregator/config.go b/aggregator/config.go index 4550c637..89676e3d 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -10,7 +10,7 @@ import ( "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" syncronizerConfig "github.com/0xPolygonHermez/zkevm-synchronizer-l1/config" "github.com/ethereum/go-ethereum/accounts/keystore" ) diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go index 5f394b14..12d0c4ca 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -9,7 +9,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" 
configTypes "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -33,9 +34,8 @@ type EthClienter interface { type EthTxManager interface { Remove(ctx context.Context, id common.Hash) error - ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus, - ) ([]ethtxmanager.MonitoredTxResult, error) - Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) + ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) } @@ -166,14 +166,14 @@ func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStat return "", err } switch res.Status { - case ethtxmanager.MonitoredTxStatusCreated, - ethtxmanager.MonitoredTxStatusSent: + case ethtxtypes.MonitoredTxStatusCreated, + ethtxtypes.MonitoredTxStatusSent: return WIPStatus, nil - case ethtxmanager.MonitoredTxStatusFailed: + case ethtxtypes.MonitoredTxStatusFailed: return FailedClaimStatus, nil - case ethtxmanager.MonitoredTxStatusMined, - ethtxmanager.MonitoredTxStatusSafe, - ethtxmanager.MonitoredTxStatusFinalized: + case ethtxtypes.MonitoredTxStatusMined, + ethtxtypes.MonitoredTxStatusSafe, + ethtxtypes.MonitoredTxStatusFinalized: return SuccessClaimStatus, nil default: return "", fmt.Errorf("unexpected tx status: %v", res.Status) diff --git a/cmd/run.go b/cmd/run.go index 68f4acdd..d90a44a1 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -35,10 +35,10 @@ import ( "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/pgstatestorage" "github.com/0xPolygon/cdk/translator" - ethtxman "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman/etherscan" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" + ethtxman "github.com/0xPolygon/zkevm-ethtx-manager/etherman" + "github.com/0xPolygon/zkevm-ethtx-manager/etherman/etherscan" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" "github.com/ethereum/go-ethereum/ethclient" "github.com/jackc/pgx/v4/pgxpool" "github.com/urfave/cli/v2" diff --git a/config/config.go b/config/config.go index 431d0175..720a11e4 100644 --- a/config/config.go +++ b/config/config.go @@ -18,7 +18,7 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/sequencesender" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" "github.com/mitchellh/mapstructure" "github.com/spf13/viper" "github.com/urfave/cli/v2" @@ -51,7 +51,10 @@ const ( // FlagMaxAmount is the flag to avoid to use the flag FlagAmount FlagMaxAmount = "max-amount" - deprecatedFieldSyncDB = "Aggregator.Synchronizer.DB is deprecated use Aggregator.Synchronizer.SQLDB instead" + deprecatedFieldSyncDB = "Aggregator.Synchronizer.DB is deprecated. 
Use Aggregator.Synchronizer.SQLDB instead." + + deprecatedFieldPersistenceFilename = "EthTxManager.PersistenceFilename is deprecated." + + " Use EthTxManager.StoragePath instead." ) type ForbiddenField struct { @@ -65,6 +68,14 @@ var ( FieldName: "aggregator.synchronizer.db.", Reason: deprecatedFieldSyncDB, }, + { + FieldName: "sequencesender.ethtxmanager.persistencefilename", + Reason: deprecatedFieldPersistenceFilename, + }, + { + FieldName: "aggregator.ethtxmanager.persistencefilename", + Reason: deprecatedFieldPersistenceFilename, + }, } ) diff --git a/config/config_test.go b/config/config_test.go index 1aaa24e0..a0844d96 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -34,18 +34,33 @@ func TestLoadConfigWithUnexpectedFields(t *testing.T) { require.NotNil(t, cfg) } -const configWithForbiddenFields = ` -[aggregator.synchronizer.db] -name = "value" -` - func TestLoadConfigWithForbiddenFields(t *testing.T) { - tmpFile, err := os.CreateTemp("", "ut_config") - require.NoError(t, err) - defer os.Remove(tmpFile.Name()) - _, err = tmpFile.Write([]byte(configWithForbiddenFields)) - require.NoError(t, err) - cfg, err := LoadFile(tmpFile.Name()) - require.NoError(t, err) - require.NotNil(t, cfg) + cases := []struct { + name string + input string + }{ + { + name: "[Aggregator.Synchronizer] DB", + input: `[aggregator.synchronizer.db] + name = "value"`, + }, + { + name: "[SequenceSender.EthTxManager] PersistenceFilename", + input: `[SequenceSender.EthTxManager] + PersistenceFilename = "foo.json"`, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(c.input)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) + }) + } } diff --git a/config/default.go b/config/default.go index d10bf1f8..a7730ec7 100644 --- a/config/default.go +++ b/config/default.go @@ -53,7 +53,7 @@ GetBatchWaitInterval = "10s" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" + StoragePath = "ethtxmanager.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -109,7 +109,7 @@ SequencerPrivateKey = {} ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "" + StoragePath = "" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -184,7 +184,7 @@ WaitPeriodNextGER="100ms" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "/tmp/ethtxmanager-sequencesender.json" + StoragePath = "/tmp/ethtxmanager-sequencesender.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -223,7 +223,7 @@ GasOffset = 0 ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "/tmp/ethtxmanager-claimsopnsor.json" + StoragePath = "/tmp/ethtxmanager-claimsponsor.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 diff --git a/config/example-config.toml b/config/example-config.toml index e7207861..c3e222ed 100644 --- a/config/example-config.toml +++ b/config/example-config.toml @@ -31,7 +31,7 @@ MaxPendingTx = 1 ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" + StoragePath = "ethtxmanager.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 
FinalizedStatusL1NumberOfBlocks = 0 @@ -84,7 +84,7 @@ SequencerPrivateKey = {} ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "" + StoragePath = "" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 diff --git a/db/meddler.go b/db/meddler.go index e1f55086..fb632fb4 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -38,7 +38,7 @@ func SliceToSlicePtrs(slice interface{}) interface{} { v := reflect.ValueOf(slice) vLen := v.Len() typ := v.Type().Elem() - res := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(typ)), vLen, vLen) + res := reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(typ)), vLen, vLen) for i := 0; i < vLen; i++ { res.Index(i).Set(v.Index(i).Addr()) } @@ -57,7 +57,7 @@ func SlicePtrsToSlice(slice interface{}) interface{} { return res.Interface() } -// BigIntMeddler encodes or decodes the field value to or from JSON +// BigIntMeddler encodes or decodes the field value to or from string type BigIntMeddler struct{} // PreRead is called before a Scan operation for fields that have the BigIntMeddler @@ -97,16 +97,16 @@ func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, er return field.String(), nil } -// MerkleProofMeddler encodes or decodes the field value to or from JSON +// MerkleProofMeddler encodes or decodes the field value to or from string type MerkleProofMeddler struct{} -// PreRead is called before a Scan operation for fields that have the ProofMeddler +// PreRead is called before a Scan operation for fields that have the MerkleProofMeddler func (b MerkleProofMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { // give a pointer to a byte buffer to grab the raw data return new(string), nil } -// PostRead is called after a Scan operation for fields that have the ProofMeddler +// PostRead is called after a Scan operation for fields that have the MerkleProofMeddler func (b MerkleProofMeddler) PostRead(fieldPtr, scanTarget interface{}) error { ptr, ok := scanTarget.(*string) if !ok { @@ -129,7 +129,7 @@ func (b MerkleProofMeddler) PostRead(fieldPtr, scanTarget interface{}) error { return nil } -// PreWrite is called before an Insert or Update operation for fields that have the ProofMeddler +// PreWrite is called before an Insert or Update operation for fields that have the MerkleProofMeddler func (b MerkleProofMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { field, ok := fieldPtr.(tree.Proof) if !ok { @@ -143,16 +143,16 @@ func (b MerkleProofMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{ return s, nil } -// HashMeddler encodes or decodes the field value to or from JSON +// HashMeddler encodes or decodes the field value to or from string type HashMeddler struct{} -// PreRead is called before a Scan operation for fields that have the ProofMeddler +// PreRead is called before a Scan operation for fields that have the HashMeddler func (b HashMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { // give a pointer to a byte buffer to grab the raw data return new(string), nil } -// PostRead is called after a Scan operation for fields that have the ProofMeddler +// PostRead is called after a Scan operation for fields that have the HashMeddler func (b HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { ptr, ok := scanTarget.(*string) if !ok { @@ -169,7 +169,7 @@ func (b HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { return nil } -// PreWrite is called before 
an Insert or Update operation for fields that have the ProofMeddler +// PreWrite is called before an Insert or Update operation for fields that have the HashMeddler func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { field, ok := fieldPtr.(common.Hash) if !ok { @@ -178,16 +178,16 @@ func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err return field.Hex(), nil } -// AddressMeddler encodes or decodes the field value to or from JSON +// AddressMeddler encodes or decodes the field value to or from string type AddressMeddler struct{} -// PreRead is called before a Scan operation for fields that have the ProofMeddler +// PreRead is called before a Scan operation for fields that have the AddressMeddler func (b AddressMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { // give a pointer to a byte buffer to grab the raw data return new(string), nil } -// PostRead is called after a Scan operation for fields that have the ProofMeddler +// PostRead is called after a Scan operation for fields that have the AddressMeddler func (b AddressMeddler) PostRead(fieldPtr, scanTarget interface{}) error { ptr, ok := scanTarget.(*string) if !ok { @@ -204,7 +204,7 @@ func (b AddressMeddler) PostRead(fieldPtr, scanTarget interface{}) error { return nil } -// PreWrite is called before an Insert or Update operation for fields that have the ProofMeddler +// PreWrite is called before an Insert or Update operation for fields that have the AddressMeddler func (b AddressMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { field, ok := fieldPtr.(common.Address) if !ok { diff --git a/etherman/config/config.go b/etherman/config/config.go index c9208ee4..fcf7cd79 100644 --- a/etherman/config/config.go +++ b/etherman/config/config.go @@ -1,7 +1,7 @@ package config import ( - "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman" + "github.com/0xPolygon/zkevm-ethtx-manager/etherman" "github.com/ethereum/go-ethereum/common" ) diff --git a/go.mod b/go.mod index 06afc08d..0f85874c 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f github.com/0xPolygon/cdk-data-availability v0.0.9 github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 + github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 - github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 @@ -36,52 +36,52 @@ require ( ) require ( - github.com/DataDog/zstd v1.5.2 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect github.com/VictoriaMetrics/metrics v1.23.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/bits-and-blooms/bitset v1.14.2 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/fifo 
v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.1 // indirect + github.com/cockroachdb/pebble v1.1.2 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/consensys/gnark-crypto v0.13.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect - github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect + github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/didip/tollbooth/v6 v6.1.2 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/erigontech/mdbx-go v0.27.14 // indirect - github.com/ethereum/c-kzg-4844 v1.0.0 // indirect + github.com/ethereum/c-kzg-4844 v1.0.3 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/getsentry/sentry-go v0.28.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-stack/stack v1.8.1 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 // indirect @@ -98,7 +98,7 @@ require ( github.com/jackc/puddle v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jmoiron/sqlx v1.2.0 // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.7 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/ledgerwatch/log/v3 v3.9.0 // indirect @@ -107,7 +107,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect @@ -124,7 +124,7 @@ require ( github.com/prometheus/procfs v0.12.0 // indirect github.com/remyoudompheng/bigfft 
v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect diff --git a/go.sum b/go.sum index c1685ecf..c3ea3568 100644 --- a/go.sum +++ b/go.sum @@ -4,15 +4,15 @@ github.com/0xPolygon/cdk-data-availability v0.0.9 h1:KkP+hJH9nY5fljpSNtW2pfP5YQC github.com/0xPolygon/cdk-data-availability v0.0.9/go.mod h1:5A+CU4FGeyG8zTDJc0khMzRxPzFzmpRydeEWmLztgh4= github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8PudddGacnVLatrCuAG9e1Ut8W4J0GoawssU= github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447 h1:8nZjZrHZo+P9hTkhwtQ4J6eh9v4MTMtVb9jRDra8h0s= -github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240930120324-65816dead447/go.mod h1:4iWpcwMOZJPapUzFB/HjTAM0X/gltHSEzQHE0lOt+eY= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 h1:DYioOpHDcn7rtojInDTEv7vmnhs8HP6zOSSXSGENM7s= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= @@ -31,8 +31,8 @@ github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPn github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= -github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.14.2 h1:YXVoyPndbdvcEVcseEovVfp0qjJp7S+i5+xgp/Nfbdc= +github.com/bits-and-blooms/bitset v1.14.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= @@ -55,28 +55,28 @@ github.com/cockroachdb/datadriven 
v1.0.3-0.20230413201302-be42291fc80f h1:otljaY github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= -github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= +github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= -github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc= +github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= -github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= -github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= -github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= +github.com/crate-crypto/go-kzg-4844 v1.1.0 
h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= +github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -85,18 +85,18 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/didip/tollbooth/v6 v6.1.2 h1:Kdqxmqw9YTv0uKajBUiWQg+GURL/k4vy9gmLCL01PjQ= github.com/didip/tollbooth/v6 v6.1.2/go.mod h1:xjcse6CTHCLuOkzsWrEgdy9WPJFv+p/x6v+MyfP+O9s= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/erigontech/mdbx-go v0.27.14 h1:IVVeQVCAjZRpAR8bThlP2ISxrOwdV35NZdGwAgotaRw= github.com/erigontech/mdbx-go v0.27.14/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= -github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= -github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs= +github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.14.8 h1:NgOWvXS+lauK+zFukEvi85UmmsS/OkV0N23UZ1VTIig= github.com/ethereum/go-ethereum v1.14.8/go.mod h1:TJhyuDq0JDppAkFXgqjwpdlQApywnu/m10kFPxh8vvs= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= @@ -110,8 +110,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/getsentry/sentry-go v0.28.1 h1:zzaSm/vHmGllRM6Tpx1492r0YDzauArdBfkJRtY6P5k= +github.com/getsentry/sentry-go v0.28.1/go.mod 
h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= @@ -135,8 +135,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -172,8 +172,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= @@ -254,8 +254,8 @@ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhB github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -299,8 +299,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty 
v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= @@ -318,8 +318,9 @@ github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFV github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -363,8 +364,8 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= diff --git a/sequencesender/config.go b/sequencesender/config.go index 3e138e49..f264f904 100644 --- a/sequencesender/config.go +++ b/sequencesender/config.go @@ -3,7 +3,7 @@ package sequencesender import ( "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" "github.com/ethereum/go-ethereum/common" ) diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go index 1898a8e8..2873c3b5 100644 --- a/sequencesender/ethtx.go +++ b/sequencesender/ethtx.go @@ -11,7 +11,8 @@ import ( "time" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + 
"github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum/common" ) @@ -89,7 +90,7 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com // Add tx to internal structure s.mutexEthTx.Lock() s.ethTransactions[txHash] = &txData - txResults := make(map[common.Hash]ethtxmanager.TxResult, 0) + txResults := make(map[common.Hash]types.TxResult, 0) s.copyTxData(txHash, paramData, txResults) err = s.getResultAndUpdateEthTx(ctx, txHash) if err != nil { @@ -126,7 +127,7 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) { continue } - if !data.OnMonitor || data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { + if !data.OnMonitor || data.Status == types.MonitoredTxStatusFinalized.String() { toPurge = append(toPurge, hash) // Remove from tx monitor @@ -167,7 +168,7 @@ func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { txSync uint64 ) for hash, data := range s.ethTransactions { - if data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { + if data.Status == types.MonitoredTxStatusFinalized.String() { continue } @@ -179,9 +180,9 @@ func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { txStatus := s.ethTransactions[hash].Status // Count if it is not in a final state if s.ethTransactions[hash].OnMonitor && - txStatus != ethtxmanager.MonitoredTxStatusFailed.String() && - txStatus != ethtxmanager.MonitoredTxStatusSafe.String() && - txStatus != ethtxmanager.MonitoredTxStatusFinalized.String() { + txStatus != types.MonitoredTxStatusFailed.String() && + txStatus != types.MonitoredTxStatusSafe.String() && + txStatus != types.MonitoredTxStatusFinalized.String() { txPending++ } } @@ -239,7 +240,7 @@ func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { // copyTxData copies tx data in the internal structure func (s *SequenceSender) copyTxData( - txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult, + txHash common.Hash, txData []byte, txsResults map[common.Hash]types.TxResult, ) { s.ethTxData[txHash] = make([]byte, len(txData)) copy(s.ethTxData[txHash], txData) @@ -260,7 +261,7 @@ func (s *SequenceSender) copyTxData( } // updateEthTxResult handles updating transaction state -func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmanager.MonitoredTxResult) { +func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult types.MonitoredTxResult) { if txData.Status != txResult.Status.String() { log.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) txData.StatusTimestamp = time.Now() @@ -270,9 +271,9 @@ func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmana txData.StateHistory = append(txData.StateHistory, stTrans) // Manage according to the state - statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() || - txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String() - if txData.Status == ethtxmanager.MonitoredTxStatusFailed.String() { + statusConsolidated := txData.Status == types.MonitoredTxStatusSafe.String() || + txData.Status == types.MonitoredTxStatusFinalized.String() + if txData.Status == types.MonitoredTxStatusFailed.String() { s.logFatalf("transaction %v result failed!") } else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatchNumber { s.latestVirtualTime = txData.StatusTimestamp diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 509b1a60..23d4e43a 
100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -14,8 +14,8 @@ import ( "github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" "github.com/ethereum/go-ethereum/common" ) @@ -95,11 +95,8 @@ func (s *SequenceSender) Start(ctx context.Context) { // Start ethtxmanager client go s.ethTxManager.Start() - // Get current nonce - var err error - // Get latest virtual state batch from L1 - err = s.getLatestVirtualBatch() + err := s.getLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } diff --git a/test/config/test.config.toml b/test/config/test.config.toml index e5d4382c..61fd4401 100644 --- a/test/config/test.config.toml +++ b/test/config/test.config.toml @@ -28,7 +28,7 @@ GetBatchWaitInterval = "10s" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" + StoragePath = "ethtxmanager.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -86,7 +86,7 @@ SequencerPrivateKey = {} ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "" + StoragePath = "" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go index 125d73cf..a19cfd42 100644 --- a/test/helpers/aggoracle_e2e.go +++ b/test/helpers/aggoracle_e2e.go @@ -64,7 +64,10 @@ func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { ctx := context.Background() l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) - oracle, err := aggoracle.New(log.GetDefaultLogger(), sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) //nolint:mnd + oracle, err := aggoracle.New( + log.GetDefaultLogger(), sender, + l1Client.Client(), syncer, + etherman.LatestBlock, time.Millisecond*20) //nolint:mnd require.NoError(t, err) go oracle.Start(ctx) @@ -117,8 +120,11 @@ func CommonSetup(t *testing.T) ( require.NoError(t, err) // Syncer dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, syncBlockChunkSize, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, periodRetry, retries, - l1infotreesync.FlagAllowWrongContractsAddrs) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, + gerL1Addr, common.Address{}, + syncBlockChunkSize, etherman.LatestBlock, + reorg, l1Client.Client(), + time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) @@ -186,7 +192,9 @@ func newSimulatedL1(auth *bind.TransactOpts) ( bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, 
+ fmt.Errorf("failed to deploy bridge implementation: %w", err) } client.Commit() @@ -213,7 +221,9 @@ func newSimulatedL1(auth *bind.TransactOpts) ( []byte{}, // gasTokenMetadata ) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to pack data for proxy initialization: %w", err) } bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( @@ -224,18 +234,24 @@ func newSimulatedL1(auth *bind.TransactOpts) ( dataCallProxy, ) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) } client.Commit() bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to create bridge contract instance: %w", err) } checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false}) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to get Global Exit Root Manager: %w", err) } if precalculatedAddr != checkGERAddr { return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( @@ -244,7 +260,8 @@ func newSimulatedL1(auth *bind.TransactOpts) ( ) } - gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), auth.From, bridgeAddr) + gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), + auth.From, bridgeAddr) if err != nil { return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) } @@ -301,7 +318,9 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to deploy bridge implementation: %w", err) } client.Commit() @@ -329,7 +348,9 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( []byte{}, // gasTokenMetadata ) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to pack data for proxy initialization: %w", err) } bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( @@ -340,7 +361,9 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( dataCallProxy, ) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to deploy transparent upgradable proxy: 
%w", err) } if bridgeAddr != precalculatedBridgeAddr { return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( @@ -352,12 +375,16 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to create bridge contract instance: %w", err) } checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err) + return nil, common.Address{}, nil, + common.Address{}, nil, + fmt.Errorf("failed to get Global Exit Root Manager: %w", err) } if precalculatedAddr != checkGERAddr { return nil, common.Address{}, nil, common.Address{}, nil, errors.New( @@ -365,7 +392,8 @@ func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( ) } - gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(authDeployer, client.Client(), auth.From) + gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0( + authDeployer, client.Client(), auth.From) if err != nil { return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) } diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go index ebc3513f..0c8ee0f8 100644 --- a/test/helpers/ethtxmanmock_e2e.go +++ b/test/helpers/ethtxmanmock_e2e.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -97,7 +97,7 @@ func NewEthTxManMock( Return(common.Hash{}, nil) // res, err := c.ethTxMan.Result(ctx, id) ethTxMock.On("Result", mock.Anything, mock.Anything). 
- Return(ethtxmanager.MonitoredTxResult{Status: ethtxmanager.MonitoredTxStatusMined}, nil) + Return(ethtxtypes.MonitoredTxResult{Status: ethtxtypes.MonitoredTxStatusMined}, nil) return ethTxMock } diff --git a/test/helpers/mock_ethtxmanager.go b/test/helpers/mock_ethtxmanager.go index a75f57e9..adb21f26 100644 --- a/test/helpers/mock_ethtxmanager.go +++ b/test/helpers/mock_ethtxmanager.go @@ -9,7 +9,7 @@ import ( context "context" - ethtxmanager "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" mock "github.com/stretchr/testify/mock" @@ -70,22 +70,22 @@ func (_m *EthTxManagerMock) Remove(ctx context.Context, id common.Hash) error { } // Result provides a mock function with given fields: ctx, id -func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) { +func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) { ret := _m.Called(ctx, id) if len(ret) == 0 { panic("no return value specified for Result") } - var r0 ethtxmanager.MonitoredTxResult + var r0 ethtxtypes.MonitoredTxResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxmanager.MonitoredTxResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxtypes.MonitoredTxResult, error)); ok { return rf(ctx, id) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxmanager.MonitoredTxResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxtypes.MonitoredTxResult); ok { r0 = rf(ctx, id) } else { - r0 = ret.Get(0).(ethtxmanager.MonitoredTxResult) + r0 = ret.Get(0).(ethtxtypes.MonitoredTxResult) } if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { @@ -98,27 +98,27 @@ func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxma } // ResultsByStatus provides a mock function with given fields: ctx, statuses -func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error) { +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) { ret := _m.Called(ctx, statuses) if len(ret) == 0 { panic("no return value specified for ResultsByStatus") } - var r0 []ethtxmanager.MonitoredTxResult + var r0 []ethtxtypes.MonitoredTxResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error)); ok { return rf(ctx, statuses) } - if rf, ok := ret.Get(0).(func(context.Context, []ethtxmanager.MonitoredTxStatus) []ethtxmanager.MonitoredTxResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) []ethtxtypes.MonitoredTxResult); ok { r0 = rf(ctx, statuses) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]ethtxmanager.MonitoredTxResult) + r0 = ret.Get(0).([]ethtxtypes.MonitoredTxResult) } } - if rf, ok := ret.Get(1).(func(context.Context, []ethtxmanager.MonitoredTxStatus) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []ethtxtypes.MonitoredTxStatus) error); ok { r1 = rf(ctx, statuses) } else { r1 = ret.Error(1) From db54c2dc179ddff205cd731153a3fb6a49305a76 Mon Sep 17 00:00:00 2001 From: Joan Esteban 
<129153821+joanestebanr@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:32:18 +0200 Subject: [PATCH 22/53] feat: adapt kurtosis cdk and fix errors (#104) - Some port requests return the protocol and others do not, so the code has been updated to support ports with or without the protocol (`http://127.0.0.1:8023` or `127.0.0.1:8023`) - Also, to support changing end-point names, a list of 'end-points' is now tried in order to get the URL / port - A hardcoded path in the config file has been replaced with variables, leaving the absolute path used by kurtosis as the default value --- scripts/local_config | 97 ++++++++++++++----- scripts/run_template.go | 6 -- .../kurtosis-cdk-node-config.toml.template | 8 +- 3 files changed, 77 insertions(+), 34 deletions(-) diff --git a/scripts/local_config b/scripts/local_config index e821f8b7..08d960db 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -29,6 +29,7 @@ function get_value_from_toml_file(){ local _KEY="$3" local _LINE local _inside_section=0 + local _return_next_line=0 while read -r _LINE; do # Clean up line from spaces and tabs _LINE=$(echo $_LINE | tr -d '[:space:]') @@ -37,12 +38,23 @@ if [[ "$_LINE" == [* ]]; then return 1 fi + if [ $_return_next_line -eq 1 ]; then + # sed sentence remove quotes + echo $_LINE | sed 's/^[[:space:]]*"//;s/"$//' + + return 0 + fi #local _key_splitted=(${_LINE//=/ }) local _key_name=$(echo $_LINE | cut -f 1 -d "=") local _key_value=$(echo $_LINE | cut -f 2- -d "=") if [ "$_key_name" == "$_KEY" ]; then - echo $_key_value + if [ $_key_value == "[" ]; then + _return_next_line=1 + else + # sed sentence remove quotes + echo $_key_value | sed 's/^[[:space:]]*"//;s/"$//' return 0 + fi fi elif [ "$_LINE" == "[${_SECTION}]" ]; then _inside_section=1 @@ -74,20 +86,29 @@ function export_obj_key_from_toml_file_or_fatal(){ local _SECTION="$3" local _KEY="$4" local _OBJ_KEY="$5" + log_debug "export_obj_key_from_toml_file_or_fatal: $_EXPORTED_VAR_NAME $_FILE $_SECTION $_KEY $_OBJ_KEY" local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) if [ -z "$_VALUE" ]; then - log_fatal "$FUNCNAME: obj_key $_KEY not found in section $_SECTION" + log_fatal "export_obj_key_from_toml_file_or_fatal: obj_key $_KEY not found in section [$_SECTION]" fi local _CLEAN_VALUE=$(echo $_VALUE | tr -d '{' | tr -d '}' | tr ',' '\n') while read -r _LINE; do local _key_splitted=(${_LINE//=/ }) + if [ "${_key_splitted[0]}" == "$_OBJ_KEY" ]; then - export $_EXPORTED_VAR_NAME=${_key_splitted[1]} + local _KEY_VALUE=${_key_splitted[1]} + if [ "$_KEY_VALUE" == "[" ]; then + read -r _LINE + _KEY_VALUE=$LINE + echo "zzz $_KEY_VALUE" + fi + local _RES=$(echo $_KEY_VALUE | sed 's/^[[:space:]]*"//;s/"$//') + export $_EXPORTED_VAR_NAME="${_RES}" log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY obj_key:$_OBJ_KEY" return 0 fi done <<< "$_CLEAN_VALUE" - log_fatal "obj_key $_OBJ_KEY not found in section $_SECTION/ $_KEY = $_VALUE" + log_fatal "export_obj_key_from_toml_file_or_fatal: obj_key $_OBJ_KEY not found in section $_SECTION/ $_KEY = $_VALUE" } ############################################################################### @@ -107,17 +128,19 @@ function export_values_of_genesis(){ ############################################################################### function export_values_of_cdk_node_config(){ local _CDK_CONFIG_FILE=$1 - export_key_from_toml_file_or_fatal zkevm_l2_sequencer_address $_CDK_CONFIG_FILE SequenceSender L2Coinbase - 
export_obj_key_from_toml_file_or_fatal zkevm_l2_keystore_password $_CDK_CONFIG_FILE SequenceSender PrivateKey Password - export_key_from_toml_file_or_fatal l1_chain_id $_CDK_CONFIG_FILE SequenceSender.EthTxManager.Etherman L1ChainID - export_key_from_toml_file_or_fatal zkevm_is_validium $_CDK_CONFIG_FILE Common IsValidiumMode - export_key_from_toml_file_or_fatal zkevm_contract_versions $_CDK_CONFIG_FILE Common ContractVersions - export_key_from_toml_file_or_fatal l2_chain_id $_CDK_CONFIG_FILE Aggregator ChainID - export_key_from_toml_file_or_fatal zkevm_aggregator_port $_CDK_CONFIG_FILE Aggregator Port - export_key_from_toml_file_or_fatal zkevm_l2_agglayer_address $_CDK_CONFIG_FILE Aggregator SenderAddress - export_key_from_toml_file_or_fatal aggregator_db_name $_CDK_CONFIG_FILE Aggregator.DB Name - export_key_from_toml_file_or_fatal aggregator_db_user $_CDK_CONFIG_FILE Aggregator.DB User - export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password + export_key_from_toml_file_or_fatal zkevm_l2_sequencer_address $_CDK_CONFIG_FILE SequenceSender L2Coinbase + export_obj_key_from_toml_file_or_fatal zkevm_l2_sequencer_keystore_password $_CDK_CONFIG_FILE SequenceSender PrivateKey Password + export_key_from_toml_file_or_fatal l1_chain_id $_CDK_CONFIG_FILE SequenceSender.EthTxManager.Etherman L1ChainID + export_key_from_toml_file_or_fatal zkevm_is_validium $_CDK_CONFIG_FILE Common IsValidiumMode + export_key_from_toml_file_or_fatal zkevm_contract_versions $_CDK_CONFIG_FILE Common ContractVersions + export_key_from_toml_file_or_fatal l2_chain_id $_CDK_CONFIG_FILE Aggregator ChainID + export_key_from_toml_file_or_fatal zkevm_aggregator_port $_CDK_CONFIG_FILE Aggregator Port + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_address $_CDK_CONFIG_FILE Aggregator SenderAddress + export_key_from_toml_file_or_fatal aggregator_db_name $_CDK_CONFIG_FILE Aggregator.DB Name + export_key_from_toml_file_or_fatal aggregator_db_user $_CDK_CONFIG_FILE Aggregator.DB User + export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password + export_obj_key_from_toml_file_or_fatal zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password + export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId export is_cdk_validium=$zkevm_is_validium export zkevm_rollup_chain_id=$l2_chain_id @@ -126,13 +149,32 @@ function export_values_of_cdk_node_config(){ log_debug "Validium mode detected... Retrieving the dac_port" export_value_from_kurtosis_or_fail dac_port zkevm-dac-001 dac fi + export zkevm_l2_keystore_password=$zkevm_l2_sequencer_keystore_password } ############################################################################### +# params: +# $1 -> exported variable name +# $2 -> service name +# $3...$n -> endpoint names (will try all of them until one is found) +############################################################################### function export_value_from_kurtosis_or_fail(){ local _EXPORTED_VAR_NAME="$1" - local _SERVICE="$2" - local _END_POINT="$3" - export $_EXPORTED_VAR_NAME=$(kurtosis port print $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT) + shift + local _SERVICE="$1" + shift + local _END_POINT + local _RESULT + log_debug "Trying to get kurtosis value:$_EXPORTED_VAR_NAME = $KURTOSIS_ENCLAVE $_SERVICE $*" + while [ ! 
-z $1 ]; do + _END_POINT=$1 + shift + log_debug "--- kurtosis value: $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" + _RESULT=$(kurtosis port print $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT 2>/dev/null) + if [ ! -z $_RESULT ]; then + break + fi + done + export $_EXPORTED_VAR_NAME=$_RESULT if [ -z $_EXPORTED_VAR_NAME ]; then log_fatal "Error getting kurtosis port: $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" fi @@ -141,10 +183,12 @@ function export_value_from_kurtosis_or_fail(){ ############################################################################### function export_portnum_from_kurtosis_or_fail(){ local _EXPORTED_VAR_NAME="$1" - export_value_from_kurtosis_or_fail "$1" "$2" "$3" > /dev/null + export_value_from_kurtosis_or_fail $* > /dev/null local _VALUE eval "_VALUE=\$$1" - local _PORT=$(echo "$_VALUE" | cut -f 3 -d ":") + # sed sentece eliminate protocol (xyz://) is have it + # kurtosis sometimes include protocol but not always + local _PORT=$(echo "$_VALUE" | sed -E 's|^[a-zA-Z]+://||' | cut -f 2 -d ":") if [ -z $_PORT ]; then log_fatal "Error getting port number from kurtosis: $2 $3 -> $_VALUE" fi @@ -154,13 +198,13 @@ function export_portnum_from_kurtosis_or_fail(){ ############################################################################### function export_ports_from_kurtosis(){ export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc - export_value_from_kurtosis_or_fail l1_rpc_url el-1-geth-lighthouse rpc - export_value_from_kurtosis_or_fail l2_rpc_url cdk-erigon-node-001 http-rpc - export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc rpc export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer export aggregator_db_hostname="127.0.0.1" + export l1_rpc_url="http://localhost:${l1_rpc_port}" + export l2_rpc_url="http://localhost:${zkevm_rpc_http_port}" } ############################################################################### @@ -211,10 +255,15 @@ function download_kurtosis_artifacts(){ kurtosis files download $KURTOSIS_ENCLAVE sequencer-keystore $DEST ok_or_fatal "Error downloading kurtosis artifact sequencer-keystore to $DEST" - export sequencer_keystore_file=$DEST/sequencer.keystore + export zkevm_l2_sequencer_keystore_file=$DEST/sequencer.keystore kurtosis files download $KURTOSIS_ENCLAVE cdk-node-config-artifact $DEST ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" + + kurtosis files download $KURTOSIS_ENCLAVE aggregator-keystore $DEST + ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" + export zkevm_l2_aggregator_keystore_file=$DEST/aggregator.keystore + } ############################################################################### function check_generated_config_file(){ diff --git a/scripts/run_template.go b/scripts/run_template.go index b629de5a..c9ef58a3 100644 --- a/scripts/run_template.go +++ b/scripts/run_template.go @@ -1,7 +1,6 @@ package main import ( - "fmt" "log" "os" "regexp" @@ -43,13 +42,8 @@ func environmentToMap() map[string]any { envVars := make(map[string]any) for _, e := range os.Environ() { pair := splitAtFirst(e, '=') - fmt.Printf("zzzz env=%s pair=%v\n", e, pair) envVars[pair[0]] = pair[1] } - envVars["aggregator_db"] = map[string]string{ - "user": "user", - 
"name": "Name", - } return envVars } diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 91ddb819..15948b60 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -23,7 +23,7 @@ WaitPeriodSendSequence = "15s" LastBatchVirtualizationTimeMaxWaitPeriod = "10s" MaxTxSizeForL1 = 131072 L2Coinbase = "{{.zkevm_l2_sequencer_address}}" -PrivateKey = {Path = "/etc/cdk/sequencer.keystore", Password = "{{.zkevm_l2_keystore_password}}"} +PrivateKey = {Path = "{{or .zkevm_l2_sequencer_keystore_file "/etc/cdk/sequencer.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"} SequencesTxFileName = "/data/sequencesender.json" GasOffset = 80000 WaitPeriodPurgeTxFile = "15m" @@ -49,7 +49,7 @@ GetBatchWaitInterval = "10s" WaitReceiptToBeGenerated = "8s" {{end}} PrivateKeys = [ - {Path = "/etc/cdk/sequencer.keystore", Password = "{{.zkevm_l2_keystore_password}}"}, + {Path = "{{or .zkevm_l2_sequencer_keystore_file "/etc/cdk/sequencer.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"}, ] ForcedGas = 0 GasPriceMarginFactor = 1 @@ -121,9 +121,9 @@ GetBatchWaitInterval = "10s" GetReceiptMaxTime = "250ms" GetReceiptWaitInterval = "1s" {{end}} - + PrivateKeys = [ - {Path = "/etc/cdk/aggregator.keystore", Password = "{{.zkevm_l2_keystore_password}}"}, + {Path = "{{or .zkevm_l2_aggregator_keystore_file "/etc/cdk/aggregator.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"}, ] ForcedGas = 0 GasPriceMarginFactor = 1 From 53f454e019e8c757645c1b36bbadb17bd9692f00 Mon Sep 17 00:00:00 2001 From: rbpol Date: Fri, 4 Oct 2024 14:02:29 +0100 Subject: [PATCH 23/53] feat: Sequence sender unit tests (#103) --- sequencesender/ethtx.go | 7 +- sequencesender/ethtx_test.go | 786 ++++++++++++++++++++++ sequencesender/mocks/mock_etherman.go | 147 ++++ sequencesender/mocks/mock_ethtxmanager.go | 146 ++++ sequencesender/mocks/mock_txbuilder.go | 367 ++++++++++ sequencesender/rpc.go | 10 +- sequencesender/rpc_test.go | 115 ++++ sequencesender/sequencesender.go | 104 ++- sequencesender/sequencesender_test.go | 596 ++++++++++++++++ test/Makefile | 3 + 10 files changed, 2237 insertions(+), 44 deletions(-) create mode 100644 sequencesender/ethtx_test.go create mode 100644 sequencesender/mocks/mock_etherman.go create mode 100644 sequencesender/mocks/mock_ethtxmanager.go create mode 100644 sequencesender/mocks/mock_txbuilder.go create mode 100644 sequencesender/rpc_test.go create mode 100644 sequencesender/sequencesender_test.go diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go index 2873c3b5..32bc62b4 100644 --- a/sequencesender/ethtx.go +++ b/sequencesender/ethtx.go @@ -8,6 +8,7 @@ import ( "math/big" "os" "strings" + "sync/atomic" "time" "github.com/0xPolygon/cdk/log" @@ -97,7 +98,7 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com log.Errorf("error getting result for tx %v: %v", txHash, err) } if !resend { - s.latestSentToL1Batch = valueToBatch + atomic.StoreUint64(&s.latestSentToL1Batch, valueToBatch) } else { s.ethTransactions[*txOldHash].Status = "*resent" } @@ -114,7 +115,7 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com // purgeEthTx purges transactions from memory structures func (s *SequenceSender) purgeEthTx(ctx context.Context) { // If sequence sending is stopped, do not purge - if s.seqSendingStopped { + if atomic.LoadUint32(&s.seqSendingStopped) == 1 { return } @@ -275,7 +276,7 @@ func (s 
*SequenceSender) updateEthTxResult(txData *ethTxData, txResult types.Mon txData.Status == types.MonitoredTxStatusFinalized.String() if txData.Status == types.MonitoredTxStatusFailed.String() { s.logFatalf("transaction %v result failed!") - } else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatchNumber { + } else if statusConsolidated && txData.ToBatch >= atomic.LoadUint64(&s.latestVirtualBatchNumber) { s.latestVirtualTime = txData.StatusTimestamp } } diff --git a/sequencesender/ethtx_test.go b/sequencesender/ethtx_test.go new file mode 100644 index 00000000..06afb640 --- /dev/null +++ b/sequencesender/ethtx_test.go @@ -0,0 +1,786 @@ +package sequencesender + +import ( + "context" + "encoding/json" + "errors" + "math/big" + "os" + "testing" + "time" + + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/mocks" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func Test_sendTx(t *testing.T) { + t.Parallel() + + addr := common.BytesToAddress([]byte{1, 2, 3}) + hash := common.HexToHash("0x1") + oldHash := common.HexToHash("0x2") + + type args struct { + resend bool + txOldHash *common.Hash + to *common.Address + fromBatch uint64 + toBatch uint64 + data []byte + gas uint64 + } + + type state struct { + currentNonce uint64 + ethTxData map[common.Hash][]byte + ethTransactions map[common.Hash]*ethTxData + latestSentToL1Batch uint64 + } + + tests := []struct { + name string + args args + state state + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + expectedState state + expectedErr error + }{ + { + name: "successfully sent", + args: args{ + resend: false, + txOldHash: nil, + to: &addr, + fromBatch: 1, + toBatch: 2, + data: []byte("test"), + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, &addr, big.NewInt(0), []byte("test"), uint64(0), mock.Anything, uint64(100500)).Return(hash, nil) + mngr.On("Result", mock.Anything, hash).Return(ethtxtypes.MonitoredTxResult{ + ID: hash, + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + state: state{ + currentNonce: 10, + ethTxData: map[common.Hash][]byte{ + hash: {}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + hash: {}, + }, + latestSentToL1Batch: 0, + }, + expectedState: state{ + currentNonce: 11, + ethTxData: map[common.Hash][]byte{ + hash: {1, 2, 3}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + hash: { + SentL1Timestamp: now, + StatusTimestamp: now, + FromBatch: 1, + ToBatch: 2, + OnMonitor: true, + To: addr, + Gas: 100500, + StateHistory: []string{now.Format("2006-01-02T15:04:05.000-07:00") + ", *new, "}, + Txs: map[common.Hash]ethTxAdditionalData{}, + }, + }, + latestSentToL1Batch: 2, + }, + expectedErr: nil, + }, + { + name: "successfully sent with resend", + args: args{ + resend: true, + txOldHash: &oldHash, + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, &addr, big.NewInt(0), []byte(nil), uint64(0), mock.Anything, uint64(100500)).Return(hash, nil) + mngr.On("Result", mock.Anything, hash).Return(ethtxtypes.MonitoredTxResult{ + ID: hash, + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + state: state{ + ethTxData: 
map[common.Hash][]byte{ + hash: []byte("test"), + }, + ethTransactions: map[common.Hash]*ethTxData{ + oldHash: { + To: addr, + Nonce: 10, + FromBatch: 1, + ToBatch: 2, + }, + }, + latestSentToL1Batch: 0, + }, + expectedState: state{ + currentNonce: 0, + ethTxData: map[common.Hash][]byte{ + hash: {1, 2, 3}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + hash: { + SentL1Timestamp: now, + StatusTimestamp: now, + FromBatch: 1, + ToBatch: 2, + OnMonitor: true, + To: addr, + Gas: 100500, + StateHistory: []string{now.Format("2006-01-02T15:04:05.000-07:00") + ", *new, "}, + Txs: map[common.Hash]ethTxAdditionalData{}, + }, + }, + latestSentToL1Batch: 0, + }, + expectedErr: nil, + }, + { + name: "add with gas returns error", + args: args{ + resend: true, + txOldHash: &oldHash, + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, &addr, big.NewInt(0), []byte(nil), uint64(0), mock.Anything, uint64(100500)).Return(nil, errors.New("failed to add with gas")) + return mngr + }, + state: state{ + ethTxData: map[common.Hash][]byte{ + hash: []byte("test"), + }, + ethTransactions: map[common.Hash]*ethTxData{ + oldHash: { + To: addr, + Nonce: 10, + FromBatch: 1, + ToBatch: 2, + }, + }, + latestSentToL1Batch: 0, + }, + expectedErr: errors.New("failed to add with gas"), + }, + { + name: "empty old hash", + args: args{ + resend: true, + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + return mngr + }, + state: state{ + ethTxData: map[common.Hash][]byte{ + hash: []byte("test"), + }, + ethTransactions: map[common.Hash]*ethTxData{ + oldHash: { + To: addr, + Nonce: 10, + FromBatch: 1, + ToBatch: 2, + }, + }, + latestSentToL1Batch: 0, + }, + expectedErr: errors.New("resend tx with nil hash monitor id"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + ss := SequenceSender{ + ethTxData: tt.state.ethTxData, + ethTransactions: tt.state.ethTransactions, + ethTxManager: tt.getEthTxManager(t), + latestSentToL1Batch: tt.state.latestSentToL1Batch, + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + err = ss.sendTx(context.Background(), tt.args.resend, tt.args.txOldHash, tt.args.to, tt.args.fromBatch, tt.args.toBatch, tt.args.data, tt.args.gas) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedState.ethTxData, ss.ethTxData) + require.Equal(t, len(tt.expectedState.ethTransactions), len(ss.ethTransactions)) + for k, v := range tt.expectedState.ethTransactions { + require.Equal(t, v.Gas, ss.ethTransactions[k].Gas) + require.Equal(t, v.To, ss.ethTransactions[k].To) + require.Equal(t, v.Nonce, ss.ethTransactions[k].Nonce) + require.Equal(t, v.Status, ss.ethTransactions[k].Status) + require.Equal(t, v.FromBatch, ss.ethTransactions[k].FromBatch) + require.Equal(t, v.ToBatch, ss.ethTransactions[k].ToBatch) + require.Equal(t, v.OnMonitor, ss.ethTransactions[k].OnMonitor) + } + require.Equal(t, tt.expectedState.latestSentToL1Batch, ss.latestSentToL1Batch) + } + }) + } +} + +func Test_purgeEthTx(t *testing.T) { + t.Parallel() + + firstTimestamp := time.Now().Add(-time.Hour) + 
secondTimestamp := time.Now().Add(time.Hour) + + tests := []struct { + name string + seqSendingStopped uint32 + ethTransactions map[common.Hash]*ethTxData + ethTxData map[common.Hash][]byte + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + sequenceList []uint64 + expectedEthTransactions map[common.Hash]*ethTxData + expectedEthTxData map[common.Hash][]byte + }{ + { + name: "sequence sender stopped", + seqSendingStopped: 1, + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + return mocks.NewEthTxManagerMock(t) + }, + sequenceList: []uint64{1, 2}, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + }, + expectedEthTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + }, + }, + { + name: "transactions purged", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + common.HexToHash("0x2"): {4, 5, 6}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Remove", mock.Anything, common.HexToHash("0x1")).Return(nil) + return mngr + }, + sequenceList: []uint64{1, 2}, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + expectedEthTxData: map[common.Hash][]byte{ + common.HexToHash("0x2"): {4, 5, 6}, + }, + }, + { + name: "removed with error", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + common.HexToHash("0x2"): {4, 5, 6}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Remove", mock.Anything, common.HexToHash("0x1")).Return(errors.New("test err")) + return mngr + }, + sequenceList: []uint64{1, 2}, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + expectedEthTxData: map[common.Hash][]byte{ + common.HexToHash("0x2"): {4, 5, 6}, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mngr := tt.getEthTxManager(t) + ss := SequenceSender{ + seqSendingStopped: tt.seqSendingStopped, + ethTransactions: tt.ethTransactions, + ethTxData: tt.ethTxData, + ethTxManager: mngr, + logger: log.GetDefaultLogger(), + } + + ss.purgeEthTx(context.Background()) + + mngr.AssertExpectations(t) + require.Equal(t, tt.expectedEthTransactions, ss.ethTransactions) + require.Equal(t, tt.expectedEthTxData, ss.ethTxData) + }) + } +} + +func Test_syncEthTxResults(t *testing.T) { + t.Parallel() + 
+ tests := []struct { + name string + ethTransactions map[common.Hash]*ethTxData + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + + expectErr error + expectPendingTxs uint64 + }{ + { + name: "successfully synced", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: time.Now(), + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusCreated.String(), + }, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Result", mock.Anything, common.HexToHash("0x1")).Return(ethtxtypes.MonitoredTxResult{ + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + expectPendingTxs: 1, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name) + require.NoError(t, err) + + mngr := tt.getEthTxManager(t) + ss := SequenceSender{ + ethTransactions: tt.ethTransactions, + ethTxManager: mngr, + ethTxData: make(map[common.Hash][]byte), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + pendingTxs, err := ss.syncEthTxResults(context.Background()) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectPendingTxs, pendingTxs) + } + + mngr.AssertExpectations(t) + + err = os.RemoveAll(tmpFile.Name() + ".tmp") + require.NoError(t, err) + }) + } +} + +func Test_syncAllEthTxResults(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ethTransactions map[common.Hash]*ethTxData + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + + expectErr error + expectPendingTxs uint64 + }{ + { + name: "successfully synced", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: time.Now(), + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusCreated.String(), + }, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return([]ethtxtypes.MonitoredTxResult{ + { + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, + }, nil) + return mngr + }, + expectPendingTxs: 1, + }, + { + name: "successfully synced with missing tx", + ethTransactions: map[common.Hash]*ethTxData{}, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return([]ethtxtypes.MonitoredTxResult{ + { + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, + }, nil) + return mngr + }, + expectPendingTxs: 1, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name) + require.NoError(t, err) + + mngr := tt.getEthTxManager(t) + ss := SequenceSender{ + ethTransactions: tt.ethTransactions, + ethTxManager: mngr, + ethTxData: make(map[common.Hash][]byte), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + err = ss.syncAllEthTxResults(context.Background()) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr, err) + } else { + require.NoError(t, err) + } + + mngr.AssertExpectations(t) + + err = os.RemoveAll(tmpFile.Name() + ".tmp") + 
require.NoError(t, err) + }) + } +} + +func Test_copyTxData(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + txHash common.Hash + txData []byte + txsResults map[common.Hash]ethtxtypes.TxResult + ethTxData map[common.Hash][]byte + ethTransactions map[common.Hash]*ethTxData + expectedRthTxData map[common.Hash][]byte + expectedEthTransactions map[common.Hash]*ethTxData + }{ + { + name: "successfully copied", + txHash: common.HexToHash("0x1"), + txData: []byte{1, 2, 3}, + txsResults: map[common.Hash]ethtxtypes.TxResult{ + common.HexToHash("0x1"): {}, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {0, 2, 3}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): {}, + }, + expectedRthTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + }, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + Txs: map[common.Hash]ethTxAdditionalData{ + common.HexToHash("0x1"): {}, + }, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + s := SequenceSender{ + ethTxData: tt.ethTxData, + ethTransactions: tt.ethTransactions, + } + + s.copyTxData(tt.txHash, tt.txData, tt.txsResults) + require.Equal(t, tt.expectedRthTxData, s.ethTxData) + require.Equal(t, tt.expectedEthTransactions, s.ethTransactions) + }) + } +} + +func Test_getResultAndUpdateEthTx(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + hash common.Hash + ethTransactions map[common.Hash]*ethTxData + ethTxData map[common.Hash][]byte + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + expectedErr error + }{ + { + name: "successfully updated", + hash: common.HexToHash("0x1"), + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): {}, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Result", mock.Anything, common.HexToHash("0x1")).Return(ethtxtypes.MonitoredTxResult{ + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + expectedErr: nil, + }, + { + name: "not found", + hash: common.HexToHash("0x1"), + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + Gas: 100500, + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Result", mock.Anything, common.HexToHash("0x1")).Return(ethtxtypes.MonitoredTxResult{}, ethtxmanager.ErrNotFound) + mngr.On("AddWithGas", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, mock.Anything, mock.Anything, uint64(100500)).Return(common.Hash{}, nil) + mngr.On("Result", mock.Anything, common.Hash{}).Return(ethtxtypes.MonitoredTxResult{ + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + ss := SequenceSender{ + ethTransactions: tt.ethTransactions, + ethTxData: tt.ethTxData, + ethTxManager: tt.getEthTxManager(t), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + err = 
ss.getResultAndUpdateEthTx(context.Background(), tt.hash) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_loadSentSequencesTransactions(t *testing.T) { + t.Parallel() + + tx := ðTxData{ + FromBatch: 1, + ToBatch: 2, + OnMonitor: true, + To: common.BytesToAddress([]byte{1, 2, 3}), + Gas: 100500, + StateHistory: []string{"2021-09-01T15:04:05.000-07:00, *new, "}, + Txs: map[common.Hash]ethTxAdditionalData{}, + } + + tests := []struct { + name string + getFilename func(t *testing.T) string + expectEthTransactions map[common.Hash]*ethTxData + expectErr error + }{ + { + name: "successfully loaded", + getFilename: func(t *testing.T) string { + t.Helper() + + tmpFile, err := os.CreateTemp(os.TempDir(), "test") + require.NoError(t, err) + + ethTxDataBytes, err := json.Marshal(map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): tx, + }) + require.NoError(t, err) + + _, err = tmpFile.Write(ethTxDataBytes) + require.NoError(t, err) + + t.Cleanup(func() { + err := os.Remove(tmpFile.Name()) + require.NoError(t, err) + }) + + return tmpFile.Name() + }, + expectEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): tx, + }, + }, + { + name: "file does not exist", + getFilename: func(t *testing.T) string { + t.Helper() + + return "does not exist.tmp" + }, + expectEthTransactions: map[common.Hash]*ethTxData{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + s := SequenceSender{ + cfg: Config{ + SequencesTxFileName: tt.getFilename(t), + }, + ethTransactions: map[common.Hash]*ethTxData{}, + logger: log.GetDefaultLogger(), + } + + err := s.loadSentSequencesTransactions() + if tt.expectErr != nil { + require.Equal(t, tt.expectErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectEthTransactions, s.ethTransactions) + } + }) + } +} diff --git a/sequencesender/mocks/mock_etherman.go b/sequencesender/mocks/mock_etherman.go new file mode 100644 index 00000000..46a70170 --- /dev/null +++ b/sequencesender/mocks/mock_etherman.go @@ -0,0 +1,147 @@ +// Code generated by mockery v2.40.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthermanMock is an autogenerated mock type for the Etherman type +type EthermanMock struct { + mock.Mock +} + +// CurrentNonce provides a mock function with given fields: ctx, address +func (_m *EthermanMock) CurrentNonce(ctx context.Context, address common.Address) (uint64, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for CurrentNonce") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EstimateGas provides a mock function with given fields: ctx, from, to, value, data +func (_m *EthermanMock) EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) { + ret := _m.Called(ctx, from, to, value, data) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *common.Address, *big.Int, []byte) (uint64, error)); ok { + return rf(ctx, from, to, value, data) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *common.Address, *big.Int, []byte) uint64); ok { + r0 = rf(ctx, from, to, value, data) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *common.Address, *big.Int, []byte) error); ok { + r1 = rf(ctx, from, to, value, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBatchNumber provides a mock function with given fields: +func (_m *EthermanMock) GetLatestBatchNumber() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLatestBatchNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBlockHeader provides a mock function with given fields: ctx +func (_m *EthermanMock) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Header, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Header); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewEthermanMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanMock { + mock := &EthermanMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/mocks/mock_ethtxmanager.go b/sequencesender/mocks/mock_ethtxmanager.go new file mode 100644 index 00000000..f3b456a4 --- /dev/null +++ b/sequencesender/mocks/mock_ethtxmanager.go @@ -0,0 +1,146 @@ +// Code generated by mockery v2.40.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthTxManagerMock is an autogenerated mock type for the EthTxManager type +type EthTxManagerMock struct { + mock.Mock +} + +// AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas +func (_m *EthTxManagerMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) + + if len(ret) == 0 { + panic("no return value specified for AddWithGas") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar, gas) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Remove provides a mock function with given fields: ctx, hash +func (_m *EthTxManagerMock) Remove(ctx context.Context, hash common.Hash) error { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, hash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Result provides a mock function with given fields: ctx, hash +func (_m *EthTxManagerMock) Result(ctx context.Context, hash common.Hash) (ethtxtypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for Result") + } + + var r0 ethtxtypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxtypes.MonitoredTxResult, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxtypes.MonitoredTxResult); ok { + r0 = rf(ctx, hash) + } else { + r0 = ret.Get(0).(ethtxtypes.MonitoredTxResult) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResultsByStatus provides a mock function with given fields: ctx, status +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, status 
[]ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, status) + + if len(ret) == 0 { + panic("no return value specified for ResultsByStatus") + } + + var r0 []ethtxtypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error)); ok { + return rf(ctx, status) + } + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) []ethtxtypes.MonitoredTxResult); ok { + r0 = rf(ctx, status) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethtxtypes.MonitoredTxResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ethtxtypes.MonitoredTxStatus) error); ok { + r1 = rf(ctx, status) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: +func (_m *EthTxManagerMock) Start() { + _m.Called() +} + +// NewEthTxManagerMock creates a new instance of EthTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthTxManagerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthTxManagerMock { + mock := &EthTxManagerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/mocks/mock_txbuilder.go b/sequencesender/mocks/mock_txbuilder.go new file mode 100644 index 00000000..0607313b --- /dev/null +++ b/sequencesender/mocks/mock_txbuilder.go @@ -0,0 +1,367 @@ +// Code generated by mockery v2.40.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + datastream "github.com/0xPolygon/cdk/state/datastream" + + mock "github.com/stretchr/testify/mock" + + seqsendertypes "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + + txbuilder "github.com/0xPolygon/cdk/sequencesender/txbuilder" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// TxBuilderMock is an autogenerated mock type for the TxBuilder type +type TxBuilderMock struct { + mock.Mock +} + +type TxBuilderMock_Expecter struct { + mock *mock.Mock +} + +func (_m *TxBuilderMock) EXPECT() *TxBuilderMock_Expecter { + return &TxBuilderMock_Expecter{mock: &_m.Mock} +} + +// BuildSequenceBatchesTx provides a mock function with given fields: ctx, sequences +func (_m *TxBuilderMock) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) { + ret := _m.Called(ctx, sequences) + + if len(ret) == 0 { + panic("no return value specified for BuildSequenceBatchesTx") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, seqsendertypes.Sequence) (*types.Transaction, error)); ok { + return rf(ctx, sequences) + } + if rf, ok := ret.Get(0).(func(context.Context, seqsendertypes.Sequence) *types.Transaction); ok { + r0 = rf(ctx, sequences) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, seqsendertypes.Sequence) error); ok { + r1 = rf(ctx, sequences) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxBuilderMock_BuildSequenceBatchesTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildSequenceBatchesTx' +type TxBuilderMock_BuildSequenceBatchesTx_Call struct { + *mock.Call +} + +// BuildSequenceBatchesTx is a helper method to define 
mock.On call +// - ctx context.Context +// - sequences seqsendertypes.Sequence +func (_e *TxBuilderMock_Expecter) BuildSequenceBatchesTx(ctx interface{}, sequences interface{}) *TxBuilderMock_BuildSequenceBatchesTx_Call { + return &TxBuilderMock_BuildSequenceBatchesTx_Call{Call: _e.mock.On("BuildSequenceBatchesTx", ctx, sequences)} +} + +func (_c *TxBuilderMock_BuildSequenceBatchesTx_Call) Run(run func(ctx context.Context, sequences seqsendertypes.Sequence)) *TxBuilderMock_BuildSequenceBatchesTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(seqsendertypes.Sequence)) + }) + return _c +} + +func (_c *TxBuilderMock_BuildSequenceBatchesTx_Call) Return(_a0 *types.Transaction, _a1 error) *TxBuilderMock_BuildSequenceBatchesTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxBuilderMock_BuildSequenceBatchesTx_Call) RunAndReturn(run func(context.Context, seqsendertypes.Sequence) (*types.Transaction, error)) *TxBuilderMock_BuildSequenceBatchesTx_Call { + _c.Call.Return(run) + return _c +} + +// NewBatchFromL2Block provides a mock function with given fields: l2Block +func (_m *TxBuilderMock) NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch { + ret := _m.Called(l2Block) + + if len(ret) == 0 { + panic("no return value specified for NewBatchFromL2Block") + } + + var r0 seqsendertypes.Batch + if rf, ok := ret.Get(0).(func(*datastream.L2Block) seqsendertypes.Batch); ok { + r0 = rf(l2Block) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(seqsendertypes.Batch) + } + } + + return r0 +} + +// TxBuilderMock_NewBatchFromL2Block_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBatchFromL2Block' +type TxBuilderMock_NewBatchFromL2Block_Call struct { + *mock.Call +} + +// NewBatchFromL2Block is a helper method to define mock.On call +// - l2Block *datastream.L2Block +func (_e *TxBuilderMock_Expecter) NewBatchFromL2Block(l2Block interface{}) *TxBuilderMock_NewBatchFromL2Block_Call { + return &TxBuilderMock_NewBatchFromL2Block_Call{Call: _e.mock.On("NewBatchFromL2Block", l2Block)} +} + +func (_c *TxBuilderMock_NewBatchFromL2Block_Call) Run(run func(l2Block *datastream.L2Block)) *TxBuilderMock_NewBatchFromL2Block_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*datastream.L2Block)) + }) + return _c +} + +func (_c *TxBuilderMock_NewBatchFromL2Block_Call) Return(_a0 seqsendertypes.Batch) *TxBuilderMock_NewBatchFromL2Block_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TxBuilderMock_NewBatchFromL2Block_Call) RunAndReturn(run func(*datastream.L2Block) seqsendertypes.Batch) *TxBuilderMock_NewBatchFromL2Block_Call { + _c.Call.Return(run) + return _c +} + +// NewSequence provides a mock function with given fields: ctx, batches, coinbase +func (_m *TxBuilderMock) NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error) { + ret := _m.Called(ctx, batches, coinbase) + + if len(ret) == 0 { + panic("no return value specified for NewSequence") + } + + var r0 seqsendertypes.Sequence + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address) (seqsendertypes.Sequence, error)); ok { + return rf(ctx, batches, coinbase) + } + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address) seqsendertypes.Sequence); ok { + r0 = rf(ctx, batches, coinbase) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(seqsendertypes.Sequence) + } + } + + if rf, 
ok := ret.Get(1).(func(context.Context, []seqsendertypes.Batch, common.Address) error); ok { + r1 = rf(ctx, batches, coinbase) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxBuilderMock_NewSequence_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewSequence' +type TxBuilderMock_NewSequence_Call struct { + *mock.Call +} + +// NewSequence is a helper method to define mock.On call +// - ctx context.Context +// - batches []seqsendertypes.Batch +// - coinbase common.Address +func (_e *TxBuilderMock_Expecter) NewSequence(ctx interface{}, batches interface{}, coinbase interface{}) *TxBuilderMock_NewSequence_Call { + return &TxBuilderMock_NewSequence_Call{Call: _e.mock.On("NewSequence", ctx, batches, coinbase)} +} + +func (_c *TxBuilderMock_NewSequence_Call) Run(run func(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address)) *TxBuilderMock_NewSequence_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]seqsendertypes.Batch), args[2].(common.Address)) + }) + return _c +} + +func (_c *TxBuilderMock_NewSequence_Call) Return(_a0 seqsendertypes.Sequence, _a1 error) *TxBuilderMock_NewSequence_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxBuilderMock_NewSequence_Call) RunAndReturn(run func(context.Context, []seqsendertypes.Batch, common.Address) (seqsendertypes.Sequence, error)) *TxBuilderMock_NewSequence_Call { + _c.Call.Return(run) + return _c +} + +// NewSequenceIfWorthToSend provides a mock function with given fields: ctx, sequenceBatches, l2Coinbase, batchNumber +func (_m *TxBuilderMock) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) { + ret := _m.Called(ctx, sequenceBatches, l2Coinbase, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for NewSequenceIfWorthToSend") + } + + var r0 seqsendertypes.Sequence + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address, uint64) (seqsendertypes.Sequence, error)); ok { + return rf(ctx, sequenceBatches, l2Coinbase, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address, uint64) seqsendertypes.Sequence); ok { + r0 = rf(ctx, sequenceBatches, l2Coinbase, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(seqsendertypes.Sequence) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []seqsendertypes.Batch, common.Address, uint64) error); ok { + r1 = rf(ctx, sequenceBatches, l2Coinbase, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxBuilderMock_NewSequenceIfWorthToSend_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewSequenceIfWorthToSend' +type TxBuilderMock_NewSequenceIfWorthToSend_Call struct { + *mock.Call +} + +// NewSequenceIfWorthToSend is a helper method to define mock.On call +// - ctx context.Context +// - sequenceBatches []seqsendertypes.Batch +// - l2Coinbase common.Address +// - batchNumber uint64 +func (_e *TxBuilderMock_Expecter) NewSequenceIfWorthToSend(ctx interface{}, sequenceBatches interface{}, l2Coinbase interface{}, batchNumber interface{}) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + return &TxBuilderMock_NewSequenceIfWorthToSend_Call{Call: _e.mock.On("NewSequenceIfWorthToSend", ctx, sequenceBatches, l2Coinbase, batchNumber)} +} + +func (_c 
*TxBuilderMock_NewSequenceIfWorthToSend_Call) Run(run func(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64)) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]seqsendertypes.Batch), args[2].(common.Address), args[3].(uint64)) + }) + return _c +} + +func (_c *TxBuilderMock_NewSequenceIfWorthToSend_Call) Return(_a0 seqsendertypes.Sequence, _a1 error) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxBuilderMock_NewSequenceIfWorthToSend_Call) RunAndReturn(run func(context.Context, []seqsendertypes.Batch, common.Address, uint64) (seqsendertypes.Sequence, error)) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + _c.Call.Return(run) + return _c +} + +// SetCondNewSeq provides a mock function with given fields: cond +func (_m *TxBuilderMock) SetCondNewSeq(cond txbuilder.CondNewSequence) txbuilder.CondNewSequence { + ret := _m.Called(cond) + + if len(ret) == 0 { + panic("no return value specified for SetCondNewSeq") + } + + var r0 txbuilder.CondNewSequence + if rf, ok := ret.Get(0).(func(txbuilder.CondNewSequence) txbuilder.CondNewSequence); ok { + r0 = rf(cond) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(txbuilder.CondNewSequence) + } + } + + return r0 +} + +// TxBuilderMock_SetCondNewSeq_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCondNewSeq' +type TxBuilderMock_SetCondNewSeq_Call struct { + *mock.Call +} + +// SetCondNewSeq is a helper method to define mock.On call +// - cond txbuilder.CondNewSequence +func (_e *TxBuilderMock_Expecter) SetCondNewSeq(cond interface{}) *TxBuilderMock_SetCondNewSeq_Call { + return &TxBuilderMock_SetCondNewSeq_Call{Call: _e.mock.On("SetCondNewSeq", cond)} +} + +func (_c *TxBuilderMock_SetCondNewSeq_Call) Run(run func(cond txbuilder.CondNewSequence)) *TxBuilderMock_SetCondNewSeq_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(txbuilder.CondNewSequence)) + }) + return _c +} + +func (_c *TxBuilderMock_SetCondNewSeq_Call) Return(_a0 txbuilder.CondNewSequence) *TxBuilderMock_SetCondNewSeq_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TxBuilderMock_SetCondNewSeq_Call) RunAndReturn(run func(txbuilder.CondNewSequence) txbuilder.CondNewSequence) *TxBuilderMock_SetCondNewSeq_Call { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *TxBuilderMock) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TxBuilderMock_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type TxBuilderMock_String_Call struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *TxBuilderMock_Expecter) String() *TxBuilderMock_String_Call { + return &TxBuilderMock_String_Call{Call: _e.mock.On("String")} +} + +func (_c *TxBuilderMock_String_Call) Run(run func()) *TxBuilderMock_String_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *TxBuilderMock_String_Call) Return(_a0 string) *TxBuilderMock_String_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TxBuilderMock_String_Call) RunAndReturn(run func() string) *TxBuilderMock_String_Call { + 
_c.Call.Return(run) + return _c +} + +// NewTxBuilderMock creates a new instance of TxBuilderMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxBuilderMock(t interface { + mock.TestingT + Cleanup(func()) +}) *TxBuilderMock { + mock := &TxBuilderMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/rpc.go b/sequencesender/rpc.go index a70f5c64..a604da37 100644 --- a/sequencesender/rpc.go +++ b/sequencesender/rpc.go @@ -12,7 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) -func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (*rpcbatch.RPCBatch, error) { +func getBatchFromRPC(addr string, batchNumber uint64) (*rpcbatch.RPCBatch, error) { type zkEVMBatch struct { Blocks []string `json:"blocks"` BatchL2Data string `json:"batchL2Data"` @@ -26,7 +26,7 @@ func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (*rpcbatch.RPCBatch log.Infof("Getting batch %d from RPC", batchNumber) - response, err := rpc.JSONRPCCall(s.cfg.RPCURL, "zkevm_getBatchByNumber", batchNumber) + response, err := rpc.JSONRPCCall(addr, "zkevm_getBatchByNumber", batchNumber) if err != nil { return nil, err } @@ -54,7 +54,7 @@ func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (*rpcbatch.RPCBatch } if len(zkEVMBatchData.Blocks) > 0 { - lastL2BlockTimestamp, err := s.getL2BlockTimestampFromRPC(zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1]) + lastL2BlockTimestamp, err := getL2BlockTimestampFromRPC(addr, zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1]) if err != nil { return nil, fmt.Errorf("error getting the last l2 block timestamp from the rpc: %w", err) } @@ -67,14 +67,14 @@ func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (*rpcbatch.RPCBatch return rpcBatch, nil } -func (s *SequenceSender) getL2BlockTimestampFromRPC(blockHash string) (uint64, error) { +func getL2BlockTimestampFromRPC(addr, blockHash string) (uint64, error) { type zkeEVML2Block struct { Timestamp string `json:"timestamp"` } log.Infof("Getting l2 block timestamp from RPC. 
Block hash: %s", blockHash) - response, err := rpc.JSONRPCCall(s.cfg.RPCURL, "eth_getBlockByHash", blockHash, false) + response, err := rpc.JSONRPCCall(addr, "eth_getBlockByHash", blockHash, false) if err != nil { return 0, err } diff --git a/sequencesender/rpc_test.go b/sequencesender/rpc_test.go new file mode 100644 index 00000000..4774b237 --- /dev/null +++ b/sequencesender/rpc_test.go @@ -0,0 +1,115 @@ +package sequencesender + +import ( + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func Test_getBatchFromRPC(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + batch uint64 + getBatchByNumberResp string + getBlockByHasResp string + getBatchByNumberErr error + getBlockByHashErr error + expectBlocks int + expectData []byte + expectTimestamp uint64 + expectErr error + }{ + { + name: "successfully fetched", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":["1", "2", "3"],"batchL2Data":"0x1234567"}}`, + getBlockByHasResp: `{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x123456"}}`, + batch: 0, + expectBlocks: 3, + expectData: common.FromHex("0x1234567"), + expectTimestamp: 1193046, + expectErr: nil, + }, + { + name: "invalid json", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":invalid,"batchL2Data":"test"}}`, + batch: 0, + expectBlocks: 3, + expectData: nil, + expectErr: errors.New("invalid character 'i' looking for beginning of value"), + }, + { + name: "wrong json", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":"invalid","batchL2Data":"test"}}`, + batch: 0, + expectBlocks: 3, + expectData: nil, + expectErr: errors.New("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: json: cannot unmarshal string into Go struct field zkEVMBatch.blocks of type []string"), + }, + { + name: "error in the response", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":null,"error":{"code":-32602,"message":"Invalid params"}}`, + batch: 0, + expectBlocks: 0, + expectData: nil, + expectErr: errors.New("error in the response calling zkevm_getBatchByNumber: &{-32602 Invalid params }"), + }, + { + name: "http failed", + getBatchByNumberErr: errors.New("failed to fetch"), + batch: 0, + expectBlocks: 0, + expectData: nil, + expectErr: errors.New("invalid status code, expected: 200, found: 500"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var req rpc.Request + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + + switch req.Method { + case "zkevm_getBatchByNumber": + if tt.getBatchByNumberErr != nil { + http.Error(w, tt.getBatchByNumberErr.Error(), http.StatusInternalServerError) + return + } + + _, _ = w.Write([]byte(tt.getBatchByNumberResp)) + case "eth_getBlockByHash": + if tt.getBlockByHashErr != nil { + http.Error(w, tt.getBlockByHashErr.Error(), http.StatusInternalServerError) + return + } + _, _ = w.Write([]byte(tt.getBlockByHasResp)) + default: + http.Error(w, "method not found", http.StatusNotFound) + } + })) + defer srv.Close() + + rpcBatch, err := getBatchFromRPC(srv.URL, tt.batch) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr.Error(), err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectTimestamp, 
rpcBatch.LastL2BLockTimestamp()) + require.Equal(t, tt.expectData, rpcBatch.L2Data()) + } + }) + } +} diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 23d4e43a..468866c2 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "math/big" "os" "sync" + "sync/atomic" "time" "github.com/0xPolygon/cdk/etherman" @@ -16,17 +18,44 @@ import ( "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) const ten = 10 +// EthTxManager represents the eth tx manager interface +type EthTxManager interface { + Start() + AddWithGas( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + gas uint64, + ) (common.Hash, error) + Remove(ctx context.Context, hash common.Hash) error + ResultsByStatus(ctx context.Context, status []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Result(ctx context.Context, hash common.Hash) (ethtxtypes.MonitoredTxResult, error) +} + +// Etherman represents the etherman behaviour +type Etherman interface { + CurrentNonce(ctx context.Context, address common.Address) (uint64, error) + GetLatestBlockHeader(ctx context.Context) (*types.Header, error) + EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) + GetLatestBatchNumber() (uint64, error) +} + // SequenceSender represents a sequence sender type SequenceSender struct { cfg Config logger *log.Logger - ethTxManager *ethtxmanager.Client - etherman *etherman.Client + ethTxManager EthTxManager + etherman Etherman latestVirtualBatchNumber uint64 // Latest virtualized batch obtained from L1 latestVirtualTime time.Time // Latest virtual batch timestamp latestSentToL1Batch uint64 // Latest batch sent to L1 @@ -38,7 +67,7 @@ type SequenceSender struct { mutexEthTx sync.Mutex // Mutex to access ethTransactions sequencesTxFile *os.File // Persistence of sent transactions validStream bool // Not valid while receiving data before the desired batch - seqSendingStopped bool // If there is a critical error + seqSendingStopped uint32 // If there is a critical error TxBuilder txbuilder.TxBuilder latestVirtualBatchLock sync.Mutex } @@ -54,15 +83,14 @@ func New(cfg Config, logger *log.Logger, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { // Create sequencesender s := SequenceSender{ - cfg: cfg, - logger: logger, - etherman: etherman, - ethTransactions: make(map[common.Hash]*ethTxData), - ethTxData: make(map[common.Hash][]byte), - sequenceData: make(map[uint64]*sequenceData), - validStream: false, - seqSendingStopped: false, - TxBuilder: txBuilder, + cfg: cfg, + logger: logger, + etherman: etherman, + ethTransactions: make(map[common.Hash]*ethTxData), + ethTxData: make(map[common.Hash][]byte), + sequenceData: make(map[uint64]*sequenceData), + validStream: false, + TxBuilder: txBuilder, } logger.Infof("TxBuilder configuration: %s", txBuilder.String()) @@ -108,7 +136,7 @@ func (s *SequenceSender) Start(ctx context.Context) { } // Current batch to sequence - s.latestSentToL1Batch = s.latestVirtualBatchNumber + atomic.StoreUint64(&s.latestSentToL1Batch, atomic.LoadUint64(&s.latestVirtualBatchNumber)) // 
Start retrieving batches from RPC go func() { @@ -127,7 +155,7 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error { ticker := time.NewTicker(s.cfg.GetBatchWaitInterval.Duration) defer ticker.Stop() - currentBatchNumber := s.latestVirtualBatchNumber + 1 + currentBatchNumber := atomic.LoadUint64(&s.latestVirtualBatchNumber) + 1 for { select { case <-ctx.Done(): @@ -135,7 +163,7 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error { return ctx.Err() default: // Try to retrieve batch from RPC - rpcBatch, err := s.getBatchFromRPC(currentBatchNumber) + rpcBatch, err := getBatchFromRPC(s.cfg.RPCURL, currentBatchNumber) if err != nil { if errors.Is(err, ethtxmanager.ErrNotFound) { s.logger.Infof("batch %d not found in RPC", currentBatchNumber) @@ -201,7 +229,7 @@ func (s *SequenceSender) sequenceSending(ctx context.Context) { // purgeSequences purges batches from memory structures func (s *SequenceSender) purgeSequences() { // If sequence sending is stopped, do not purge - if s.seqSendingStopped { + if atomic.LoadUint32(&s.seqSendingStopped) == 1 { return } @@ -212,7 +240,7 @@ func (s *SequenceSender) purgeSequences() { toPurge := make([]uint64, 0) for i := 0; i < len(s.sequenceList); i++ { batchNumber := s.sequenceList[i] - if batchNumber <= s.latestVirtualBatchNumber { + if batchNumber <= atomic.LoadUint64(&s.latestVirtualBatchNumber) { truncateUntil = i + 1 toPurge = append(toPurge, batchNumber) } @@ -253,7 +281,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Check if the sequence sending is stopped - if s.seqSendingStopped { + if atomic.LoadUint32(&s.seqSendingStopped) == 1 { s.logger.Warnf("sending is stopped!") return } @@ -293,7 +321,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { return } - elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin) + elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin) if !elapsed { s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+ @@ -320,7 +348,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { for { currentTime := uint64(time.Now().Unix()) - elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, currentTime, timeMargin) + elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, currentTime, timeMargin) // Wait if the time difference is less than L1BlockTimestampMargin if !elapsed { @@ -352,7 +380,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } - sequence.SetLastVirtualBatchNumber(s.latestVirtualBatchNumber) + sequence.SetLastVirtualBatchNumber(atomic.LoadUint64(&s.latestVirtualBatchNumber)) txToEstimateGas, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { @@ -384,7 +412,8 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes sequenceBatches := make([]seqsendertypes.Batch, 0) for i := 0; i < len(s.sequenceList); i++ { batchNumber := s.sequenceList[i] - if batchNumber <= s.latestVirtualBatchNumber || batchNumber <= s.latestSentToL1Batch { + if batchNumber <= atomic.LoadUint64(&s.latestVirtualBatchNumber) || + batchNumber <= atomic.LoadUint64(&s.latestSentToL1Batch) { continue } @@ -455,20 +484,32 @@ func (s *SequenceSender) getLatestVirtualBatch() error { // Get latest virtual state batch from L1 var err error - s.latestVirtualBatchNumber, err 
= s.etherman.GetLatestBatchNumber() + latestVirtualBatchNumber, err := s.etherman.GetLatestBatchNumber() if err != nil { s.logger.Errorf("error getting latest virtual batch, error: %v", err) return errors.New("fail to get latest virtual batch") } - s.logger.Infof("latest virtual batch is %d", s.latestVirtualBatchNumber) + atomic.StoreUint64(&s.latestVirtualBatchNumber, latestVirtualBatchNumber) + + s.logger.Infof("latest virtual batch is %d", latestVirtualBatchNumber) return nil } +// logFatalf logs error, activates flag to stop sequencing, and remains in an infinite loop +func (s *SequenceSender) logFatalf(template string, args ...interface{}) { + atomic.StoreUint32(&s.seqSendingStopped, 1) + for { + s.logger.Errorf(template, args...) + s.logger.Errorf("sequence sending stopped.") + time.Sleep(ten * time.Second) + } +} + // marginTimeElapsed checks if the time between currentTime and l2BlockTimestamp is greater than timeMargin. // If it's greater returns true, otherwise it returns false and the waitTime needed to achieve this timeMargin -func (s *SequenceSender) marginTimeElapsed( +func marginTimeElapsed( l2BlockTimestamp uint64, currentTime uint64, timeMargin int64, ) (bool, int64) { // Check the time difference between L2 block and currentTime @@ -489,17 +530,8 @@ func (s *SequenceSender) marginTimeElapsed( waitTime = timeMargin - timeDiff } return false, waitTime - } else { // timeDiff is greater than timeMargin - return true, 0 } -} -// logFatalf logs error, activates flag to stop sequencing, and remains in an infinite loop -func (s *SequenceSender) logFatalf(template string, args ...interface{}) { - s.seqSendingStopped = true - for { - s.logger.Errorf(template, args...) - s.logger.Errorf("sequence sending stopped.") - time.Sleep(ten * time.Second) - } + // timeDiff is greater than timeMargin + return true, 0 } diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go new file mode 100644 index 00000000..432c5d39 --- /dev/null +++ b/sequencesender/sequencesender_test.go @@ -0,0 +1,596 @@ +package sequencesender + +import ( + "errors" + "math/big" + "os" + "testing" + "time" + + types2 "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/mocks" + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/0xPolygon/cdk/sequencesender/txbuilder" + "github.com/0xPolygon/cdk/state" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +const ( + txStreamEncoded1 = "f86508843b9aca0082520894617b3a3528f9cdd6630fd3301b9c8911f7bf063d0a808207f5a0579b72a1c1ffdd845fba45317540982109298e2ec8d67ddf2cdaf22e80903677a01831e9a01291c7ea246742a5b5a543ca6938bfc3f6958c22be06fad99274e4ac" + txStreamEncoded2 = "f86509843b9aca0082520894617b3a3528f9cdd6630fd3301b9c8911f7bf063d0a808207f5a0908a522075e09485166ffa7630cd2b7013897fa1f1238013677d6f0a86efb3d2a0068b12435fcdc8ee254f3b1df8c5b29ed691eeee6065704f061130935976ca99" + txStreamEncoded3 = 
"b8b402f8b101268505d21dba0085076c363d8982dc60941929761e87667283f087ea9ab8370c174681b4e980b844095ea7b300000000000000000000000080a64c6d7f12c47b7c66c5b4e20e72bc1fcd5d9effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc001a0dd4db494969139a120e8721842455ec13f82757a4fc49b66d447c7d32d095a1da06ef54068a9aa67ecc4f52d885299a04feb6f3531cdfc771f1412cd3331d1ba4c" +) + +var ( + now = time.Now() +) + +func TestMain(t *testing.M) { + t.Run() +} + +func Test_encoding(t *testing.T) { + tx1, err := state.DecodeTx(txStreamEncoded1) + require.NoError(t, err) + tx2, err := state.DecodeTx(txStreamEncoded2) + require.NoError(t, err) + tx3, err := state.DecodeTx(txStreamEncoded3) + require.NoError(t, err) + + txTest := state.L2TxRaw{ + EfficiencyPercentage: 129, + TxAlreadyEncoded: false, + Tx: tx1, + } + txTestEncoded := make([]byte, 0) + txTestEncoded, err = txTest.Encode(txTestEncoded) + require.NoError(t, err) + log.Debugf("%s", common.Bytes2Hex(txTestEncoded)) + + batch := state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + ChangeL2BlockHeader: state.ChangeL2BlockHeader{ + DeltaTimestamp: 3633752, + IndexL1InfoTree: 0, + }, + Transactions: []state.L2TxRaw{ + { + EfficiencyPercentage: 129, + TxAlreadyEncoded: false, + Tx: tx1, + }, + { + EfficiencyPercentage: 97, + TxAlreadyEncoded: false, + Tx: tx2, + }, + { + EfficiencyPercentage: 97, + TxAlreadyEncoded: false, + Tx: tx3, + }, + }, + }, + }, + } + + encodedBatch, err := state.EncodeBatchV2(&batch) + require.NoError(t, err) + + decodedBatch, err := state.DecodeBatchV2(encodedBatch) + require.NoError(t, err) + + require.Equal(t, batch.String(), decodedBatch.String()) +} + +func Test_Start(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + getEtherman func(t *testing.T) *mocks.EthermanMock + batchWaitDuration types2.Duration + expectNonce uint64 + expectLastVirtualBatch uint64 + expectFromStreamBatch uint64 + expectWipBatch uint64 + expectLatestSentToL1Batch uint64 + }{ + { + name: "successfully started", + getEtherman: func(t *testing.T) *mocks.EthermanMock { + t.Helper() + + mngr := mocks.NewEthermanMock(t) + mngr.On("GetLatestBatchNumber").Return(uint64(1), nil) + return mngr + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Start").Return(nil) + mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return(nil, nil) + return mngr + }, + batchWaitDuration: types2.NewDuration(time.Millisecond), + expectNonce: 3, + expectLastVirtualBatch: 1, + expectFromStreamBatch: 1, + expectWipBatch: 2, + expectLatestSentToL1Batch: 1, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + s := SequenceSender{ + etherman: tt.getEtherman(t), + ethTxManager: tt.getEthTxManager(t), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + GetBatchWaitInterval: tt.batchWaitDuration, + }, + logger: log.GetDefaultLogger(), + } + + ctx, cancel := context.WithCancel(context.Background()) + s.Start(ctx) + time.Sleep(time.Second) + cancel() + time.Sleep(time.Second) + + require.Equal(t, tt.expectLatestSentToL1Batch, s.latestSentToL1Batch) + }) + } +} + +func Test_purgeSequences(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + seqSendingStopped uint32 + sequenceList 
[]uint64 + sequenceData map[uint64]*sequenceData + latestVirtualBatchNumber uint64 + expectedSequenceList []uint64 + expectedSequenceData map[uint64]*sequenceData + }{ + { + name: "sequences purged when seqSendingStopped", + seqSendingStopped: 1, + sequenceList: []uint64{1, 2}, + sequenceData: map[uint64]*sequenceData{ + 1: {}, + 2: {}, + }, + expectedSequenceList: []uint64{1, 2}, + expectedSequenceData: map[uint64]*sequenceData{ + 1: {}, + 2: {}, + }, + }, + { + name: "no sequences purged", + seqSendingStopped: 0, + sequenceList: []uint64{4, 5}, + sequenceData: map[uint64]*sequenceData{ + 4: {}, + 5: {}, + }, + expectedSequenceList: []uint64{4, 5}, + expectedSequenceData: map[uint64]*sequenceData{ + 4: {}, + 5: {}, + }, + }, + { + name: "sequences purged", + seqSendingStopped: 0, + sequenceList: []uint64{4, 5, 6}, + sequenceData: map[uint64]*sequenceData{ + 4: {}, + 5: {}, + 6: {}, + }, + latestVirtualBatchNumber: 5, + expectedSequenceList: []uint64{6}, + expectedSequenceData: map[uint64]*sequenceData{ + 6: {}, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ss := SequenceSender{ + seqSendingStopped: tt.seqSendingStopped, + sequenceList: tt.sequenceList, + sequenceData: tt.sequenceData, + latestVirtualBatchNumber: tt.latestVirtualBatchNumber, + logger: log.GetDefaultLogger(), + } + + ss.purgeSequences() + + require.Equal(t, tt.expectedSequenceList, ss.sequenceList) + require.Equal(t, tt.expectedSequenceData, ss.sequenceData) + }) + } +} + +func Test_tryToSendSequence(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + getEtherman func(t *testing.T) *mocks.EthermanMock + getTxBuilder func(t *testing.T) *mocks.TxBuilderMock + maxPendingTxn uint64 + sequenceList []uint64 + latestSentToL1Batch uint64 + sequenceData map[uint64]*sequenceData + ethTransactions map[common.Hash]*ethTxData + ethTxData map[common.Hash][]byte + + expectErr error + }{ + { + name: "successfully sent", + getEtherman: func(t *testing.T) *mocks.EthermanMock { + t.Helper() + + mngr := mocks.NewEthermanMock(t) + mngr.On("GetLatestBatchNumber").Return(uint64(1), nil) + return mngr + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + return mngr + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{}), nil) + return mngr + }, + maxPendingTxn: 10, + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + }, + { + name: "successfully sent new sequence", + getEtherman: func(t *testing.T) *mocks.EthermanMock { + t.Helper() + + mngr := mocks.NewEthermanMock(t) + mngr.On("GetLatestBatchNumber").Return(uint64(1), nil) + mngr.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{ + Number: big.NewInt(1), + }, nil) + mngr.On("EstimateGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(uint64(100500), nil) + return mngr + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, 
mock.Anything, mock.Anything, uint64(100500)).Return(common.Hash{}, nil) + mngr.On("Result", mock.Anything, common.Hash{}).Return(ethtxtypes.MonitoredTxResult{ + ID: common.Hash{}, + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mngr.On("NewSequence", mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + mngr.On("BuildSequenceBatchesTx", mock.Anything, mock.Anything).Return(types.NewTx(&types.LegacyTx{}), nil) + return mngr + }, + maxPendingTxn: 10, + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + ethTransactions: map[common.Hash]*ethTxData{}, + ethTxData: map[common.Hash][]byte{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + s := SequenceSender{ + ethTxManager: tt.getEthTxManager(t), + etherman: tt.getEtherman(t), + TxBuilder: tt.getTxBuilder(t), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + MaxPendingTx: tt.maxPendingTxn, + }, + sequenceList: tt.sequenceList, + latestSentToL1Batch: tt.latestSentToL1Batch, + sequenceData: tt.sequenceData, + ethTransactions: tt.ethTransactions, + ethTxData: tt.ethTxData, + logger: log.GetDefaultLogger(), + } + + s.tryToSendSequence(context.Background()) + }) + } +} + +func Test_getSequencesToSend(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + sequenceList []uint64 + latestSentToL1Batch uint64 + forkUpgradeBatchNumber uint64 + sequenceData map[uint64]*sequenceData + getTxBuilder func(t *testing.T) *mocks.TxBuilderMock + expectedSequence seqsendertypes.Sequence + expectedErr error + }{ + { + name: "successfully get sequence", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + return mngr + }, + expectedSequence: txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), + expectedErr: nil, + }, + { + name: "different coinbase", + sequenceList: []uint64{2, 3}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + 3: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{ + LastCoinbase: common.HexToAddress("0x2"), + }), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mngr.On("NewSequence", mock.Anything, mock.Anything, 
mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + return mngr + }, + expectedSequence: txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), + expectedErr: nil, + }, + { + name: "NewSequenceIfWorthToSend return error", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("test error")) + return mngr + }, + expectedErr: errors.New("test error"), + }, + { + name: "fork upgrade", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + forkUpgradeBatchNumber: 2, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mngr.On("NewSequence", mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + return mngr + }, + expectedSequence: txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), + expectedErr: nil, + }, + { + name: "fork upgrade passed", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + forkUpgradeBatchNumber: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + return mngr + }, + expectedErr: errors.New("aborting sequencing process as we reached the batch 2 where a new forkid is applied (upgrade)"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ss := SequenceSender{ + sequenceList: tt.sequenceList, + latestSentToL1Batch: tt.latestSentToL1Batch, + cfg: Config{ + ForkUpgradeBatchNumber: tt.forkUpgradeBatchNumber, + }, + sequenceData: tt.sequenceData, + TxBuilder: tt.getTxBuilder(t), + logger: log.GetDefaultLogger(), + } + + sequence, err := ss.getSequencesToSend(context.Background()) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedSequence, sequence) + } + }) + } +} + +func Test_marginTimeElapsed(t *testing.T) { + t.Parallel() + + type args struct { + l2BlockTimestamp uint64 + currentTime uint64 + timeMargin int64 + } + tests := []struct { + name string + args args + want bool + want1 int64 + }{ + { + name: "time elapsed", + args: args{ + l2BlockTimestamp: 100, + currentTime: 200, + timeMargin: 50, + }, + want: true, + want1: 0, + }, + { + name: "time not elapsed", + args: args{ + l2BlockTimestamp: 100, + currentTime: 200, + timeMargin: 150, + }, + want: false, + want1: 50, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + got, got1 := marginTimeElapsed(tt.args.l2BlockTimestamp, tt.args.currentTime, 
tt.args.timeMargin) + require.Equal(t, tt.want, got, "marginTimeElapsed() got = %v, want %v", got, tt.want) + require.Equal(t, tt.want1, got1, "marginTimeElapsed() got1 = %v, want %v", got1, tt.want1) + }) + } +} diff --git a/test/Makefile b/test/Makefile index a6ab9467..de91c499 100644 --- a/test/Makefile +++ b/test/Makefile @@ -15,6 +15,9 @@ COMMON_MOCKERY_PARAMS=--disable-version-string --with-expecter --exported generate-mocks-sequencesender: ## Generates mocks for sequencesender, using mockery tool rm -Rf ../sequencesender/txbuilder/mocks_txbuilder export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../sequencesender/txbuilder --output ../sequencesender/txbuilder/mocks_txbuilder --outpkg mocks_txbuilder ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthTxMngrMock --filename=mock_ethtxmanager.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go .PHONY: generate-mocks-da generate-mocks-da: ## Generates mocks for dataavailability, using mockery tool From 34af8b051c598cecb39bf2fc7ce3d86477844605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Fri, 4 Oct 2024 15:16:58 +0200 Subject: [PATCH 24/53] feat: Proof Cleaning (#105) * feat: proof cleaning * feat: improve log --- aggregator/aggregator.go | 67 +++++++++++++++------------------------- aggregator/interfaces.go | 1 + 2 files changed, 26 insertions(+), 42 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 4f8536cb..1c07d340 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -32,6 +32,7 @@ import ( synclog "github.com/0xPolygonHermez/zkevm-synchronizer-l1/log" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" + "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer/l1_check_block" "github.com/ethereum/go-ethereum/common" "go.uber.org/zap/zapcore" "google.golang.org/grpc" @@ -991,7 +992,7 @@ func (a *Aggregator) settleDirect( // process monitored batch verifications before starting a next cycle a.ethTxManager.ProcessPendingMonitoredTxs(ctx, func(result ethtxtypes.MonitoredTxResult) { - a.handleMonitoredTxResult(result) + a.handleMonitoredTxResult(result, proof.BatchNumber, proof.BatchNumberFinal) }) return true @@ -1929,57 +1930,39 @@ func (hc *healthChecker) Watch(req *grpchealth.HealthCheckRequest, server grpche }) } -func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult) { +func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult, firstBatch, lastBatch uint64) { mTxResultLogger := ethtxmanager.CreateMonitoredTxResultLogger(result) if result.Status == ethtxtypes.MonitoredTxStatusFailed { mTxResultLogger.Fatal("failed to send batch verification, TODO: review this fatal and define what to do in this case") } - // TODO: REVIEW THIS + // Wait for the transaction to be finalized, then we can safely delete all recursive 
+ // proofs up to the last batch in this proof - /* - // monitoredIDFormat: "proof-from-%v-to-%v" - idSlice := strings.Split(result.ID, "-") - proofBatchNumberStr := idSlice[2] - proofBatchNumber, err := strconv.ParseUint(proofBatchNumberStr, encoding.Base10, 0) - - if err != nil { - mTxResultLogger.Errorf("failed to read final proof batch number from monitored tx: %v", err) - } - - proofBatchNumberFinalStr := idSlice[4] - proofBatchNumberFinal, err := strconv.ParseUint(proofBatchNumberFinalStr, encoding.Base10, 0) - - if err != nil { - mTxResultLogger.Errorf("failed to read final proof batch number final from monitored tx: %v", err) - } - - log := log.WithFields("txId", result.ID, "batches", fmt.Sprintf("%d-%d", proofBatchNumber, proofBatchNumberFinal)) - log.Info("Final proof verified") - - // wait for the synchronizer to catch up the verified batches - log.Debug("A final proof has been sent, waiting for the network to be synced") - - for !a.isSynced(a.ctx, &proofBatchNumberFinal) { - log.Info("Waiting for synchronizer to sync...") - time.Sleep(a.cfg.RetryTime.Duration) - } + finaLizedBlockNumber, err := l1_check_block.L1FinalizedFetch.BlockNumber(a.ctx, a.etherman) + if err != nil { + mTxResultLogger.Errorf("failed to get finalized block number: %v", err) + } - // network is synced with the final proof, we can safely delete all recursive - // proofs up to the last synced batch - err = a.State.CleanupGeneratedProofs(a.ctx, proofBatchNumberFinal, nil) + for result.MinedAtBlockNumber.Uint64() > finaLizedBlockNumber { + select { + case <-a.ctx.Done(): + return + case <-time.After(a.cfg.RetryTime.Duration): + finaLizedBlockNumber, err = l1_check_block.L1FinalizedFetch.BlockNumber(a.ctx, a.etherman) + if err != nil { + mTxResultLogger.Errorf("failed to get finalized block number: %v", err) + } + } + } - if err != nil { - log.Errorf("Failed to store proof aggregation result: %v", err) - } - */ -} + err = a.state.DeleteGeneratedProofs(a.ctx, firstBatch, lastBatch, nil) + if err != nil { + mTxResultLogger.Errorf("failed to delete generated proofs from %d to %d: %v", firstBatch, lastBatch, err) + } -/* -func buildMonitoredTxID(batchNumber, batchNumberFinal uint64) string { - return fmt.Sprintf(monitoredIDFormat, batchNumber, batchNumberFinal) + mTxResultLogger.Debugf("deleted generated proofs from %d to %d", firstBatch, lastBatch) } -*/ func (a *Aggregator) cleanupLockedProofs() { for { diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index b231de35..85676f69 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -35,6 +35,7 @@ type etherman interface { ) (to *common.Address, data []byte, err error) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) } // aggregatorTxProfitabilityChecker interface for different profitability From 5492f30d9a5b73483cd290e14d6da1ead89dcf79 Mon Sep 17 00:00:00 2001 From: rbpol Date: Fri, 4 Oct 2024 15:02:16 +0100 Subject: [PATCH 25/53] feat: Upgraded cdk-rpc dep (#108) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 0f85874c..f68dc923 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.4 require ( github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f github.com/0xPolygon/cdk-data-availability v0.0.9 - github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 + 
github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 diff --git a/go.sum b/go.sum index c3ea3568..00c6f093 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ github.com/0xPolygon/cdk-data-availability v0.0.9 h1:KkP+hJH9nY5fljpSNtW2pfP5YQC github.com/0xPolygon/cdk-data-availability v0.0.9/go.mod h1:5A+CU4FGeyG8zTDJc0khMzRxPzFzmpRydeEWmLztgh4= github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8PudddGacnVLatrCuAG9e1Ut8W4J0GoawssU= github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3kRFw+C7J6vmGnl8gcazg+Gh/NVmnas= +github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= From 9414f249c116031bcebbfd73256b5e79466fcaeb Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 4 Oct 2024 17:24:01 +0200 Subject: [PATCH 26/53] fix: cdk-523, update zkevm-synchronizer-l1 to v1.0.3 (#109) --- go.mod | 21 +++++++++++---------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/go.mod b/go.mod index f68dc923..c4968dbb 100644 --- a/go.mod +++ b/go.mod @@ -3,16 +3,16 @@ module github.com/0xPolygon/cdk go 1.22.4 require ( - github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f - github.com/0xPolygon/cdk-data-availability v0.0.9 + github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2 + github.com/0xPolygon/cdk-data-availability v0.0.10 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.3 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 - github.com/iden3/go-iden3-crypto v0.0.16 + github.com/iden3/go-iden3-crypto v0.0.17 github.com/invopop/jsonschema v0.12.0 github.com/jackc/pgconn v1.14.3 github.com/jackc/pgx/v4 v4.18.3 @@ -23,7 +23,7 @@ require ( github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 - github.com/urfave/cli/v2 v2.27.2 + github.com/urfave/cli/v2 v2.27.4 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/zap v1.27.0 @@ -98,7 +98,7 @@ require ( github.com/jackc/puddle v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jmoiron/sqlx v1.2.0 // indirect - github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/ledgerwatch/log/v3 v3.9.0 // indirect @@ -111,6 +111,7 @@ require ( github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect github.com/mitchellh/pointerstructure v1.2.0 
// indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/gomega v1.27.10 // indirect @@ -118,10 +119,10 @@ require ( github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect @@ -145,7 +146,7 @@ require ( github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect - github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect diff --git a/go.sum b/go.sum index 00c6f093..468e2192 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,15 @@ -github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f h1:i9oCNDG4N7ha3fNkEKbito/HF3o4gjnW6//cpTwnp8E= -github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= -github.com/0xPolygon/cdk-data-availability v0.0.9 h1:KkP+hJH9nY5fljpSNtW2pfP5YQCJOsSRzfnl0yT78rI= -github.com/0xPolygon/cdk-data-availability v0.0.9/go.mod h1:5A+CU4FGeyG8zTDJc0khMzRxPzFzmpRydeEWmLztgh4= -github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8PudddGacnVLatrCuAG9e1Ut8W4J0GoawssU= -github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2 h1:N5qvWG4amhUt6d1F4Kf8AdJZs4z7/xZfE3v/Im2afNM= +github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= +github.com/0xPolygon/cdk-data-availability v0.0.10 h1:pVcke2I7GuPH7JeRLKokEOHffPMwEHmJd9yDXHqh9B4= +github.com/0xPolygon/cdk-data-availability v0.0.10/go.mod h1:nn5RmnkzOiugAxizSbaYnA+em79YLLLoR25i0UlKc5Q= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3kRFw+C7J6vmGnl8gcazg+Gh/NVmnas= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2 h1:DYioOpHDcn7rtojInDTEv7vmnhs8HP6zOSSXSGENM7s= 
-github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.2/go.mod h1:96i+QSANfbikwlUY3U9MLNtg3656W3dWfbGqH+Od1/k= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.3 h1:liWFgajbCP3qhDGx+S/bJ0C5F5zXRzHs+fX9pMPr8y8= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.3/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -196,8 +194,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= -github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY= +github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -256,8 +254,8 @@ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhB github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -317,6 +315,8 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -352,14 +352,14 @@ 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= @@ -434,16 +434,16 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= -github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= +github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= +github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= 
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= From 909726ebdc09cab62b83811c347ad2fbc9658999 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Sun, 6 Oct 2024 09:11:15 +0200 Subject: [PATCH 27/53] ci: bump kurtosis tag and cdk-erigon (#106) * ci: bump cdk-erigon Also remove testing fork9-rollup --- .github/workflows/test-e2e.yml | 2 +- test/Makefile | 6 +++--- test/combinations/fork11-rollup.yml | 12 ++++++++++++ test/combinations/fork12-cdk-validium.yml | 1 + test/combinations/fork12-rollup.yml | 1 + test/combinations/fork9-cdk-validium.yml | 1 + test/combinations/fork9-rollup.yml | 12 ------------ test/helpers/common-setup.bash | 2 +- 8 files changed, 20 insertions(+), 17 deletions(-) create mode 100644 test/combinations/fork11-rollup.yml delete mode 100644 test/combinations/fork9-rollup.yml diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 647c5daa..a4886613 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -15,7 +15,7 @@ jobs: goarch: [ "amd64" ] e2e-group: - "fork9-validium" - - "fork9-rollup" + - "fork11-rollup" - "fork12-validium" - "fork12-rollup" runs-on: ubuntu-latest diff --git a/test/Makefile b/test/Makefile index de91c499..b72c101f 100644 --- a/test/Makefile +++ b/test/Makefile @@ -54,9 +54,9 @@ test-e2e-fork9-validium: stop ./run-e2e.sh fork9 cdk-validium bats . -.PHONY: test-e2e-fork9-rollup -test-e2e-fork9-rollup: stop - ./run-e2e.sh fork9 rollup +.PHONY: test-e2e-fork11-rollup +test-e2e-fork11-rollup: stop + ./run-e2e.sh fork11 rollup bats . 
.PHONY: test-e2e-fork12-validium diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml new file mode 100644 index 00000000..89f2f6cd --- /dev/null +++ b/test/combinations/fork11-rollup.yml @@ -0,0 +1,12 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 + zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.0-RC31-fork.11 + zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - pless_zkevm_node + - tx_spammer + data_availability_mode: rollup + sequencer_type: erigon diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index 7772a677..cc0a34dc 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,6 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index 05e1f51f..ba8bb440 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,6 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index 21a20b58..13f16d48 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -3,6 +3,7 @@ args: zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk + cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork9-rollup.yml b/test/combinations/fork9-rollup.yml deleted file mode 100644 index a17daa2a..00000000 --- a/test/combinations/fork9-rollup.yml +++ /dev/null @@ -1,12 +0,0 @@ -args: - zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 - zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 - zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 - cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk - cdk_node_image: cdk - zkevm_use_gas_token_contract: true - additional_services: - - pless_zkevm_node - - tx_spammer - data_availability_mode: rollup - sequencer_type: erigon diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index 415f211d..7cb4dec7 100644 --- a/test/helpers/common-setup.bash +++ b/test/helpers/common-setup.bash @@ -23,4 +23,4 @@ _common_setup() { readonly contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node http-rpc)"} -} \ No newline at end of file +} From e3b8f749d1726f5b156af740bcd69b40c8fa3809 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= 
<58293609+ToniRamirezM@users.noreply.github.com> Date: Mon, 7 Oct 2024 15:47:31 +0200 Subject: [PATCH 28/53] fix: check sequence sender config (#110) * fix: check sequence sender config * feat: add default value --- cmd/run.go | 10 ++++++++-- config/default.go | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index d90a44a1..b113c06e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -179,6 +179,12 @@ func createSequenceSender( l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *sequencesender.SequenceSender { logger := log.WithFields("module", cdkcommon.SEQUENCE_SENDER) + + // Check config + if cfg.SequenceSender.RPCURL == "" { + logger.Fatal("Required field RPCURL is empty in sequence sender config") + } + ethman, err := etherman.NewClient(ethermanconfig.Config{ EthermanConfig: ethtxman.Config{ URL: cfg.SequenceSender.EthTxManager.Etherman.URL, @@ -200,9 +206,9 @@ func createSequenceSender( logger.Fatal(err) } cfg.SequenceSender.SenderAddress = auth.From - blockFialityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality) + blockFinalityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality) - blockFinality, err := blockFialityType.ToBlockNum() + blockFinality, err := blockFinalityType.ToBlockNum() if err != nil { logger.Fatalf("Failed to create block finality. Err: %w, ", err) } diff --git a/config/default.go b/config/default.go index a7730ec7..74eec57d 100644 --- a/config/default.go +++ b/config/default.go @@ -40,7 +40,7 @@ WaitPeriodPurgeTxFile = "15m" MaxPendingTx = 1 MaxBatchesForL1 = 300 BlockFinality = "FinalizedBlock" -RPCURL = "" +RPCURL = "localhost:8123" GetBatchWaitInterval = "10s" [SequenceSender.EthTxManager] FrequencyToMonitorTxs = "1s" From f54a3a8c0f49f05f4037aef57048409c86564383 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:31:58 +0200 Subject: [PATCH 29/53] fix: if the sanity check of l1infotreeUpdateV2 fails, it means an error and syncing needs to stop (#113) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c4968dbb..cd587fb2 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.3 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 diff --git a/go.sum b/go.sum index 468e2192..3de7966e 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSd github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.3 h1:liWFgajbCP3qhDGx+S/bJ0C5F5zXRzHs+fX9pMPr8y8= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.3/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 h1:+ZbyEpaBZu88jWtov/7iBWvwgBMu5cxlvAFDxsPrnGQ= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4/go.mod
h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= From edabc4caa8abcdc13dd45a5068970ca6e2a31054 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Tue, 8 Oct 2024 18:35:37 +0200 Subject: [PATCH 30/53] feat: Run cdk-erigon with cdk config files transparently for the user (#88) * feat: dynamically generate erigon config Render the config files in tmp and run erigon passing the path of the config file. * feat: add node components param * docs: update readme * refactor: remove admin address * fix: adapt dockerfile * refactor: tune makefile --- .gitignore | 2 + Cargo.lock | 1117 +++++++++++++++++++--- Cargo.toml | 12 +- Dockerfile | 7 +- Makefile | 9 +- README.md | 24 + config/example-config.toml | 122 --- crates/cdk-config/src/aggregator.rs | 118 +++ crates/cdk-config/src/l1.rs | 39 + crates/cdk-config/src/layer1.rs | 28 - crates/cdk-config/src/lib.rs | 18 +- crates/cdk-config/src/network_config.rs | 16 + crates/cdk-config/src/sequence_sender.rs | 50 + crates/cdk-config/src/telemetry.rs | 7 +- crates/cdk/Cargo.toml | 9 + crates/cdk/build.rs | 2 +- crates/cdk/src/allocs_render.rs | 99 ++ crates/cdk/src/cli.rs | 15 +- crates/cdk/src/config_render.rs | 130 +++ crates/cdk/src/main.rs | 155 ++- 20 files changed, 1603 insertions(+), 376 deletions(-) delete mode 100644 config/example-config.toml create mode 100644 crates/cdk-config/src/aggregator.rs create mode 100644 crates/cdk-config/src/l1.rs delete mode 100644 crates/cdk-config/src/layer1.rs create mode 100644 crates/cdk-config/src/network_config.rs create mode 100644 crates/cdk-config/src/sequence_sender.rs create mode 100644 crates/cdk/src/allocs_render.rs create mode 100644 crates/cdk/src/config_render.rs diff --git a/.gitignore b/.gitignore index abfa990f..fe96efc4 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ tmp .vscode .idea .idea/* + +data diff --git a/Cargo.lock b/Cargo.lock index 3044ff93..83b3b597 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -38,6 +38,18 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -47,6 +59,171 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloy-json-rpc" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 1.0.0", + "hashbrown 0.14.5", + "hex-literal", + "indexmap 2.6.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +dependencies = [ + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest 0.12.8", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap 2.6.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.68", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +dependencies = [ + "const-hex", + "dunce", + "heck", + "proc-macro2", + "quote", + "syn 2.0.68", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-transport" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-http" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.8", + "serde_json", + "tower 0.5.1", + "tracing", + "url", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -117,6 +294,130 @@ version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "arrayvec" version = "0.7.4" @@ -151,7 +452,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -228,15 +529,6 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" -[[package]] -name = "beef" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" -dependencies = [ - "serde", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -369,7 +661,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -390,14 +682,23 @@ dependencies = [ name = "cdk" version = "0.1.0" dependencies = [ + "alloy-json-rpc", + "alloy-rpc-client", + "alloy-transport-http", "anyhow", "cdk-config", "clap", "dotenvy", "execute", + 
"reqwest 0.12.8", + "serde", + "serde_json", + "tempfile", + "tokio", "toml", "tracing", "tracing-subscriber", + "url", ] [[package]] @@ -439,7 +740,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -500,7 +801,7 @@ checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" dependencies = [ "bs58", "coins-core", - "digest", + "digest 0.10.7", "hmac", "k256", "serde", @@ -533,7 +834,7 @@ dependencies = [ "base64 0.21.7", "bech32", "bs58", - "digest", + "digest 0.10.7", "generic-array 0.14.7", "hex", "ripemd", @@ -751,6 +1052,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_more" version = "0.99.18" @@ -762,6 +1074,36 @@ dependencies = [ "syn 2.0.68", ] +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = "digest" version = "0.10.7" @@ -835,7 +1177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der", - "digest", + "digest 0.10.7", "elliptic-curve", "rfc6979", "signature", @@ -856,7 +1198,7 @@ checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct", "crypto-bigint", - "digest", + "digest 0.10.7", "ff", "generic-array 0.14.7", "group", @@ -927,7 +1269,7 @@ checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ "aes", "ctr", - "digest", + "digest 0.10.7", "hex", "hmac", "pbkdf2 0.11.0", @@ -1052,7 +1394,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "syn 2.0.68", @@ -1114,8 +1456,8 @@ checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649" dependencies = [ "chrono", "ethers-core", - "reqwest", - "semver", + "reqwest 0.11.27", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -1139,7 +1481,7 @@ dependencies = [ "futures-locks", "futures-util", "instant", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "thiserror", @@ -1171,7 +1513,7 @@ dependencies = [ "jsonwebtoken", "once_cell", "pin-project", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "thiserror", @@ -1224,7 +1566,7 @@ dependencies = [ "path-slash", "rayon", "regex", - "semver", + "semver 1.0.23", "serde", "serde_json", "solang-parser", @@ -1290,6 +1632,17 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" 
+[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.13.0" @@ -1334,6 +1687,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1468,6 +1836,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "fxhash" version = "0.2.1" @@ -1522,15 +1896,15 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-net" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43aaa242d1239a8822c15c645f02166398da4f8b5c4bae795c1f5b44e9eee173" +checksum = "c06f627b1a58ca3d42b45d6104bf1e1a03799df472df00988b6ba21accc10580" dependencies = [ "futures-channel", "futures-core", "futures-sink", "gloo-utils", - "http 0.2.12", + "http 1.1.0", "js-sys", "pin-project", "serde", @@ -1589,7 +1963,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1608,7 +1982,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.6", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1626,6 +2000,16 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "serde", +] + +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] name = "hashers" @@ -1653,6 +2037,15 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hmac" @@ -1660,7 +2053,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest", + "digest 0.10.7", ] [[package]] @@ -1817,6 +2210,22 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.6" @@ -1832,7 +2241,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -1933,12 +2342,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -1972,6 +2381,15 @@ version = "1.70.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.11.0" @@ -2027,9 +2445,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" +checksum = "126b48a5acc3c52fbd5381a77898cb60e145123179588a29e7ac48f9c06e401b" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -2045,9 +2463,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" +checksum = "bf679a8e0e083c77997f7c4bb4ca826577105906027ae462aac70ff348d02c6a" dependencies = [ "base64 0.22.1", "futures-channel", @@ -2070,13 +2488,11 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" +checksum = "b0e503369a76e195b65af35058add0e6900b794a4e9a9316900ddd3a87a80477" dependencies = [ - "anyhow", "async-trait", - "beef", "bytes", "futures-timer", "futures-util", @@ -2099,9 +2515,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" +checksum = "f2c0caba4a6a8efbafeec9baa986aa22a75a96c29d3e4b0091b0098d6470efb5" dependencies = [ "async-trait", "base64 0.22.1", @@ -2117,16 +2533,16 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" +checksum = "fc660a9389e2748e794a40673a4155d501f32db667757cdb80edeff0306b489b" dependencies = [ "heck", "proc-macro-crate", @@ -2137,11 +2553,10 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.23.2" +version = "0.24.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51" +checksum = "af6e6c9b6d975edcb443565d648b605f3e85a04ec63aa6941811a8894cc9cded" dependencies = [ - "anyhow", "futures-util", "http 1.1.0", "http-body 1.0.0", @@ -2159,17 +2574,16 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower", + "tower 0.4.13", "tracing", ] [[package]] name = "jsonrpsee-types" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" +checksum = "d8fb16314327cbc94fdf7965ef7e4422509cd5597f76d137bd104eb34aeede67" dependencies = [ - "beef", "http 1.1.0", "serde", "serde_json", @@ -2178,9 +2592,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" +checksum = "e0da62b43702bd5640ea305d35df95da30abc878e79a7b4b01feda3beaf35d3c" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -2189,9 +2603,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" +checksum = "39aabf5d6c6f22da8d5b808eea1fab0736059f11fb42f71f141b14f404e5046a" dependencies = [ "http 1.1.0", "jsonrpsee-client-transport", @@ -2237,6 +2651,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "lalrpop" version = "0.20.2" @@ -2246,7 +2670,7 @@ dependencies = [ "ascii-canvas", "bit-set", "ena", - "itertools", + "itertools 0.11.0", "lalrpop-util", "petgraph", "regex", @@ -2333,7 +2757,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ "cfg-if", - "digest", + "digest 0.10.7", ] [[package]] @@ -2359,13 +2783,31 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi", "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] @@ -2490,12 +2932,50 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name 
= "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "option-ext" version = "0.2.0" @@ -2554,7 +3034,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2568,6 +3048,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "path-slash" version = "0.2.1" @@ -2580,7 +3066,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest", + "digest 0.10.7", "hmac", "password-hash", "sha2", @@ -2592,7 +3078,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ - "digest", + "digest 0.10.7", "hmac", ] @@ -2611,6 +3097,17 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -2618,7 +3115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.2.6", + "indexmap 2.6.0", ] [[package]] @@ -2628,7 +3125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -2781,6 +3278,28 @@ dependencies = [ "toml_edit 0.21.1", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -2796,6 +3315,8 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ + "bit-set", + "bit-vec", "bitflags 2.6.0", "lazy_static", "num-traits", @@ -2803,9 +3324,17 @@ dependencies = [ "rand_chacha", "rand_xorshift", "regex-syntax 0.8.4", + "rusty-fork", + "tempfile", "unarray", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.36" @@ -2830,6 +3359,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core", + "serde", ] [[package]] @@ -2972,8 +3502,8 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", - "system-configuration", + "sync_wrapper 0.1.2", + "system-configuration 0.5.1", "tokio", "tokio-rustls 0.24.1", "tower-service", @@ -2985,6 +3515,49 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest" +version = "0.12.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls 0.27.2", + "hyper-tls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.1.2", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "system-configuration 0.6.1", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -3031,7 +3604,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest", + "digest 0.10.7", ] [[package]] @@ -3062,6 +3635,36 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -3070,9 +3673,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc-hex" @@ -3080,13 +3683,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.23", ] [[package]] @@ -3221,6 +3833,18 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.18" @@ -3252,7 +3876,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "cfg-if", - "derive_more", + "derive_more 0.99.18", "parity-scale-codec", "scale-info-derive", ] @@ -3344,6 +3968,15 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.23" @@ -3353,6 +3986,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -3367,18 +4009,18 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -3387,20 +4029,21 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -3419,15 +4062,15 @@ dependencies = [ 
[[package]] name = "serde_with" -version = "3.8.3" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" +checksum = "9720086b3357bcb44fce40117d769a4d068c70ecfa190850a980a71755f66fcc" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -3437,9 +4080,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.3" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" +checksum = "5f1abbfe725f27678f4663bcacb75a83e829fd464c25d78dd038a3a29e307cec" dependencies = [ "darling", "proc-macro2", @@ -3455,7 +4098,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -3466,7 +4109,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -3475,10 +4118,20 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest", + "digest 0.10.7", "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -3494,7 +4147,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "digest", + "digest 0.10.7", "rand_core", ] @@ -3563,7 +4216,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" dependencies = [ - "itertools", + "itertools 0.11.0", "lalrpop", "lalrpop-util", "phf", @@ -3656,8 +4309,8 @@ dependencies = [ "fs2", "hex", "once_cell", - "reqwest", - "semver", + "reqwest 0.11.27", + "semver 1.0.23", "serde", "serde_json", "sha2", @@ -3688,12 +4341,33 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3702,7 +4376,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -3715,6 +4400,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -3723,14 +4418,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3746,18 +4442,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -3831,32 +4527,41 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "pin-project-lite", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn 2.0.68", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -3921,21 +4626,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.14" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.15", + "toml_edit 0.22.22", ] [[package]] name = "toml_datetime" -version = "0.6.6" 
+version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] @@ -3946,22 +4651,22 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.15" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow 0.6.20", ] [[package]] @@ -3980,17 +4685,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -4121,6 +4840,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + [[package]] name = "uint" version = "0.9.5" @@ -4218,12 +4943,27 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -4377,7 +5117,37 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + 
+[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -4395,7 +5165,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -4415,18 +5194,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4437,9 +5216,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4449,9 +5228,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4461,15 +5240,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = 
"0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4479,9 +5258,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4491,9 +5270,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4503,9 +5282,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4515,9 +5294,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -4530,9 +5309,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.13" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -4558,7 +5337,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version", + "rustc_version 0.4.0", "send_wrapper 0.6.0", "thiserror", "wasm-bindgen", @@ -4581,11 +5360,45 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" 
+version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] [[package]] name = "zip" diff --git a/Cargo.toml b/Cargo.toml index 0bb41677..debf9da1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,15 +8,15 @@ version = "0.1.0" edition = "2021" [workspace.dependencies] -serde = { version = "1.0.197", features = ["derive"] } -serde_json = "1.0.116" -serde_with = "3.7.0" -thiserror = "1.0.58" -toml = "0.8.12" +serde = { version = "1.0.210", features = ["derive"] } +serde_json = "1.0.128" +serde_with = "3.10.0" +thiserror = "1.0.64" +toml = "0.8.19" tracing = "0.1.40" tracing-appender = "0.2.3" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } url = { version = "2.5.2", features = ["serde"] } ethers = "2.0.14" -jsonrpsee = { version = "0.23.2", features = ["full"] } +jsonrpsee = { version = "0.24.5", features = ["full"] } diff --git a/Dockerfile b/Dockerfile index f2970447..ac5e759b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,11 +9,12 @@ RUN go mod download # BUILD BINARY COPY . . -RUN make build +RUN make build-go # BUILD RUST BIN -FROM --platform=${BUILDPLATFORM} rust:slim-bullseye AS chef +FROM --platform=${BUILDPLATFORM} rust:slim-bookworm AS chef USER root +RUN apt-get update && apt-get install -y openssl pkg-config libssl-dev RUN cargo install cargo-chef WORKDIR /app @@ -41,7 +42,7 @@ RUN cargo build --release --bin cdk # CONTAINER FOR RUNNING BINARY FROM --platform=${BUILDPLATFORM} debian:bookworm-slim -RUN apt-get update && apt-get install -y ca-certificates postgresql-client +RUN apt-get update && apt-get install -y ca-certificates postgresql-client libssl-dev && rm -rf /var/lib/apt/lists/* COPY --from=builder /app/target/release/cdk /usr/local/bin/ COPY --from=build /go/src/github.com/0xPolygon/cdk/target/cdk-node /usr/local/bin/ diff --git a/Makefile b/Makefile index c653ec1e..2adb0c40 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,14 @@ install-linter: check-go check-curl generate-code-from-proto: check-protoc .PHONY: build -build: ## Builds the binary locally into ./dist +build: build-rust build-go ## Builds the binaries locally into ./target + +.PHONY: build-rust +build-rust: + export BUILD_SCRIPT_DISABLED=1 && cargo build --release + +.PHONY: build-go +build-go: $(GOENVVARS) go build -ldflags "all=$(LDFLAGS)" -o $(GOBIN)/$(GOBINARY) $(GOCMD) .PHONY: build-docker diff --git a/README.md b/README.md index 23a4e4e8..9c9480bd 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,30 @@ Setup Kurtosis following this instructions https://github.com/0xPolygon/kurtosis - You can run locally against kurtosis-cdk environment using: [docs/local_debug.md](docs/local_debug.md) +### Build locally + +You can locally build a production release of CDK CLI + cdk-node with: + +``` +make build +``` + +### Run locally + +You can build and run a debug release locally using: + +``` +cargo run +``` + +It will build and run both binaries. +### Running with Kurtosis + +1. Run your kurtosis environment +2. build `cdk-erigon` and make it available in your system's PATH +3. Run `scripts/local_config` +4. 
cargo run -- --config ./tmp/cdk/local_config/test.kurtosis.toml --chain ./tmp/cdk/local_config/genesis.json erigon + ## Contributing Contributions are very welcomed, the guidelines are currently not available (WIP) diff --git a/config/example-config.toml b/config/example-config.toml deleted file mode 100644 index c3e222ed..00000000 --- a/config/example-config.toml +++ /dev/null @@ -1,122 +0,0 @@ -ForkUpgradeBatchNumber = 0 -ForkUpgradeNewForkId = 0 - -[Log] -Environment = "development" # "production" or "development" -Level = "info" -Outputs = ["stderr"] - -[SequenceSender] -IsValidiumMode = false -WaitPeriodSendSequence = "15s" -LastBatchVirtualizationTimeMaxWaitPeriod = "10s" -L1BlockTimestampMargin = "30s" -MaxTxSizeForL1 = 131072 -L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" -PrivateKey = {Path = "./test/sequencer.keystore", Password = "testonly"} -SequencesTxFileName = "sequencesender.json" -GasOffset = 80000 -WaitPeriodPurgeTxFile = "15m" -MaxPendingTx = 1 - [SequenceSender.StreamClient] - Server = "127.0.0.1:6900" - [SequenceSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "./test/sequencer.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - StoragePath = "ethtxmanager.db" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [SequenceSender.EthTxManager.Etherman] - URL = "http://127.0.0.1:32771" - MultiGasProvider = false - L1ChainID = 1337 -[Aggregator] -Host = "0.0.0.0" -Port = 50081 -RetryTime = "5s" -VerifyProofInterval = "10s" -TxProfitabilityCheckerType = "acceptall" -TxProfitabilityMinReward = "1.1" -ProofStatePollingInterval = "5s" -SenderAddress = "" -CleanupLockedProofsInterval = "2m" -GeneratingProofCleanupThreshold = "10m" -ForkId = 9 -GasOffset = 0 -WitnessURL = "localhost:8123" -UseL1BatchData = true -UseFullWitness = false -SettlementBackend = "l1" -AggLayerTxTimeout = "5m" -AggLayerURL = "" -SequencerPrivateKey = {} - [Aggregator.DB] - Name = "aggregator_db" - User = "aggregator_user" - Password = "master_password" - Host = "localhost" - Port = "32780" - EnableLog = false - MaxConns = 200 - [Aggregator.Log] - Environment = "development" # "production" or "development" - Level = "info" - Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "localhost:6900" - [Aggregator.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/pk/aggregator.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - StoragePath = "" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [Aggregator.EthTxManager.Etherman] - URL = "" - L1ChainID = 11155111 - HTTPHeaders = [] - [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" - Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 - [Aggregator.Synchronizer.Synchronizer] - SyncInterval = "10s" - SyncChunkSize = 1000 - GenesisBlockNumber = 5511080 - SyncUpToBlock = "finalized" - BlockFinality = "finalized" - OverrideStorageCheck = false - [Aggregator.Synchronizer.Etherman] - [Aggregator.Synchronizer.Etherman.Validium] - Enabled = false - -[RPC] - -[NetworkConfig.L1] -ChainID = 11155111 -PolAddr = 
"0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E" -ZkEVMAddr = "0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" -RollupManagerAddr = "0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" -GlobalExitRootManagerAddr = "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674" diff --git a/crates/cdk-config/src/aggregator.rs b/crates/cdk-config/src/aggregator.rs new file mode 100644 index 00000000..85a2a06c --- /dev/null +++ b/crates/cdk-config/src/aggregator.rs @@ -0,0 +1,118 @@ +use ethers::types::Address; +use serde::Deserialize; +use url::Url; + +/// The StreamClient configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct StreamClient { + #[serde(rename = "Server")] + pub server: String, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct EthTxManager { + #[serde(rename = "Etherman")] + pub etherman: Etherman, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct Etherman { + #[serde(rename = "URL")] + pub url: String, +} + +/// The Aggregator configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct Aggregator { + #[serde(rename = "ChainID")] + pub chain_id: String, + #[serde(rename = "Host")] + pub host: String, + #[serde(rename = "Port")] + pub port: String, + #[serde(rename = "RetryTime")] + pub retry_time: String, + #[serde(rename = "VerifyProofInterval")] + pub verify_proof_interval: String, + #[serde(rename = "ProofStatePollingInterval")] + pub proof_state_polling_interval: String, + #[serde(rename = "TxProfitabilityCheckerType")] + pub tx_profitability_checker_type: String, + #[serde(rename = "TxProfitabilityMinReward")] + pub tx_profitability_min_reward: String, + #[serde(rename = "IntervalAfterWhichBatchConsolidateAnyway")] + pub interval_after_which_batch_consolidate_anyway: String, + #[serde(rename = "ForkId")] + pub fork_id: u64, + #[serde(rename = "CleanupLockedProofsInterval")] + pub cleanup_locked_proofs_interval: String, + #[serde(rename = "GeneratingProofCleanupThreshold")] + pub generating_proof_cleanup_threshold: String, + #[serde(rename = "GasOffset")] + pub gas_offset: u64, + #[serde(rename = "WitnessURL")] + pub witness_url: Url, + #[serde(rename = "SenderAddress")] + pub sender_address: Address, + #[serde(rename = "SettlementBackend")] + pub settlement_backend: String, + #[serde(rename = "AggLayerTxTimeout")] + pub agg_layer_tx_timeout: String, + #[serde(rename = "AggLayerURL")] + pub agg_layer_url: Url, + #[serde(rename = "UseL1BatchData")] + pub use_l1_batch_data: bool, + #[serde(rename = "UseFullWitness")] + pub use_full_witness: bool, + #[serde(rename = "MaxWitnessRetrievalWorkers")] + pub max_witness_retrieval_workers: u32, + #[serde(rename = "SyncModeOnlyEnabled")] + pub sync_mode_only_enabled: bool, + + #[serde(rename = "StreamClient")] + pub stream_client: StreamClient, + + #[serde(rename = "EthTxManager")] + pub eth_tx_manager: EthTxManager, +} + +#[cfg(any(test, feature = "testutils"))] +impl Default for Aggregator { + fn default() -> Self { + // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 + Self { + chain_id: "1".to_string(), + host: "localhost".to_string(), + port: "8545".to_string(), + retry_time: "10s".to_string(), + verify_proof_interval: "1m".to_string(), + proof_state_polling_interval: "10s".to_string(), + tx_profitability_checker_type: "default".to_string(), + tx_profitability_min_reward: "0.1".to_string(), + interval_after_which_batch_consolidate_anyway: "5m".to_string(), + fork_id: 0, + cleanup_locked_proofs_interval: "1h".to_string(), + generating_proof_cleanup_threshold: "10m".to_string(), + gas_offset: 0, 
+ witness_url: Url::parse("http://localhost:8546").unwrap(), + sender_address: "0x0000000000000000000000000000000000000000" + .parse() + .unwrap(), + settlement_backend: "default".to_string(), + agg_layer_tx_timeout: "30s".to_string(), + agg_layer_url: Url::parse("http://localhost:8547").unwrap(), + use_l1_batch_data: true, + use_full_witness: false, + max_witness_retrieval_workers: 4, + sync_mode_only_enabled: false, + stream_client: StreamClient { + server: "localhost:9092".to_string(), + }, + eth_tx_manager: EthTxManager { + etherman: Etherman { + url: "http://localhost:9093".to_string(), + }, + }, + } + } +} diff --git a/crates/cdk-config/src/l1.rs b/crates/cdk-config/src/l1.rs new file mode 100644 index 00000000..55fb2fb6 --- /dev/null +++ b/crates/cdk-config/src/l1.rs @@ -0,0 +1,39 @@ +use ethers::types::Address; +use serde::Deserialize; + +/// The L1 configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct L1 { + #[serde(rename = "L1ChainID")] + pub l1_chain_id: String, + #[serde(rename = "PolAddr")] + pub pol_addr: Address, + #[serde(rename = "ZkEVMAddr")] + pub zk_evm_addr: Address, + #[serde(rename = "RollupManagerAddr")] + pub rollup_manager_addr: Address, + #[serde(rename = "GlobalExitRootManagerAddr")] + pub global_exit_root_manager_addr: Address, +} + +#[cfg(any(test, feature = "testutils"))] +impl Default for L1 { + fn default() -> Self { + // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 + Self { + l1_chain_id: "1337".to_string(), + pol_addr: "0x5b06837A43bdC3dD9F114558DAf4B26ed49842Ed" + .parse() + .unwrap(), + zk_evm_addr: "0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" + .parse() + .unwrap(), + rollup_manager_addr: "0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" + .parse() + .unwrap(), + global_exit_root_manager_addr: "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674" + .parse() + .unwrap(), + } + } +} diff --git a/crates/cdk-config/src/layer1.rs b/crates/cdk-config/src/layer1.rs deleted file mode 100644 index a5bd19d0..00000000 --- a/crates/cdk-config/src/layer1.rs +++ /dev/null @@ -1,28 +0,0 @@ -use ethers::types::Address; -use serde::Deserialize; -use url::Url; - -/// The L1 configuration. -#[derive(Deserialize, Debug, Clone)] -pub struct Layer1 { - #[serde(rename = "ChainID")] - pub chain_id: u64, - #[serde(rename = "NodeURL")] - pub node_url: Url, - #[serde(rename = "RollupManagerContract")] - pub rollup_manager_contract: Address, -} - -#[cfg(any(test, feature = "testutils"))] -impl Default for Layer1 { - fn default() -> Self { - // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 - Self { - chain_id: 1337, - node_url: "http://zkevm-mock-l1-network:8545".parse().unwrap(), - rollup_manager_contract: "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" - .parse() - .unwrap(), - } - } -} diff --git a/crates/cdk-config/src/lib.rs b/crates/cdk-config/src/lib.rs index 13298073..cb3ecb8f 100644 --- a/crates/cdk-config/src/lib.rs +++ b/crates/cdk-config/src/lib.rs @@ -6,23 +6,33 @@ use serde::Deserialize; pub(crate) const DEFAULT_IP: std::net::Ipv4Addr = std::net::Ipv4Addr::new(0, 0, 0, 0); -pub(crate) mod layer1; +pub(crate) mod aggregator; +pub(crate) mod l1; pub mod log; +pub(crate) mod network_config; +pub(crate) mod sequence_sender; pub(crate) mod telemetry; -pub use layer1::Layer1; pub use log::Log; +use sequence_sender::SequenceSender; /// The Agglayer configuration. 
#[derive(Deserialize, Debug)] #[cfg_attr(any(test, feature = "testutils"), derive(Default))] pub struct Config { - /// A map of Zkevm node RPC endpoints for each rollup. - /// /// The log configuration. #[serde(rename = "Log")] pub log: Log, #[serde(rename = "ForkUpgradeBatchNumber")] pub fork_upgrade_batch_number: Option, + + #[serde(rename = "NetworkConfig")] + pub network_config: network_config::NetworkConfig, + + #[serde(rename = "Aggregator")] + pub aggregator: aggregator::Aggregator, + + #[serde(rename = "SequenceSender")] + pub sequence_sender: SequenceSender, } diff --git a/crates/cdk-config/src/network_config.rs b/crates/cdk-config/src/network_config.rs new file mode 100644 index 00000000..ffabffad --- /dev/null +++ b/crates/cdk-config/src/network_config.rs @@ -0,0 +1,16 @@ +use crate::l1::L1; +use serde::Deserialize; + +/// The L1 configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct NetworkConfig { + #[serde(rename = "L1")] + pub l1: L1, +} + +#[cfg(any(test, feature = "testutils"))] +impl Default for NetworkConfig { + fn default() -> Self { + Self { l1: L1::default() } + } +} diff --git a/crates/cdk-config/src/sequence_sender.rs b/crates/cdk-config/src/sequence_sender.rs new file mode 100644 index 00000000..006547e8 --- /dev/null +++ b/crates/cdk-config/src/sequence_sender.rs @@ -0,0 +1,50 @@ +use serde::Deserialize; + +/// The SequenceSender configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct SequenceSender { + #[serde(rename = "WaitPeriodSendSequence")] + pub wait_period_send_sequence: String, + #[serde(rename = "LastBatchVirtualizationTimeMaxWaitPeriod")] + pub last_batch_virtualization_time_max_wait_period: String, + #[serde(rename = "MaxTxSizeForL1")] + pub max_tx_size_for_l1: u32, + #[serde(rename = "L2Coinbase")] + pub l2_coinbase: String, + #[serde(rename = "SequencesTxFileName")] + pub sequences_tx_file_name: String, + #[serde(rename = "GasOffset")] + pub gas_offset: u64, + #[serde(rename = "WaitPeriodPurgeTxFile")] + pub wait_period_purge_tx_file: String, + #[serde(rename = "MaxPendingTx")] + pub max_pending_tx: u32, + #[serde(rename = "MaxBatchesForL1")] + pub max_batches_for_l1: u32, + #[serde(rename = "BlockFinality")] + pub block_finality: String, + #[serde(rename = "RPCURL")] + pub rpc_url: String, + #[serde(rename = "GetBatchWaitInterval")] + pub get_batch_wait_interval: String, +} + +// Default trait implementation +impl Default for SequenceSender { + fn default() -> Self { + Self { + wait_period_send_sequence: "1s".to_string(), + last_batch_virtualization_time_max_wait_period: "1s".to_string(), + max_tx_size_for_l1: 1000, + l2_coinbase: "0x".to_string(), + sequences_tx_file_name: "sequences_tx.json".to_string(), + gas_offset: 0, + wait_period_purge_tx_file: "1s".to_string(), + max_pending_tx: 1000, + max_batches_for_l1: 100, + block_finality: "1s".to_string(), + rpc_url: "http://localhost:8545".to_string(), + get_batch_wait_interval: "1s".to_string(), + } + } +} diff --git a/crates/cdk-config/src/telemetry.rs b/crates/cdk-config/src/telemetry.rs index 407145f2..728611ce 100644 --- a/crates/cdk-config/src/telemetry.rs +++ b/crates/cdk-config/src/telemetry.rs @@ -1,11 +1,10 @@ -use std::net::SocketAddr; - -use serde::Deserialize; - use super::DEFAULT_IP; +use serde::Deserialize; +use std::net::SocketAddr; #[derive(Deserialize, Debug, Clone, Copy)] #[serde(rename_all = "PascalCase")] +#[allow(dead_code)] pub struct TelemetryConfig { #[serde(rename = "PrometheusAddr", default = "default_metrics_api_addr")] pub addr: SocketAddr, diff --git 
a/crates/cdk/Cargo.toml b/crates/cdk/Cargo.toml
index 913fc492..964d8f83 100644
--- a/crates/cdk/Cargo.toml
+++ b/crates/cdk/Cargo.toml
@@ -13,6 +13,15 @@ execute = "0.2.13"
 toml = "0.8.14"
 tracing.workspace = true
 tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
+url = { workspace = true, features = ["serde"] }
 cdk-config = { path = "../cdk-config" }
+serde.workspace = true
+serde_json.workspace = true
+tempfile = "3.12.0"
+alloy-rpc-client = "0.4.2"
+alloy-transport-http = "0.4.2"
+tokio = "1.40.0"
+reqwest = "0.12.8"
+alloy-json-rpc = "0.4.2"
diff --git a/crates/cdk/build.rs b/crates/cdk/build.rs
index 1a01704a..59fffda7 100644
--- a/crates/cdk/build.rs
+++ b/crates/cdk/build.rs
@@ -23,7 +23,7 @@ fn main() {
     // Call the make command
     let output = Command::new("make")
-        .arg("build") // Create a new make command
+        .arg("build-go") // Create a new make command
         .current_dir(build_path) // Set the current directory for the command
         .output() // Execute the command and capture the output
         .expect("Failed to execute make command");
diff --git a/crates/cdk/src/allocs_render.rs b/crates/cdk/src/allocs_render.rs
new file mode 100644
index 00000000..3b881149
--- /dev/null
+++ b/crates/cdk/src/allocs_render.rs
@@ -0,0 +1,99 @@
+use anyhow::{Context, Result};
+use serde::{Deserialize, Serialize};
+use serde_json::{self, Value};
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::Read;
+use std::path::Path;
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+struct Input {
+    #[serde(rename = "contractName", skip_serializing_if = "Option::is_none")]
+    contract_name: Option<String>,
+    #[serde(rename = "accountName", skip_serializing_if = "Option::is_none")]
+    account_name: Option<String>,
+    balance: String,
+    nonce: String,
+    address: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    bytecode: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    storage: Option<HashMap<String, String>>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Wrapper {
+    pub root: String,
+    #[serde(rename = "L1Config")]
+    pub l1_config: L1Config,
+    genesis: Vec<Input>,
+    #[serde(rename = "rollupCreationBlockNumber")]
+    pub rollup_creation_block_number: u64,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct L1Config {
+    #[serde(rename = "chainId")]
+    pub chain_id: u64,
+    #[serde(rename = "polygonZkEVMGlobalExitRootAddress")]
+    pub zkevm_global_exit_root_address: String,
+    #[serde(rename = "polygonRollupManagerAddress")]
+    pub rollup_manager_address: String,
+    #[serde(rename = "polTokenAddress")]
+    pub pol_token_address: String,
+    #[serde(rename = "polygonZkEVMAddress")]
+    pub zkevm_address: String,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+struct Output {
+    #[serde(rename = "contractName", skip_serializing_if = "Option::is_none")]
+    contract_name: Option<String>,
+    #[serde(rename = "accountName", skip_serializing_if = "Option::is_none")]
+    account_name: Option<String>,
+    balance: Option<String>,
+    nonce: Option<String>,
+    code: Option<String>,
+    storage: Option<Value>,
+}
+
+pub struct Rendered {
+    pub output: String,
+    pub wrapper: Wrapper,
+}
+
+pub fn render_allocs(genesis_file_path: &str) -> Result<Rendered> {
+    let path = Path::new(genesis_file_path);
+    let display = path.display();
+
+    let mut file = File::open(&path).with_context(|| format!("couldn't open {}", display))?;
+
+    let mut data = String::new();
+    file.read_to_string(&mut data)
+        .with_context(|| format!("couldn't read {}", display))?;
+
+    let wrapper: Wrapper = serde_json::from_str(&data)
+        .with_context(|| format!("couldn't parse JSON from {}", display))?;
+
+    let mut outputs: HashMap<String, Output> = HashMap::new();
+
+    for input in wrapper.genesis.clone() {
+        let output = Output {
+            contract_name: input.contract_name,
+            account_name: input.account_name,
+            balance: Some(input.balance),
+            nonce: Some(input.nonce),
+            code: input.bytecode,
+            storage: input.storage.map(|s| serde_json::to_value(s).unwrap()),
+        };
+        outputs.insert(input.address, output);
+    }
+
+    // outputs.sort_by(|a, b| a.contract_name.cmp(&b.contract_name));
+
+    Ok(Rendered {
+        output: serde_json::to_string_pretty(&outputs)
+            .with_context(|| "couldn't serialize outputs to JSON")?,
+        wrapper,
+    })
+}
diff --git a/crates/cdk/src/cli.rs b/crates/cdk/src/cli.rs
index 1bf29d2c..12acc8a8 100644
--- a/crates/cdk/src/cli.rs
+++ b/crates/cdk/src/cli.rs
@@ -11,8 +11,6 @@ pub(crate) struct Cli {
         long,
         short,
         value_hint = ValueHint::FilePath,
-        global = true,
-        default_value = "config/example-config.toml",
         env = "CDK_CONFIG_PATH"
     )]
     pub(crate) config: PathBuf,
@@ -22,8 +20,6 @@ pub(crate) struct Cli {
         long,
         short = 'g',
         value_hint = ValueHint::FilePath,
-        global = true,
-        default_value = "config/genesis.json",
         env = "CDK_GENESIS_PATH"
     )]
     pub(crate) chain: PathBuf,
@@ -34,6 +30,15 @@ pub(crate) struct Cli {
 
 #[derive(Subcommand)]
 pub(crate) enum Commands {
-    Node,
+    Node {
+        /// Components to run.
+        #[arg(
+            long,
+            short,
+            value_hint = ValueHint::CommandString,
+            env = "CDK_COMPONENTS",
+        )]
+        components: Option<String>,
+    },
     Erigon,
 }
diff --git a/crates/cdk/src/config_render.rs b/crates/cdk/src/config_render.rs
new file mode 100644
index 00000000..2c230c52
--- /dev/null
+++ b/crates/cdk/src/config_render.rs
@@ -0,0 +1,130 @@
+use crate::allocs_render::Rendered;
+use anyhow::Error;
+use cdk_config::Config;
+use std::fs;
+use std::path::PathBuf;
+use tempfile::{tempdir, TempDir};
+
+pub fn render(config: &Config, genesis_file: PathBuf, timestamp: u64) -> Result<TempDir, Error> {
+    // Create a temporary directory
+    let tmp_dir = tempdir()?;
+    let chain_id = config.aggregator.chain_id.clone();
+    let res = crate::allocs_render::render_allocs(genesis_file.to_str().unwrap())?;
+    // Write the three files to disk
+    fs::write(
+        tmp_dir
+            .path()
+            .join(format!("dynamic-{}-allocs.json", chain_id.clone())),
+        res.output.clone(),
+    )?;
+    fs::write(
+        tmp_dir
+            .path()
+            .join(format!("dynamic-{}-chainspec.json", chain_id.clone())),
+        render_chainspec(chain_id.clone()),
+    )?;
+    fs::write(
+        tmp_dir
+            .path()
+            .join(format!("dynamic-{}-conf.json", chain_id.clone())),
+        render_conf(res.wrapper.root.clone(), timestamp),
+    )?;
+
+    let contents = render_yaml(config, res);
+    fs::write(
+        tmp_dir
+            .path()
+            .join(format!("dynamic-{}.yaml", chain_id.clone())),
+        contents,
+    )?;
+
+    Ok(tmp_dir)
+}
+
+fn render_chainspec(chain_id: String) -> String {
+    format!(
+        r#"
+{{
+    "ChainName": "dynamic-{chain_id}",
+    "chainId": {chain_id},
+    "consensus": "ethash",
+    "homesteadBlock": 0,
+    "daoForkBlock": 0,
+    "eip150Block": 0,
+    "eip155Block": 0,
+    "byzantiumBlock": 0,
+    "constantinopleBlock": 0,
+    "petersburgBlock": 0,
+    "istanbulBlock": 0,
+    "muirGlacierBlock": 0,
+    "berlinBlock": 0,
+    "londonBlock": 9999999999999999999999999999999999999999999999999,
+    "arrowGlacierBlock": 9999999999999999999999999999999999999999999999999,
+    "grayGlacierBlock": 9999999999999999999999999999999999999999999999999,
+    "terminalTotalDifficulty": 58750000000000000000000,
+    "terminalTotalDifficultyPassed": false,
+    "shanghaiTime": 9999999999999999999999999999999999999999999999999,
+    "cancunTime": 9999999999999999999999999999999999999999999999999,
+    "pragueTime":
9999999999999999999999999999999999999999999999999, + "ethash": {{}} +}} + "# + ) +} + +fn render_conf(root: String, timestamp: u64) -> String { + format!( + r#" +{{ + "root": {:?}, + "timestamp": {:?}, + "gasLimit": 0, + "difficulty": 0 +}} + "#, + root, timestamp + ) +} + +// render_config renders the configuration file for the Erigon node. +fn render_yaml(config: &Config, res: Rendered) -> String { + format!( + r#" +chain: dynamic-{chain_id} +zkevm.l2-chain-id: {chain_id} +zkevm.l2-sequencer-rpc-url: {l2_sequencer_rpc_url} +zkevm.l2-datastreamer-url: {datastreamer_host} +zkevm.l1-chain-id: {l1_chain_id} +zkevm.l1-rpc-url: {l1_rpc_url} +zkevm.address-sequencer: {sequencer_address} +zkevm.address-zkevm: {zkevm_address} +zkevm.address-rollup: {rollup_address} +zkevm.address-ger-manager: {ger_manager_address} +zkevm.l1-matic-contract-address: {pol_token_address} +zkevm.l1-first-block: {l1_first_block} +datadir: ./data/dynamic-{chain_id} + +externalcl: true +http: true +private.api.addr: "localhost:9092" +zkevm.rpc-ratelimit: 250 +zkevm.datastream-version: 3 +http.api: [eth, debug,net,trace,web3,erigon,zkevm] +http.addr: "0.0.0.0" +http.vhosts: any +http.corsdomain: any +ws: true +"#, + chain_id = config.aggregator.chain_id.clone(), + l2_sequencer_rpc_url = config.aggregator.witness_url.to_string(), + datastreamer_host = config.aggregator.stream_client.server, + l1_rpc_url = config.aggregator.eth_tx_manager.etherman.url, + l1_chain_id = config.network_config.l1.l1_chain_id, + sequencer_address = config.sequence_sender.l2_coinbase, + zkevm_address = res.wrapper.l1_config.zkevm_address, + rollup_address = res.wrapper.l1_config.rollup_manager_address, + ger_manager_address = res.wrapper.l1_config.zkevm_global_exit_root_address, + pol_token_address = res.wrapper.l1_config.pol_token_address, + l1_first_block = res.wrapper.rollup_creation_block_number + ) +} diff --git a/crates/cdk/src/main.rs b/crates/cdk/src/main.rs index 99a8a752..7f7f3991 100644 --- a/crates/cdk/src/main.rs +++ b/crates/cdk/src/main.rs @@ -1,4 +1,6 @@ //! Command line interface. +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_client::ReqwestClient; use cdk_config::Config; use clap::Parser; use cli::Cli; @@ -6,28 +8,59 @@ use execute::Execute; use std::env; use std::path::PathBuf; use std::process::Command; -use std::sync::Arc; +use url::Url; +pub mod allocs_render; mod cli; +mod config_render; mod logging; -const CDK_CLIENT_PATH: &str = "cdk-node"; -const CDK_ERIGON_PATH: &str = "cdk-erigon"; +const CDK_CLIENT_BIN: &str = "cdk-node"; +const CDK_ERIGON_BIN: &str = "cdk-erigon"; -fn main() -> anyhow::Result<()> { +#[tokio::main] +async fn main() -> anyhow::Result<()> { dotenvy::dotenv().ok(); let cli = Cli::parse(); + // Read the config + let config = read_config(cli.config.clone())?; + + // Initialize the logger + logging::tracing(&config.log); + + println!( + r#"🐼 + _____ _ _____ _____ _ __ + | __ \ | | / ____| __ \| |/ / + | |__) |__ | |_ _ __ _ ___ _ __ | | | | | | ' / + | ___/ _ \| | | | |/ _` |/ _ \| '_ \ | | | | | | < + | | | (_) | | |_| | (_| | (_) | | | | | |____| |__| | . 
\
+ |_| \___/|_|\__, |\__, |\___/|_| |_| \_____|_____/|_|\_\
+ __/ | __/ |
+ |___/ |___/
+"#
+    );
+
     match cli.cmd {
-        cli::Commands::Node {} => node(cli.config)?,
-        cli::Commands::Erigon {} => erigon(cli.config)?,
+        cli::Commands::Node { components } => node(cli.config, components)?,
+        cli::Commands::Erigon {} => erigon(config, cli.chain).await?,
         // _ => forward()?,
     }
 
     Ok(())
 }
 
+// read_config reads the configuration file and returns the configuration.
+fn read_config(config_path: PathBuf) -> anyhow::Result<Config> {
+    let config = std::fs::read_to_string(config_path)
+        .map_err(|e| anyhow::anyhow!("Failed to read configuration file: {}", e))?;
+    let config: Config = toml::from_str(&config)?;
+
+    Ok(config)
+}
+
 /// This is the main node entrypoint.
 ///
 /// This function starts everything needed to run an Agglayer node.
@@ -36,29 +69,27 @@ fn main() -> anyhow::Result<()> {
 ///
 /// This function returns on fatal error or after graceful shutdown has
 /// completed.
-pub fn node(config_path: PathBuf) -> anyhow::Result<()> {
-    // Load the configuration file
-    let config_read = std::fs::read_to_string(config_path.clone());
-    let toml_str = match config_read {
-        Ok(toml) => toml,
-        Err(e) => {
-            eprintln!(
-                "Failed to read configuration file, from path: {}",
-                config_path.to_str().unwrap()
-            );
-            return Err(e.into());
-        }
-    };
-    let config: Arc<Config> = Arc::new(toml::from_str(&toml_str)?);
-
-    let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_CLIENT_PATH.into());
-    if bin_path != CDK_CLIENT_PATH {
-        bin_path = format!("{}/../../{}", bin_path, CDK_CLIENT_PATH);
+pub fn node(config_path: PathBuf, components: Option<String>) -> anyhow::Result<()> {
+    // This is to find the erigon binary when running in development mode
+    // otherwise it will use system path
+    let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_CLIENT_BIN.into());
+    if bin_path != CDK_CLIENT_BIN {
+        bin_path = format!("{}/../../target/{}", bin_path, CDK_CLIENT_BIN);
     }
 
+    let components_param = match components {
+        Some(components) => format!("-components={}", components),
+        None => "".to_string(),
+    };
+
     // Run the node passing the config file path as argument
     let mut command = Command::new(bin_path.clone());
-    command.args(&["run", "-cfg", config_path.canonicalize()?.to_str().unwrap()]);
+    command.args(&[
+        "run",
+        "-cfg",
+        config_path.canonicalize()?.to_str().unwrap(),
+        components_param.as_str(),
+    ]);
 
     let output_result = command.execute_output();
     let output = match output_result {
@@ -82,38 +113,40 @@ pub fn node(config_path: PathBuf) -> anyhow::Result<()> {
         eprintln!("Interrupted!");
     }
 
-    // Initialize the logger
-    logging::tracing(&config.log);
-
     Ok(())
 }
 
 /// This is the main erigon entrypoint.
 /// This function starts everything needed to run an Erigon node.
-pub fn erigon(config_path: PathBuf) -> anyhow::Result<()> {
-    // Load the configuration file
-    let _config: Arc<Config> = Arc::new(toml::from_str(&std::fs::read_to_string(
-        config_path.clone(),
-    )?)?);
-
-    let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_ERIGON_PATH.into());
-    if bin_path != CDK_ERIGON_PATH {
-        bin_path = format!("{}/../../{}", bin_path, CDK_ERIGON_PATH);
-    }
-
-    let mut command = Command::new(bin_path);
-
-    // TODO: 1.
Prepare erigon config files or flags - - command.args(&["--config", config_path.to_str().unwrap()]); - - let output = command.execute_output().unwrap(); +pub async fn erigon(config: Config, genesis_file: PathBuf) -> anyhow::Result<()> { + // Render configuration files + let chain_id = config.aggregator.chain_id.clone(); + let rpc_url = Url::parse(&config.sequence_sender.rpc_url).unwrap(); + let timestamp = get_timestamp(rpc_url).await.unwrap(); + let erigon_config_path = config_render::render(&config, genesis_file, timestamp)?; + + println!("Starting erigon with config: {:?}", erigon_config_path); + + // Run cdk-erigon in system path + let output = Command::new(CDK_ERIGON_BIN) + .args(&[ + "--config", + erigon_config_path + .path() + .join(format!("dynamic-{}.yaml", chain_id)) + .to_str() + .unwrap(), + ]) + .execute_output() + .unwrap(); if let Some(exit_code) = output.status.code() { - if exit_code == 0 { - println!("Ok."); - } else { - eprintln!("Failed."); + if exit_code != 0 { + eprintln!( + "Failed. Leaving configuration files in: {:?}", + erigon_config_path + ); + std::process::exit(1); } } else { eprintln!("Interrupted!"); @@ -121,3 +154,25 @@ pub fn erigon(config_path: PathBuf) -> anyhow::Result<()> { Ok(()) } + +/// Call the rpc server to retrieve the first batch timestamp +async fn get_timestamp(url: Url) -> Result { + // Instantiate a new client over a transport. + let client: ReqwestClient = ClientBuilder::default().http(url); + + // Prepare a request to the server. + let request = client.request("zkevm_getBatchByNumber", vec!["0"]); + + // Poll the request to completion. + let batch_json: Batch = request.await.unwrap(); + + // Parse the timestamp hex string into u64. + let ts = u64::from_str_radix(batch_json.timestamp.trim_start_matches("0x"), 16)?; + + Ok(ts) +} + +#[derive(serde::Deserialize, Debug, Clone)] +struct Batch { + timestamp: String, +} From d94418f9bed467d9a00233a384771dbc63615bee Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Wed, 9 Oct 2024 09:46:55 +0200 Subject: [PATCH 31/53] ci: fix release tags (#114) * ci: fix release tags * ci: fix regex for release --- .github/workflows/release.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b22f8710..f388cdd0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,10 +1,10 @@ name: Release on: - push: - tags: - # run only against tags that follow semver (https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string) - - 'v[0-9]+.[0-9]+.[0-9]+*' + push: + tags: + - 'v*.*.*' + - 'v*.*.*-*' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -33,9 +33,6 @@ jobs: TAGS: ${{ steps.meta.outputs.tags }} VERSION: ${{ steps.meta.outputs.version }} steps: - - name: validate tag - run: echo ${{ github.ref_name }} | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$' - - name: Prepare run: | platform=${{ matrix.platform }} From f8bd4d944834d6bb8f5c898b16d2db6b5d4e710f Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Wed, 9 Oct 2024 12:45:49 +0200 Subject: [PATCH 32/53] refactor: accept empty config file (#118) This is necessary because we're not going to check for any values at this point. 
--- crates/cdk-config/src/aggregator.rs | 92 ++++++++++++++++-------- crates/cdk-config/src/sequence_sender.rs | 24 +++---- 2 files changed, 74 insertions(+), 42 deletions(-) diff --git a/crates/cdk-config/src/aggregator.rs b/crates/cdk-config/src/aggregator.rs index 85a2a06c..a5c428c7 100644 --- a/crates/cdk-config/src/aggregator.rs +++ b/crates/cdk-config/src/aggregator.rs @@ -5,77 +5,111 @@ use url::Url; /// The StreamClient configuration. #[derive(Deserialize, Debug, Clone)] pub struct StreamClient { - #[serde(rename = "Server")] + #[serde(rename = "Server", default)] pub server: String, } +impl Default for StreamClient { + fn default() -> Self { + Self { + server: "localhost:9092".to_string(), + } + } +} + #[derive(Deserialize, Debug, Clone)] pub struct EthTxManager { #[serde(rename = "Etherman")] pub etherman: Etherman, } +impl Default for EthTxManager { + fn default() -> Self { + Self { + etherman: Etherman::default(), + } + } +} + #[derive(Deserialize, Debug, Clone)] pub struct Etherman { - #[serde(rename = "URL")] + #[serde(rename = "URL", default)] pub url: String, } +impl Default for Etherman { + fn default() -> Self { + Self { + url: "http://localhost:8545".to_string(), + } + } +} + /// The Aggregator configuration. #[derive(Deserialize, Debug, Clone)] pub struct Aggregator { - #[serde(rename = "ChainID")] + #[serde(rename = "ChainID", default)] pub chain_id: String, - #[serde(rename = "Host")] + #[serde(rename = "Host", default)] pub host: String, - #[serde(rename = "Port")] + #[serde(rename = "Port", default)] pub port: String, - #[serde(rename = "RetryTime")] + #[serde(rename = "RetryTime", default)] pub retry_time: String, - #[serde(rename = "VerifyProofInterval")] + #[serde(rename = "VerifyProofInterval", default)] pub verify_proof_interval: String, - #[serde(rename = "ProofStatePollingInterval")] + #[serde(rename = "ProofStatePollingInterval", default)] pub proof_state_polling_interval: String, - #[serde(rename = "TxProfitabilityCheckerType")] + #[serde(rename = "TxProfitabilityCheckerType", default)] pub tx_profitability_checker_type: String, - #[serde(rename = "TxProfitabilityMinReward")] + #[serde(rename = "TxProfitabilityMinReward", default)] pub tx_profitability_min_reward: String, - #[serde(rename = "IntervalAfterWhichBatchConsolidateAnyway")] + #[serde(rename = "IntervalAfterWhichBatchConsolidateAnyway", default)] pub interval_after_which_batch_consolidate_anyway: String, - #[serde(rename = "ForkId")] + #[serde(rename = "ForkId", default)] pub fork_id: u64, - #[serde(rename = "CleanupLockedProofsInterval")] + #[serde(rename = "CleanupLockedProofsInterval", default)] pub cleanup_locked_proofs_interval: String, - #[serde(rename = "GeneratingProofCleanupThreshold")] + #[serde(rename = "GeneratingProofCleanupThreshold", default)] pub generating_proof_cleanup_threshold: String, - #[serde(rename = "GasOffset")] + #[serde(rename = "GasOffset", default)] pub gas_offset: u64, - #[serde(rename = "WitnessURL")] + #[serde(rename = "WitnessURL", default = "default_url")] pub witness_url: Url, - #[serde(rename = "SenderAddress")] + #[serde(rename = "SenderAddress", default = "default_address")] pub sender_address: Address, - #[serde(rename = "SettlementBackend")] + #[serde(rename = "SettlementBackend", default)] pub settlement_backend: String, - #[serde(rename = "AggLayerTxTimeout")] + #[serde(rename = "AggLayerTxTimeout", default)] pub agg_layer_tx_timeout: String, - #[serde(rename = "AggLayerURL")] + #[serde(rename = "AggLayerURL", default = "default_url")] pub agg_layer_url: 
Url, - #[serde(rename = "UseL1BatchData")] + #[serde(rename = "UseL1BatchData", default)] pub use_l1_batch_data: bool, - #[serde(rename = "UseFullWitness")] + #[serde(rename = "UseFullWitness", default)] pub use_full_witness: bool, - #[serde(rename = "MaxWitnessRetrievalWorkers")] + #[serde(rename = "MaxWitnessRetrievalWorkers", default)] pub max_witness_retrieval_workers: u32, - #[serde(rename = "SyncModeOnlyEnabled")] + #[serde(rename = "SyncModeOnlyEnabled", default)] pub sync_mode_only_enabled: bool, - #[serde(rename = "StreamClient")] + #[serde(rename = "StreamClient", default)] pub stream_client: StreamClient, - #[serde(rename = "EthTxManager")] + #[serde(rename = "EthTxManager", default)] pub eth_tx_manager: EthTxManager, } +fn default_url() -> Url { + Url::parse("http://localhost:8546").unwrap() +} + +fn default_address() -> Address { + "0x0000000000000000000000000000000000000000" + .parse() + .unwrap() +} + #[cfg(any(test, feature = "testutils"))] impl Default for Aggregator { fn default() -> Self { @@ -94,10 +128,8 @@ impl Default for Aggregator { cleanup_locked_proofs_interval: "1h".to_string(), generating_proof_cleanup_threshold: "10m".to_string(), gas_offset: 0, - witness_url: Url::parse("http://localhost:8546").unwrap(), - sender_address: "0x0000000000000000000000000000000000000000" - .parse() - .unwrap(), + witness_url: default_url(), + sender_address: default_address(), settlement_backend: "default".to_string(), agg_layer_tx_timeout: "30s".to_string(), agg_layer_url: Url::parse("http://localhost:8547").unwrap(), diff --git a/crates/cdk-config/src/sequence_sender.rs b/crates/cdk-config/src/sequence_sender.rs index 006547e8..c4e83cc5 100644 --- a/crates/cdk-config/src/sequence_sender.rs +++ b/crates/cdk-config/src/sequence_sender.rs @@ -3,29 +3,29 @@ use serde::Deserialize; /// The SequenceSender configuration. 
#[derive(Deserialize, Debug, Clone)] pub struct SequenceSender { - #[serde(rename = "WaitPeriodSendSequence")] + #[serde(rename = "WaitPeriodSendSequence", default)] pub wait_period_send_sequence: String, - #[serde(rename = "LastBatchVirtualizationTimeMaxWaitPeriod")] + #[serde(rename = "LastBatchVirtualizationTimeMaxWaitPeriod", default)] pub last_batch_virtualization_time_max_wait_period: String, - #[serde(rename = "MaxTxSizeForL1")] + #[serde(rename = "MaxTxSizeForL1", default)] pub max_tx_size_for_l1: u32, - #[serde(rename = "L2Coinbase")] + #[serde(rename = "L2Coinbase", default)] pub l2_coinbase: String, - #[serde(rename = "SequencesTxFileName")] + #[serde(rename = "SequencesTxFileName", default)] pub sequences_tx_file_name: String, - #[serde(rename = "GasOffset")] + #[serde(rename = "GasOffset", default)] pub gas_offset: u64, - #[serde(rename = "WaitPeriodPurgeTxFile")] + #[serde(rename = "WaitPeriodPurgeTxFile", default)] pub wait_period_purge_tx_file: String, - #[serde(rename = "MaxPendingTx")] + #[serde(rename = "MaxPendingTx", default)] pub max_pending_tx: u32, - #[serde(rename = "MaxBatchesForL1")] + #[serde(rename = "MaxBatchesForL1", default)] pub max_batches_for_l1: u32, - #[serde(rename = "BlockFinality")] + #[serde(rename = "BlockFinality", default)] pub block_finality: String, - #[serde(rename = "RPCURL")] + #[serde(rename = "RPCURL", default)] pub rpc_url: String, - #[serde(rename = "GetBatchWaitInterval")] + #[serde(rename = "GetBatchWaitInterval", default)] pub get_batch_wait_interval: String, } From 999b7b6c373fef459c5b4edacd70a7ed73aa8510 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Wed, 9 Oct 2024 16:10:44 +0200 Subject: [PATCH 33/53] refactor: set default on all config values (#119) --- crates/cdk-config/src/aggregator.rs | 1 - crates/cdk-config/src/l1.rs | 11 +++++------ crates/cdk-config/src/lib.rs | 8 ++++---- crates/cdk-config/src/network_config.rs | 3 +-- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/crates/cdk-config/src/aggregator.rs b/crates/cdk-config/src/aggregator.rs index a5c428c7..2e059a2f 100644 --- a/crates/cdk-config/src/aggregator.rs +++ b/crates/cdk-config/src/aggregator.rs @@ -110,7 +110,6 @@ fn default_address() -> Address { .unwrap() } -#[cfg(any(test, feature = "testutils"))] impl Default for Aggregator { fn default() -> Self { // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 diff --git a/crates/cdk-config/src/l1.rs b/crates/cdk-config/src/l1.rs index 55fb2fb6..4c339b98 100644 --- a/crates/cdk-config/src/l1.rs +++ b/crates/cdk-config/src/l1.rs @@ -4,19 +4,18 @@ use serde::Deserialize; /// The L1 configuration. 
#[derive(Deserialize, Debug, Clone)] pub struct L1 { - #[serde(rename = "L1ChainID")] + #[serde(rename = "L1ChainID", alias = "ChainID", default)] pub l1_chain_id: String, - #[serde(rename = "PolAddr")] + #[serde(rename = "PolAddr", default)] pub pol_addr: Address, - #[serde(rename = "ZkEVMAddr")] + #[serde(rename = "ZkEVMAddr", default)] pub zk_evm_addr: Address, - #[serde(rename = "RollupManagerAddr")] + #[serde(rename = "RollupManagerAddr", default)] pub rollup_manager_addr: Address, - #[serde(rename = "GlobalExitRootManagerAddr")] + #[serde(rename = "GlobalExitRootManagerAddr", default)] pub global_exit_root_manager_addr: Address, } -#[cfg(any(test, feature = "testutils"))] impl Default for L1 { fn default() -> Self { // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 diff --git a/crates/cdk-config/src/lib.rs b/crates/cdk-config/src/lib.rs index cb3ecb8f..25478bb7 100644 --- a/crates/cdk-config/src/lib.rs +++ b/crates/cdk-config/src/lib.rs @@ -21,18 +21,18 @@ use sequence_sender::SequenceSender; #[cfg_attr(any(test, feature = "testutils"), derive(Default))] pub struct Config { /// The log configuration. - #[serde(rename = "Log")] + #[serde(rename = "Log", default)] pub log: Log, #[serde(rename = "ForkUpgradeBatchNumber")] pub fork_upgrade_batch_number: Option, - #[serde(rename = "NetworkConfig")] + #[serde(rename = "NetworkConfig", default)] pub network_config: network_config::NetworkConfig, - #[serde(rename = "Aggregator")] + #[serde(rename = "Aggregator", default)] pub aggregator: aggregator::Aggregator, - #[serde(rename = "SequenceSender")] + #[serde(rename = "SequenceSender", default)] pub sequence_sender: SequenceSender, } diff --git a/crates/cdk-config/src/network_config.rs b/crates/cdk-config/src/network_config.rs index ffabffad..3f49b786 100644 --- a/crates/cdk-config/src/network_config.rs +++ b/crates/cdk-config/src/network_config.rs @@ -4,11 +4,10 @@ use serde::Deserialize; /// The L1 configuration. 
#[derive(Deserialize, Debug, Clone)] pub struct NetworkConfig { - #[serde(rename = "L1")] + #[serde(rename = "L1", default)] pub l1: L1, } -#[cfg(any(test, feature = "testutils"))] impl Default for NetworkConfig { fn default() -> Self { Self { l1: L1::default() } From 3a8a8808fa89582f78d100b927675a315495f7ae Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:28:29 +0530 Subject: [PATCH 34/53] test: add test for aggregator (#100) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: mockery setuo * feat: make interface exported * test: add aggregator e2e test * test: wip * test: wip * fix: tests * fix: tests * test: wip * test: refactored * fix: Makefile * fix: lint * fix: test * fix: race condition * fix: apply feedback * fix: apply feedback * fix: apply feedback and remove cyclic dependency * fix: remove comments --------- Co-authored-by: Toni Ramírez --- .gitignore | 2 + aggregator/{ => agglayer}/agglayer_client.go | 2 +- aggregator/{ => agglayer}/agglayer_tx.go | 2 +- aggregator/aggregator.go | 36 +- aggregator/aggregator_test.go | 1573 ++++++++++++++++++ aggregator/config.go | 4 +- aggregator/interfaces.go | 60 +- aggregator/mocks/mock_StreamClient.go | 247 +++ aggregator/mocks/mock_agglayer_client.go | 79 + aggregator/mocks/mock_dbtx.go | 350 ++++ aggregator/mocks/mock_eth_tx_manager.go | 258 +++ aggregator/mocks/mock_etherman.go | 210 +++ aggregator/mocks/mock_prover.go | 271 +++ aggregator/mocks/mock_state.go | 406 +++++ aggregator/mocks/mock_synchronizer.go | 321 ++++ aggregator/profitabilitychecker.go | 10 +- test/Makefile | 16 +- 17 files changed, 3813 insertions(+), 34 deletions(-) rename aggregator/{ => agglayer}/agglayer_client.go (99%) rename aggregator/{ => agglayer}/agglayer_tx.go (98%) create mode 100644 aggregator/aggregator_test.go create mode 100644 aggregator/mocks/mock_StreamClient.go create mode 100644 aggregator/mocks/mock_agglayer_client.go create mode 100644 aggregator/mocks/mock_dbtx.go create mode 100644 aggregator/mocks/mock_eth_tx_manager.go create mode 100644 aggregator/mocks/mock_etherman.go create mode 100644 aggregator/mocks/mock_prover.go create mode 100644 aggregator/mocks/mock_state.go create mode 100644 aggregator/mocks/mock_synchronizer.go diff --git a/.gitignore b/.gitignore index fe96efc4..ce4e0058 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,8 @@ book/ index.html tmp .vscode +coverage.out +coverage.html .idea .idea/* diff --git a/aggregator/agglayer_client.go b/aggregator/agglayer/agglayer_client.go similarity index 99% rename from aggregator/agglayer_client.go rename to aggregator/agglayer/agglayer_client.go index 4726ccc1..dbe48fb2 100644 --- a/aggregator/agglayer_client.go +++ b/aggregator/agglayer/agglayer_client.go @@ -1,4 +1,4 @@ -package aggregator +package agglayer import ( "context" diff --git a/aggregator/agglayer_tx.go b/aggregator/agglayer/agglayer_tx.go similarity index 98% rename from aggregator/agglayer_tx.go rename to aggregator/agglayer/agglayer_tx.go index 30a483ae..f024f570 100644 --- a/aggregator/agglayer_tx.go +++ b/aggregator/agglayer/agglayer_tx.go @@ -1,4 +1,4 @@ -package aggregator +package agglayer import ( "crypto/ecdsa" diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 1c07d340..4d887136 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -16,6 +16,7 @@ import ( "github.com/0xPolygon/cdk-rpc/rpc" cdkTypes "github.com/0xPolygon/cdk-rpc/types" + 
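The unit tests added by this patch exercise the aggregator through mockery-generated doubles that live under aggregator/mocks. Below is a minimal, self-contained sketch of that usage pattern; the test name is illustrative, while the constructor, method signature, and types follow the mocks and state package referenced later in this patch:

package aggregator

import (
	"context"
	"testing"

	mocks "github.com/0xPolygon/cdk/aggregator/mocks"
	"github.com/0xPolygon/cdk/state"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func Test_stateMockSketch(t *testing.T) {
	stateMock := mocks.NewStateInterfaceMock(t)

	// Expect exactly one GetBatch call for batch 42, outside of a db transaction.
	stateMock.On("GetBatch", mock.Anything, uint64(42), nil).
		Return(&state.DBBatch{Batch: state.Batch{BatchNumber: 42}}, nil).
		Once()

	dbBatch, err := stateMock.GetBatch(context.Background(), 42, nil)
	require.NoError(t, err)
	require.Equal(t, uint64(42), dbBatch.Batch.BatchNumber)

	// Verify every expectation declared above was met.
	stateMock.AssertExpectations(t)
}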
"github.com/0xPolygon/cdk/aggregator/agglayer" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" @@ -66,10 +67,10 @@ type Aggregator struct { cfg Config logger *log.Logger - state stateInterface - etherman etherman - ethTxManager *ethtxmanager.Client - streamClient *datastreamer.StreamClient + state StateInterface + etherman Etherman + ethTxManager EthTxManagerClient + streamClient StreamClient l1Syncr synchronizer.Synchronizer halted atomic.Bool @@ -97,7 +98,7 @@ type Aggregator struct { exit context.CancelFunc sequencerPrivateKey *ecdsa.PrivateKey - aggLayerClient AgglayerClientInterface + aggLayerClient agglayer.AgglayerClientInterface } // New creates a new aggregator. @@ -105,8 +106,8 @@ func New( ctx context.Context, cfg Config, logger *log.Logger, - stateInterface stateInterface, - etherman etherman) (*Aggregator, error) { + stateInterface StateInterface, + etherman Etherman) (*Aggregator, error) { var profitabilityChecker aggregatorTxProfitabilityChecker switch cfg.TxProfitabilityCheckerType { @@ -167,12 +168,12 @@ func New( } var ( - aggLayerClient AgglayerClientInterface + aggLayerClient agglayer.AgglayerClientInterface sequencerPrivateKey *ecdsa.PrivateKey ) if !cfg.SyncModeOnlyEnabled && cfg.SettlementBackend == AggLayer { - aggLayerClient = NewAggLayerClient(cfg.AggLayerURL) + aggLayerClient = agglayer.NewAggLayerClient(cfg.AggLayerURL) sequencerPrivateKey, err = newKeyFromKeystore(cfg.SequencerPrivateKey) if err != nil { @@ -921,10 +922,11 @@ func (a *Aggregator) settleWithAggLayer( inputs ethmanTypes.FinalProofInputs) bool { proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x") proofBytes := common.Hex2Bytes(proofStrNo0x) - tx := Tx{ + + tx := agglayer.Tx{ LastVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumber - 1), NewVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumberFinal), - ZKP: ZKP{ + ZKP: agglayer.ZKP{ NewStateRoot: common.BytesToHash(inputs.NewStateRoot), NewLocalExitRoot: common.BytesToHash(inputs.NewLocalExitRoot), Proof: cdkTypes.ArgBytes(proofBytes), @@ -1013,7 +1015,7 @@ func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Cont // buildFinalProof builds and return the final proof for an aggregated/batch proof. func (a *Aggregator) buildFinalProof( - ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) { + ctx context.Context, prover ProverInterface, proof *state.Proof) (*prover.FinalProof, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1059,7 +1061,7 @@ func (a *Aggregator) buildFinalProof( // build the final proof. If no proof is provided it looks for a previously // generated proof. If the proof is eligible, then the final proof generation // is triggered. -func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterface, proof *state.Proof) (bool, error) { +func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover ProverInterface, proof *state.Proof) (bool, error) { proverName := prover.Name() proverID := prover.ID() @@ -1245,7 +1247,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. 
} func (a *Aggregator) getAndLockProofsToAggregate( - ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) { + ctx context.Context, prover ProverInterface) (*state.Proof, *state.Proof, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1293,7 +1295,7 @@ func (a *Aggregator) getAndLockProofsToAggregate( return proof1, proof2, nil } -func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterface) (bool, error) { +func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterface) (bool, error) { proverName := prover.Name() proverID := prover.ID() @@ -1458,7 +1460,7 @@ func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumb } func (a *Aggregator) getAndLockBatchToProve( - ctx context.Context, prover proverInterface, + ctx context.Context, prover ProverInterface, ) (*state.Batch, []byte, *state.Proof, error) { proverID := prover.ID() proverName := prover.Name() @@ -1574,7 +1576,7 @@ func (a *Aggregator) getAndLockBatchToProve( return &dbBatch.Batch, dbBatch.Witness, proof, nil } -func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInterface) (bool, error) { +func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInterface) (bool, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go new file mode 100644 index 00000000..f906ebbb --- /dev/null +++ b/aggregator/aggregator_test.go @@ -0,0 +1,1573 @@ +package aggregator + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + mocks "github.com/0xPolygon/cdk/aggregator/mocks" + "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/cdk/state/datastream" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +var ( + proofID = "proofId" + proof = "proof" + proverName = "proverName" + proverID = "proverID" +) + +type mox struct { + stateMock *mocks.StateInterfaceMock + ethTxManager *mocks.EthTxManagerClientMock + etherman *mocks.EthermanMock + proverMock *mocks.ProverInterfaceMock + aggLayerClientMock *mocks.AgglayerClientInterfaceMock +} + +func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { + t.Helper() + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + case <-time.After(timeout): + t.Fatalf("WaitGroup not done, test time expired after %s", timeout) + } +} + +func Test_resetCurrentBatchData(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentBatchStreamData: []byte("test"), + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: 1, + ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, + Transactions: []state.L2TxRaw{}, + }, + }, + }, + currentStreamL2Block: state.L2BlockRaw{}, + } + + a.resetCurrentBatchData() + + assert.Equal(t, []byte{}, 
a.currentBatchStreamData) + assert.Equal(t, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}, a.currentStreamBatchRaw) + assert.Equal(t, state.L2BlockRaw{}, a.currentStreamL2Block) +} + +func Test_handleReorg(t *testing.T) { + t.Parallel() + + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + mockState := new(mocks.StateInterfaceMock) + reorgData := synchronizer.ReorgExecutionResult{} + + a := &Aggregator{ + l1Syncr: mockL1Syncr, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + ctx: context.Background(), + } + + mockL1Syncr.On("GetLastestVirtualBatchNumber", mock.Anything).Return(uint64(100), nil).Once() + mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, uint64(100), mock.Anything).Return(nil).Once() + + go a.handleReorg(reorgData) + time.Sleep(3 * time.Second) + + assert.True(t, a.halted.Load()) + mockState.AssertExpectations(t) + mockL1Syncr.AssertExpectations(t) +} + +func Test_handleRollbackBatches(t *testing.T) { + t.Parallel() + + mockStreamClient := new(mocks.StreamClientMock) + mockEtherman := new(mocks.EthermanMock) + mockState := new(mocks.StateInterfaceMock) + + // Test data + rollbackData := synchronizer.RollbackBatchesData{ + LastBatchNumber: 100, + } + + mockStreamClient.On("IsStarted").Return(true).Once() + mockStreamClient.On("ResetProcessEntryFunc").Return().Once() + mockStreamClient.On("SetProcessEntryFunc", mock.Anything).Return().Once() + mockStreamClient.On("ExecCommandStop").Return(nil).Once() + mockStreamClient.On("Start").Return(nil).Once() + mockStreamClient.On("ExecCommandStartBookmark", mock.Anything).Return(nil).Once() + mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() + mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() + mockState.On("DeleteBatchesOlderThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, rollbackData.LastBatchNumber+1, mock.AnythingOfType("uint64"), nil).Return(nil).Once() + + a := Aggregator{ + ctx: context.Background(), + streamClient: mockStreamClient, + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + streamClientMutex: &sync.Mutex{}, + currentBatchStreamData: []byte{}, + currentStreamBatchRaw: state.BatchRawV2{}, + currentStreamL2Block: state.L2BlockRaw{}, + } + + a.halted.Store(false) + a.handleRollbackBatches(rollbackData) + + assert.False(t, a.halted.Load()) + mockStreamClient.AssertExpectations(t) + mockEtherman.AssertExpectations(t) + mockState.AssertExpectations(t) +} + +func Test_handleReceivedDataStream_BatchStart(t *testing.T) { + t.Parallel() + + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + agg := Aggregator{ + state: mockState, + l1Syncr: mockL1Syncr, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + currentStreamBatch: state.Batch{}, + } + + // Prepare a FileEntry for Batch Start + batchStartData, err := proto.Marshal(&datastream.BatchStart{ + Number: 1, + ChainId: 2, + ForkId: 3, + Type: datastream.BatchType_BATCH_TYPE_REGULAR, + }) + assert.NoError(t, err) + + batchStartEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START), + Data: batchStartData, + } + + // Test the handleReceivedDataStream for Batch Start + err = 
agg.handleReceivedDataStream(batchStartEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, agg.currentStreamBatch.BatchNumber, uint64(1)) + assert.Equal(t, agg.currentStreamBatch.ChainID, uint64(2)) + assert.Equal(t, agg.currentStreamBatch.ForkID, uint64(3)) + assert.Equal(t, agg.currentStreamBatch.Type, datastream.BatchType_BATCH_TYPE_REGULAR) +} + +func Test_handleReceivedDataStream_BatchEnd(t *testing.T) { + t.Parallel() + + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + a := Aggregator{ + state: mockState, + l1Syncr: mockL1Syncr, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + currentStreamBatch: state.Batch{ + BatchNumber: uint64(2), + Type: datastream.BatchType_BATCH_TYPE_REGULAR, + Coinbase: common.Address{}, + }, + currentStreamL2Block: state.L2BlockRaw{ + BlockNumber: uint64(10), + }, + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: uint64(9), + ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, + Transactions: []state.L2TxRaw{}, + }, + }, + }, + cfg: Config{ + UseL1BatchData: false, + }, + } + + batchEndData, err := proto.Marshal(&datastream.BatchEnd{ + Number: 1, + LocalExitRoot: []byte{1, 2, 3}, + StateRoot: []byte{4, 5, 6}, + Debug: nil, + }) + assert.NoError(t, err) + + batchEndEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_END), + Data: batchEndData, + } + + mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber-1, nil). + Return(&state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + }, nil).Once() + mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber, nil). + Return(&state.DBBatch{ + Witness: []byte("test_witness"), + }, nil).Once() + mockState.On("AddBatch", mock.Anything, mock.Anything, nil).Return(nil).Once() + mockL1Syncr.On("GetVirtualBatchByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). + Return(&synchronizer.VirtualBatch{BatchL2Data: []byte{1, 2, 3}}, nil).Once() + mockL1Syncr.On("GetSequenceByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). 
+ Return(&synchronizer.SequencedBatches{ + L1InfoRoot: common.Hash{}, + Timestamp: time.Now(), + }, nil).Once() + + err = a.handleReceivedDataStream(batchEndEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, a.currentBatchStreamData, []byte{}) + assert.Equal(t, a.currentStreamBatchRaw, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}) + assert.Equal(t, a.currentStreamL2Block, state.L2BlockRaw{}) + + mockState.AssertExpectations(t) + mockL1Syncr.AssertExpectations(t) +} + +func Test_handleReceivedDataStream_L2Block(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentStreamL2Block: state.L2BlockRaw{ + BlockNumber: uint64(9), + }, + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{}, + }, + currentStreamBatch: state.Batch{}, + } + + // Mock data for L2Block + l2Block := &datastream.L2Block{ + Number: uint64(10), + DeltaTimestamp: uint32(5), + L1InfotreeIndex: uint32(1), + Coinbase: []byte{0x01}, + GlobalExitRoot: []byte{0x02}, + } + + l2BlockData, err := proto.Marshal(l2Block) + assert.NoError(t, err) + + l2BlockEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_L2_BLOCK), + Data: l2BlockData, + } + + err = a.handleReceivedDataStream(l2BlockEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, uint64(10), a.currentStreamL2Block.BlockNumber) + assert.Equal(t, uint32(5), a.currentStreamL2Block.ChangeL2BlockHeader.DeltaTimestamp) + assert.Equal(t, uint32(1), a.currentStreamL2Block.ChangeL2BlockHeader.IndexL1InfoTree) + assert.Equal(t, 0, len(a.currentStreamL2Block.Transactions)) + assert.Equal(t, uint32(1), a.currentStreamBatch.L1InfoTreeIndex) + assert.Equal(t, common.BytesToAddress([]byte{0x01}), a.currentStreamBatch.Coinbase) + assert.Equal(t, common.BytesToHash([]byte{0x02}), a.currentStreamBatch.GlobalExitRoot) +} + +func Test_handleReceivedDataStream_Transaction(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentStreamL2Block: state.L2BlockRaw{ + Transactions: []state.L2TxRaw{}, + }, + logger: log.GetDefaultLogger(), + } + + tx := ethTypes.NewTransaction( + 0, + common.HexToAddress("0x01"), + big.NewInt(1000000000000000000), + uint64(21000), + big.NewInt(20000000000), + nil, + ) + + // Encode transaction into RLP format + var buf bytes.Buffer + err := tx.EncodeRLP(&buf) + require.NoError(t, err, "Failed to encode transaction") + + transaction := &datastream.Transaction{ + L2BlockNumber: uint64(10), + Index: uint64(0), + IsValid: true, + Encoded: buf.Bytes(), + EffectiveGasPricePercentage: uint32(90), + } + + transactionData, err := proto.Marshal(transaction) + assert.NoError(t, err) + + transactionEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_TRANSACTION), + Data: transactionData, + } + + err = a.handleReceivedDataStream(transactionEntry, nil, nil) + assert.NoError(t, err) + + assert.Len(t, a.currentStreamL2Block.Transactions, 1) + assert.Equal(t, uint8(90), a.currentStreamL2Block.Transactions[0].EfficiencyPercentage) + assert.False(t, a.currentStreamL2Block.Transactions[0].TxAlreadyEncoded) + assert.NotNil(t, a.currentStreamL2Block.Transactions[0].Tx) +} + +func Test_sendFinalProofSuccess(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + batchNum := uint64(23) + batchNumFinal := uint64(42) + + recursiveProof := &state.Proof{ + Prover: &proverName, + ProverID: &proverID, + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProof := &prover.FinalProof{} + + testCases := 
[]struct { + name string + setup func(m mox, a *Aggregator) + asserts func(a *Aggregator) + }{ + { + name: "Successfully settled on Agglayer", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + AggLayerTxTimeout: types.Duration{Duration: time.Millisecond * 1}, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + testHash := common.BytesToHash([]byte("test hash")) + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(testHash, nil).Once() + m.aggLayerClientMock.On("WaitTxToBeMined", testHash, mock.Anything).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Successfully settled on L1 (Direct)", + setup: func(m mox, a *Aggregator) { + senderAddr := common.BytesToAddress([]byte("sender address")).Hex() + toAddr := common.BytesToAddress([]byte("to address")) + data := []byte("data") + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + GasOffset: uint64(10), + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, common.HexToAddress(senderAddr)).Return(&toAddr, data, nil).Once() + m.ethTxManager.On("Add", mock.Anything, &toAddr, big.NewInt(0), data, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Return(nil, nil).Once() + m.ethTxManager.On("ProcessPendingMonitoredTxs", mock.Anything, mock.Anything).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + + curve := elliptic.P256() + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + require.NoError(err, "error generating key") + + a := Aggregator{ + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + aggLayerClient: aggLayerClient, + finalProof: make(chan finalProofMsg), + logger: log.GetDefaultLogger(), + verifyingProof: false, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + sequencerPrivateKey: privateKey, + } + a.ctx, a.exit = context.WithCancel(context.Background()) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + aggLayerClientMock: aggLayerClient, + } + if tc.setup != nil { + tc.setup(m, &a) + } + // send a final proof over the channel + go func() { + finalMsg := finalProofMsg{ + proverID: proverID, + recursiveProof: recursiveProof, + finalProof: finalProof, + } + a.finalProof <- finalMsg + time.Sleep(1 * time.Second) + a.exit() + }() + + a.sendFinalProof() + if tc.asserts != nil { + tc.asserts(&a) + } + }) + } +} + +func Test_sendFinalProofError(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + errTest := errors.New("test error") + batchNum := uint64(23) + batchNumFinal := uint64(42) + sender := common.BytesToAddress([]byte("SenderAddress")) + 
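Each failure case below stops the otherwise-blocking sendFinalProof loop by cancelling the aggregator context from inside the mock's Run hook, so the test returns as soon as the expected call fires. The pattern in isolation, sketched with the names defined in this test:

m.stateMock.
	On("GetBatch", mock.Anything, batchNumFinal, nil).
	Run(func(args mock.Arguments) {
		a.exit() // expectation fired: cancel a.ctx so sendFinalProof returns
	}).
	Return(nil, errTest).
	Once()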
senderAddr := sender.Hex() + + recursiveProof := &state.Proof{ + Prover: &proverName, + ProverID: &proverID, + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProof := &prover.FinalProof{} + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(*Aggregator) + }{ + { + name: "Failed to settle on Agglayer: GetBatch error", + setup: func(m mox, a *Aggregator) { + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + // test is done, stop the sendFinalProof method + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on Agglayer: SendTx error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Run(func(args mock.Arguments) { + // test is done, stop the sendFinalProof method + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on Agglayer: WaitTxToBeMined error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + AggLayerTxTimeout: types.Duration{Duration: time.Millisecond * 1}, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(common.Hash{}, nil).Once() + m.aggLayerClientMock.On("WaitTxToBeMined", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(errTest) + m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on L1 (Direct): BuildTrustedVerifyBatchesTxData error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, nil, errTest) + m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on L1 (Direct): Error Adding TX to ethTxManager", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: 
L1, + SenderAddress: senderAddr, + GasOffset: uint64(10), + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Return(nil, nil, nil).Once() + m.ethTxManager.On("Add", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + + curve := elliptic.P256() + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + require.NoError(err, "error generating key") + + a := Aggregator{ + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + aggLayerClient: aggLayerClient, + finalProof: make(chan finalProofMsg), + logger: log.GetDefaultLogger(), + verifyingProof: false, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + sequencerPrivateKey: privateKey, + } + a.ctx, a.exit = context.WithCancel(context.Background()) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + aggLayerClientMock: aggLayerClient, + } + if tc.setup != nil { + tc.setup(m, &a) + } + // send a final proof over the channel + go func() { + finalMsg := finalProofMsg{ + proverID: proverID, + recursiveProof: recursiveProof, + finalProof: finalProof, + } + a.finalProof <- finalMsg + }() + + a.sendFinalProof() + if tc.asserts != nil { + tc.asserts(&a) + } + }) + } +} + +func Test_buildFinalProof(t *testing.T) { + assert := assert.New(t) + batchNum := uint64(23) + batchNumFinal := uint64(42) + recursiveProof := &state.Proof{ + ProverID: &proverID, + Proof: "test proof", + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProofID := "finalProofID" + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(err error, fProof *prover.FinalProof) + }{ + { + name: "using real prover", + setup: func(m mox, a *Aggregator) { + finalProof := prover.FinalProof{ + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte("StateRoot"), + NewLocalExitRoot: []byte("LocalExitRoot"), + }, + } + + m.proverMock.On("Name").Return("name").Once() + m.proverMock.On("ID").Return("id").Once() + m.proverMock.On("Addr").Return("addr").Once() + m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(err error, fProof *prover.FinalProof) { + assert.NoError(err) + assert.True(bytes.Equal([]byte("StateRoot"), fProof.Public.NewStateRoot), "State roots should be equal") + assert.True(bytes.Equal([]byte("LocalExitRoot"), fProof.Public.NewLocalExitRoot), "LocalExit roots should be equal") + }, + }, + { + name: "using mock prover", + setup: 
func(m mox, a *Aggregator) { + finalProof := prover.FinalProof{ + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte(mockedStateRoot), + NewLocalExitRoot: []byte(mockedLocalExitRoot), + }, + } + + finalDBBatch := &state.DBBatch{ + Batch: state.Batch{ + StateRoot: common.BytesToHash([]byte("mock StateRoot")), + LocalExitRoot: common.BytesToHash([]byte("mock LocalExitRoot")), + }, + } + + m.proverMock.On("Name").Return("name").Once() + m.proverMock.On("ID").Return("id").Once() + m.proverMock.On("Addr").Return("addr").Once() + m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Return(finalDBBatch, nil).Once() + }, + asserts: func(err error, fProof *prover.FinalProof) { + assert.NoError(err) + expStateRoot := common.BytesToHash([]byte("mock StateRoot")) + expLocalExitRoot := common.BytesToHash([]byte("mock LocalExitRoot")) + assert.True(bytes.Equal(expStateRoot.Bytes(), fProof.Public.NewStateRoot), "State roots should be equal") + assert.True(bytes.Equal(expLocalExitRoot.Bytes(), fProof.Public.NewLocalExitRoot), "LocalExit roots should be equal") + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + proverMock := mocks.NewProverInterfaceMock(t) + stateMock := mocks.NewStateInterfaceMock(t) + m := mox{ + proverMock: proverMock, + stateMock: stateMock, + } + a := Aggregator{ + state: stateMock, + logger: log.GetDefaultLogger(), + cfg: Config{ + SenderAddress: common.BytesToAddress([]byte("from")).Hex(), + }, + } + + tc.setup(m, &a) + fProof, err := a.buildFinalProof(context.Background(), proverMock, recursiveProof) + tc.asserts(err, fProof) + }) + } +} + +func Test_tryBuildFinalProof(t *testing.T) { + assert := assert.New(t) + errTest := errors.New("test error") + from := common.BytesToAddress([]byte("from")) + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + TxProfitabilityCheckerType: ProfitabilityAcceptAll, + SenderAddress: from.Hex(), + } + latestVerifiedBatchNum := uint64(22) + batchNum := uint64(23) + batchNumFinal := uint64(42) + finalProofID := "finalProofID" + finalProof := prover.FinalProof{ + Proof: "", + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte("newStateRoot"), + NewLocalExitRoot: []byte("newLocalExitRoot"), + }, + } + proofToVerify := state.Proof{ + ProofID: &proofID, + Proof: proof, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + invalidProof := state.Proof{ + ProofID: &proofID, + Proof: proof, + BatchNumber: uint64(123), + BatchNumberFinal: uint64(456), + } + + proverCtx := context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + testCases := []struct { + name string + proof *state.Proof + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + assertFinalMsg func(*finalProofMsg) + }{ + { + name: "can't verify proof (verifyingProof = true)", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return("addr").Once() + a.verifyingProof = true + }, + asserts: func(result bool, a *Aggregator, err 
error) { + a.verifyingProof = false // reset + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "can't verify proof (veryfy time not reached yet)", + setup: func(m mox, a *Aggregator) { + a.timeSendFinalProof = time.Now().Add(10 * time.Second) + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return("addr").Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "nil proof, error requesting the proof triggers defer", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(nil, errTest).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). + Return(nil). + Once(). + NotBefore(proofGeneratingTrueCall) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, error building the proof triggers defer", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(nil, errTest).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). + Return(nil). + Once(). 
+ NotBefore(proofGeneratingTrueCall) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, generic error from GetProofReadyToVerify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, ErrNotFound from GetProofReadyToVerify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "nil proof gets a proof ready to verify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID).Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + assertFinalMsg: func(msg *finalProofMsg) { + assert.Equal(finalProof.Proof, msg.finalProof.Proof) + assert.Equal(finalProof.Public.NewStateRoot, msg.finalProof.Public.NewStateRoot) + assert.Equal(finalProof.Public.NewLocalExitRoot, msg.finalProof.Public.NewLocalExitRoot) + }, + }, + { + name: "error checking if proof is a complete sequence", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "invalid proof (not consecutive to latest verified batch) rejected", + proof: &invalidProof, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + 
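Two testify features carry most of the ordering logic in these cases: mock.MatchedBy distinguishes the prover-owned context from the aggregator-owned one, and Call.NotBefore requires the "generating" update to have happened before the cleanup update. A sketch of both, assuming a testify release that provides NotBefore (it is used throughout this file):

// The test tags its context via context.WithValue(ctx, "owner", "prover"),
// so an expectation can recognise it with a MatchedBy predicate.
matchProverCtx := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" }

first := m.stateMock.
	On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtx), &proofToVerify, nil).
	Return(nil).
	Once()

// This expectation may only be satisfied after `first` has actually been called.
m.stateMock.
	On("UpdateGeneratedProof", mock.Anything, &proofToVerify, nil).
	Return(nil).
	Once().
	NotBefore(first)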
m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "invalid proof (not a complete sequence) rejected", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "valid proof", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID).Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(true, nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + assertFinalMsg: func(msg *finalProofMsg) { + assert.Equal(finalProof.Proof, msg.finalProof.Proof) + assert.Equal(finalProof.Public.NewStateRoot, msg.finalProof.Public.NewStateRoot) + assert.Equal(finalProof.Public.NewLocalExitRoot, msg.finalProof.Public.NewLocalExitRoot) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + } + + aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + + var wg sync.WaitGroup + if tc.assertFinalMsg != nil { + // wait for the final proof over the channel + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + msg := <-a.finalProof + tc.assertFinalMsg(&msg) + }() + } + + result, err := a.tryBuildFinalProof(proverCtx, proverMock, tc.proof) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + + if tc.assertFinalMsg != nil { + WaitUntil(t, &wg, time.Second) + } + }) + } +} + +func Test_tryAggregateProofs(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + errTest := errors.New("test error") + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + } + + recursiveProof := "recursiveProof" + proverCtx := 
context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + batchNum := uint64(23) + batchNumFinal := uint64(42) + proof1 := state.Proof{ + Proof: "proof1", + BatchNumber: batchNum, + } + proof2 := state.Proof{ + Proof: "proof2", + BatchNumberFinal: batchNumFinal, + } + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + }{ + { + name: "getAndLockProofsToAggregate returns generic error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "getAndLockProofsToAggregate returns ErrNotFound", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "getAndLockProofsToAggregate error updating proofs", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(errTest). + Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "AggregatedProof error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). 
+ Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + // Use a type assertion with a check + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.NotNil(proofArg.GeneratingSince) + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(nil, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.Nil(proofArg.GeneratingSince) + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). 
+ Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once(). + NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "unlockProofsToAggregate error after WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID) + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(errTest). + Once(). 
+ NotBefore(proof1GeneratingTrueCall) + dbTx.On("Rollback", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "rollback after DeleteGeneratedProofs error in db transaction", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(errTest).Once() + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once(). 
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "rollback after AddGeneratedProof error in db transaction", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Return(errTest).Once() + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once(). 
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "time to send final, state error", + setup: func(m mox, a *Aggregator) { + a.cfg.VerifyProofInterval = types.Duration{Duration: time.Nanosecond} + m.proverMock.On("Name").Return(proverName).Times(3) + m.proverMock.On("ID").Return(proverID).Times(3) + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Twice() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + expectedInputProver := map[string]interface{}{ + "recursive_proof_1": proof1.Proof, + "recursive_proof_2": proof2.Proof, + } + b, err := json.Marshal(expectedInputProver) + require.NoError(err) + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(proof1.BatchNumber, proof.BatchNumber) + assert.Equal(proof2.BatchNumberFinal, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal(string(b), proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(proof1.BatchNumber, proof.BatchNumber) + assert.Equal(proof2.BatchNumberFinal, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal(string(b), proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.Nil(proof.GeneratingSince) + }, + ).Return(nil).Once() + }, + asserts: func(result bool, a 
*Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + } + aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + a.resetVerifyProofTime() + + result, err := a.tryAggregateProofs(proverCtx, proverMock) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + }) + } +} diff --git a/aggregator/config.go b/aggregator/config.go index 89676e3d..fbbc9c9b 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -152,8 +152,8 @@ type Config struct { // MaxWitnessRetrievalWorkers is the maximum number of workers that will be used to retrieve the witness MaxWitnessRetrievalWorkers int `mapstructure:"MaxWitnessRetrievalWorkers"` - // SyncModeOnlyEnabled is a flag to enable the sync mode only - // In this mode the aggregator will only sync from L1 and will not generate or read the data stream + // SyncModeOnlyEnabled is a flag that activates sync mode exclusively. + // When enabled, the aggregator will sync data only from L1 and will not generate or read the data stream. SyncModeOnlyEnabled bool `mapstructure:"SyncModeOnlyEnabled"` } diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 85676f69..ee70d07c 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -7,14 +7,18 @@ import ( ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/jackc/pgx/v4" ) // Consumer interfaces required by the package. -type proverInterface interface { +type ProverInterface interface { Name() string ID() string Addr() string @@ -26,8 +30,8 @@ type proverInterface interface { WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) } -// etherman contains the methods required to interact with ethereum -type etherman interface { +// Etherman contains the methods required to interact with ethereum +type Etherman interface { GetRollupId() uint32 GetLatestVerifiedBatchNum() (uint64, error) BuildTrustedVerifyBatchesTxData( @@ -44,8 +48,8 @@ type aggregatorTxProfitabilityChecker interface { IsProfitable(context.Context, *big.Int) (bool, error) } -// stateInterface gathers the methods to interact with the state. -type stateInterface interface { +// StateInterface gathers the methods to interact with the state. 
+type StateInterface interface { BeginStateTransaction(ctx context.Context) (pgx.Tx, error) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) @@ -63,3 +67,49 @@ type stateInterface interface { DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error } + +// StreamClient represents the stream client behaviour +type StreamClient interface { + Start() error + ExecCommandStart(fromEntry uint64) error + ExecCommandStartBookmark(fromBookmark []byte) error + ExecCommandStop() error + ExecCommandGetHeader() (datastreamer.HeaderEntry, error) + ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) + ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) + GetFromStream() uint64 + GetTotalEntries() uint64 + SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) + ResetProcessEntryFunc() + IsStarted() bool +} + +// EthTxManagerClient represents the eth tx manager interface +type EthTxManagerClient interface { + Add( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + ) (common.Hash, error) + AddWithGas( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + gas uint64, + ) (common.Hash, error) + EncodeBlobData(data []byte) (kzg4844.Blob, error) + MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar + ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) + Remove(ctx context.Context, id common.Hash) error + RemoveAll(ctx context.Context) error + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) + ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Start() + Stop() +} diff --git a/aggregator/mocks/mock_StreamClient.go b/aggregator/mocks/mock_StreamClient.go new file mode 100644 index 00000000..7962d31e --- /dev/null +++ b/aggregator/mocks/mock_StreamClient.go @@ -0,0 +1,247 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + datastreamer "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + mock "github.com/stretchr/testify/mock" +) + +// StreamClientMock is an autogenerated mock type for the StreamClient type +type StreamClientMock struct { + mock.Mock +} + +// ExecCommandGetBookmark provides a mock function with given fields: fromBookmark +func (_m *StreamClientMock) ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) { + ret := _m.Called(fromBookmark) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetBookmark") + } + + var r0 datastreamer.FileEntry + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (datastreamer.FileEntry, error)); ok { + return rf(fromBookmark) + } + if rf, ok := ret.Get(0).(func([]byte) datastreamer.FileEntry); ok { + r0 = rf(fromBookmark) + } else { + r0 = ret.Get(0).(datastreamer.FileEntry) + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(fromBookmark) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandGetEntry provides a mock function with given fields: fromEntry +func (_m *StreamClientMock) ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) { + ret := _m.Called(fromEntry) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetEntry") + } + + var r0 datastreamer.FileEntry + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (datastreamer.FileEntry, error)); ok { + return rf(fromEntry) + } + if rf, ok := ret.Get(0).(func(uint64) datastreamer.FileEntry); ok { + r0 = rf(fromEntry) + } else { + r0 = ret.Get(0).(datastreamer.FileEntry) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(fromEntry) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandGetHeader provides a mock function with given fields: +func (_m *StreamClientMock) ExecCommandGetHeader() (datastreamer.HeaderEntry, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetHeader") + } + + var r0 datastreamer.HeaderEntry + var r1 error + if rf, ok := ret.Get(0).(func() (datastreamer.HeaderEntry, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() datastreamer.HeaderEntry); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(datastreamer.HeaderEntry) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandStart provides a mock function with given fields: fromEntry +func (_m *StreamClientMock) ExecCommandStart(fromEntry uint64) error { + ret := _m.Called(fromEntry) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStart") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(fromEntry) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecCommandStartBookmark provides a mock function with given fields: fromBookmark +func (_m *StreamClientMock) ExecCommandStartBookmark(fromBookmark []byte) error { + ret := _m.Called(fromBookmark) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStartBookmark") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(fromBookmark) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecCommandStop provides a mock function with given fields: +func (_m *StreamClientMock) ExecCommandStop() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStop") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetFromStream provides a mock function with given fields: +func (_m *StreamClientMock) GetFromStream() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFromStream") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetTotalEntries provides a mock function with given fields: +func (_m *StreamClientMock) GetTotalEntries() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetTotalEntries") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// IsStarted provides a mock function with given fields: +func (_m *StreamClientMock) IsStarted() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsStarted") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ResetProcessEntryFunc provides a mock function with given fields: +func (_m *StreamClientMock) ResetProcessEntryFunc() { + _m.Called() +} + +// SetProcessEntryFunc provides a mock function with given fields: f +func (_m *StreamClientMock) SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) { + _m.Called(f) +} + +// Start provides a mock function with given fields: +func (_m *StreamClientMock) Start() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStreamClientMock creates a new instance of StreamClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStreamClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StreamClientMock { + mock := &StreamClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_agglayer_client.go b/aggregator/mocks/mock_agglayer_client.go new file mode 100644 index 00000000..2923ebe0 --- /dev/null +++ b/aggregator/mocks/mock_agglayer_client.go @@ -0,0 +1,79 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + agglayer "github.com/0xPolygon/cdk/aggregator/agglayer" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// AgglayerClientInterfaceMock is an autogenerated mock type for the AgglayerClientInterface type +type AgglayerClientInterfaceMock struct { + mock.Mock +} + +// SendTx provides a mock function with given fields: signedTx +func (_m *AgglayerClientInterfaceMock) SendTx(signedTx agglayer.SignedTx) (common.Hash, error) { + ret := _m.Called(signedTx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(agglayer.SignedTx) (common.Hash, error)); ok { + return rf(signedTx) + } + if rf, ok := ret.Get(0).(func(agglayer.SignedTx) common.Hash); ok { + r0 = rf(signedTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(agglayer.SignedTx) error); ok { + r1 = rf(signedTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitTxToBeMined provides a mock function with given fields: hash, ctx +func (_m *AgglayerClientInterfaceMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { + ret := _m.Called(hash, ctx) + + if len(ret) == 0 { + panic("no return value specified for WaitTxToBeMined") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { + r0 = rf(hash, ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewAgglayerClientInterfaceMock creates a new instance of AgglayerClientInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAgglayerClientInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AgglayerClientInterfaceMock { + mock := &AgglayerClientInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_dbtx.go b/aggregator/mocks/mock_dbtx.go new file mode 100644 index 00000000..f870cd57 --- /dev/null +++ b/aggregator/mocks/mock_dbtx.go @@ -0,0 +1,350 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + pgconn "github.com/jackc/pgconn" + mock "github.com/stretchr/testify/mock" + + pgx "github.com/jackc/pgx/v4" +) + +// DbTxMock is an autogenerated mock type for the Tx type +type DbTxMock struct { + mock.Mock +} + +// Begin provides a mock function with given fields: ctx +func (_m *DbTxMock) Begin(ctx context.Context) (pgx.Tx, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Begin") + } + + var r0 pgx.Tx + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Tx) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BeginFunc provides a mock function with given fields: ctx, f +func (_m *DbTxMock) BeginFunc(ctx context.Context, f func(pgx.Tx) error) error { + ret := _m.Called(ctx, f) + + if len(ret) == 0 { + panic("no return value specified for BeginFunc") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, func(pgx.Tx) error) error); ok { + r0 = rf(ctx, f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Commit provides a mock function with given fields: ctx +func (_m *DbTxMock) Commit(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Conn provides a mock function with given fields: +func (_m *DbTxMock) Conn() *pgx.Conn { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Conn") + } + + var r0 *pgx.Conn + if rf, ok := ret.Get(0).(func() *pgx.Conn); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pgx.Conn) + } + } + + return r0 +} + +// CopyFrom provides a mock function with given fields: ctx, tableName, columnNames, rowSrc +func (_m *DbTxMock) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) { + ret := _m.Called(ctx, tableName, columnNames, rowSrc) + + if len(ret) == 0 { + panic("no return value specified for CopyFrom") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) (int64, error)); ok { + return rf(ctx, tableName, columnNames, rowSrc) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) int64); ok { + r0 = rf(ctx, tableName, columnNames, rowSrc) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) error); ok { + r1 = rf(ctx, tableName, columnNames, rowSrc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Exec provides a mock function with given fields: ctx, sql, arguments +func (_m *DbTxMock) Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error) { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, arguments...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Exec") + } + + var r0 pgconn.CommandTag + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgconn.CommandTag, error)); ok { + return rf(ctx, sql, arguments...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgconn.CommandTag); ok { + r0 = rf(ctx, sql, arguments...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgconn.CommandTag) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, sql, arguments...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LargeObjects provides a mock function with given fields: +func (_m *DbTxMock) LargeObjects() pgx.LargeObjects { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LargeObjects") + } + + var r0 pgx.LargeObjects + if rf, ok := ret.Get(0).(func() pgx.LargeObjects); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(pgx.LargeObjects) + } + + return r0 +} + +// Prepare provides a mock function with given fields: ctx, name, sql +func (_m *DbTxMock) Prepare(ctx context.Context, name string, sql string) (*pgconn.StatementDescription, error) { + ret := _m.Called(ctx, name, sql) + + if len(ret) == 0 { + panic("no return value specified for Prepare") + } + + var r0 *pgconn.StatementDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*pgconn.StatementDescription, error)); ok { + return rf(ctx, name, sql) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *pgconn.StatementDescription); ok { + r0 = rf(ctx, name, sql) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pgconn.StatementDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, name, sql) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: ctx, sql, args +func (_m *DbTxMock) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 pgx.Rows + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgx.Rows, error)); ok { + return rf(ctx, sql, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Rows); ok { + r0 = rf(ctx, sql, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Rows) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, sql, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryFunc provides a mock function with given fields: ctx, sql, args, scans, f +func (_m *DbTxMock) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { + ret := _m.Called(ctx, sql, args, scans, f) + + if len(ret) == 0 { + panic("no return value specified for QueryFunc") + } + + var r0 pgconn.CommandTag + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error)); ok { + return rf(ctx, sql, args, scans, f) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) pgconn.CommandTag); ok { + r0 = rf(ctx, sql, args, scans, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgconn.CommandTag) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) error); ok { + r1 = rf(ctx, sql, args, scans, f) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryRow provides a mock function with given fields: ctx, sql, args +func (_m *DbTxMock) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for QueryRow") + } + + var r0 pgx.Row + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Row); ok { + r0 = rf(ctx, sql, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Row) + } + } + + return r0 +} + +// Rollback provides a mock function with given fields: ctx +func (_m *DbTxMock) Rollback(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Rollback") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendBatch provides a mock function with given fields: ctx, b +func (_m *DbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for SendBatch") + } + + var r0 pgx.BatchResults + if rf, ok := ret.Get(0).(func(context.Context, *pgx.Batch) pgx.BatchResults); ok { + r0 = rf(ctx, b) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.BatchResults) + } + } + + return r0 +} + +// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDbTxMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DbTxMock { + mock := &DbTxMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_eth_tx_manager.go b/aggregator/mocks/mock_eth_tx_manager.go new file mode 100644 index 00000000..8db7a440 --- /dev/null +++ b/aggregator/mocks/mock_eth_tx_manager.go @@ -0,0 +1,258 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethtxmanager "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + + kzg4844 "github.com/ethereum/go-ethereum/crypto/kzg4844" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" + + zkevm_ethtx_managertypes "github.com/0xPolygon/zkevm-ethtx-manager/types" +) + +// EthTxManagerClientMock is an autogenerated mock type for the EthTxManagerClient type +type EthTxManagerClientMock struct { + mock.Mock +} + +// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar +func (_m *EthTxManagerClientMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas +func (_m *EthTxManagerClientMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) + + if len(ret) == 0 { + panic("no return value specified for AddWithGas") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar, gas) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodeBlobData provides a mock function with given fields: data +func (_m *EthTxManagerClientMock) EncodeBlobData(data []byte) (kzg4844.Blob, error) { + ret := _m.Called(data) + + if len(ret) == 0 { + panic("no return value specified for EncodeBlobData") + } + + var r0 kzg4844.Blob + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (kzg4844.Blob, error)); ok { + return rf(data) + } + if rf, ok := ret.Get(0).(func([]byte) kzg4844.Blob); ok { + r0 = rf(data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(kzg4844.Blob) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 
= rf(data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MakeBlobSidecar provides a mock function with given fields: blobs +func (_m *EthTxManagerClientMock) MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar { + ret := _m.Called(blobs) + + if len(ret) == 0 { + panic("no return value specified for MakeBlobSidecar") + } + + var r0 *types.BlobTxSidecar + if rf, ok := ret.Get(0).(func([]kzg4844.Blob) *types.BlobTxSidecar); ok { + r0 = rf(blobs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlobTxSidecar) + } + } + + return r0 +} + +// ProcessPendingMonitoredTxs provides a mock function with given fields: ctx, resultHandler +func (_m *EthTxManagerClientMock) ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) { + _m.Called(ctx, resultHandler) +} + +// Remove provides a mock function with given fields: ctx, id +func (_m *EthTxManagerClientMock) Remove(ctx context.Context, id common.Hash) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveAll provides a mock function with given fields: ctx +func (_m *EthTxManagerClientMock) RemoveAll(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for RemoveAll") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Result provides a mock function with given fields: ctx, id +func (_m *EthTxManagerClientMock) Result(ctx context.Context, id common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Result") + } + + var r0 zkevm_ethtx_managertypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) zkevm_ethtx_managertypes.MonitoredTxResult); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(zkevm_ethtx_managertypes.MonitoredTxResult) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResultsByStatus provides a mock function with given fields: ctx, statuses +func (_m *EthTxManagerClientMock) ResultsByStatus(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, statuses) + + if len(ret) == 0 { + panic("no return value specified for ResultsByStatus") + } + + var r0 []zkevm_ethtx_managertypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { + return rf(ctx, statuses) + } + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) []zkevm_ethtx_managertypes.MonitoredTxResult); ok { + r0 = rf(ctx, statuses) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]zkevm_ethtx_managertypes.MonitoredTxResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) error); ok { + r1 = 
rf(ctx, statuses) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: +func (_m *EthTxManagerClientMock) Start() { + _m.Called() +} + +// Stop provides a mock function with given fields: +func (_m *EthTxManagerClientMock) Stop() { + _m.Called() +} + +// NewEthTxManagerClientMock creates a new instance of EthTxManagerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthTxManagerClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthTxManagerClientMock { + mock := &EthTxManagerClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go new file mode 100644 index 00000000..351acef3 --- /dev/null +++ b/aggregator/mocks/mock_etherman.go @@ -0,0 +1,210 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethmantypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthermanMock is an autogenerated mock type for the Etherman type +type EthermanMock struct { + mock.Mock +} + +// BuildTrustedVerifyBatchesTxData provides a mock function with given fields: lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary +func (_m *EthermanMock) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch uint64, newVerifiedBatch uint64, inputs *ethmantypes.FinalProofInputs, beneficiary common.Address) (*common.Address, []byte, error) { + ret := _m.Called(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + + if len(ret) == 0 { + panic("no return value specified for BuildTrustedVerifyBatchesTxData") + } + + var r0 *common.Address + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) (*common.Address, []byte, error)); ok { + return rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } + if rf, ok := ret.Get(0).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) *common.Address); ok { + r0 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*common.Address) + } + } + + if rf, ok := ret.Get(1).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) []byte); ok { + r1 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) error); ok { + r2 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetBatchAccInputHash provides a mock function with given fields: ctx, batchNumber +func (_m *EthermanMock) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBatchAccInputHash") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (common.Hash, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) common.Hash); ok { + r0 = 
rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBlockHeader provides a mock function with given fields: ctx +func (_m *EthermanMock) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Header, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Header); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestVerifiedBatchNum provides a mock function with given fields: +func (_m *EthermanMock) GetLatestVerifiedBatchNum() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLatestVerifiedBatchNum") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRollupId provides a mock function with given fields: +func (_m *EthermanMock) GetRollupId() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetRollupId") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthermanMock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthermanMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanMock { + mock := &EthermanMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_prover.go b/aggregator/mocks/mock_prover.go new file mode 100644 index 00000000..72bd66dc --- /dev/null +++ b/aggregator/mocks/mock_prover.go @@ -0,0 +1,271 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + prover "github.com/0xPolygon/cdk/aggregator/prover" +) + +// ProverInterfaceMock is an autogenerated mock type for the ProverInterface type +type ProverInterfaceMock struct { + mock.Mock +} + +// Addr provides a mock function with given fields: +func (_m *ProverInterfaceMock) Addr() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Addr") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// AggregatedProof provides a mock function with given fields: inputProof1, inputProof2 +func (_m *ProverInterfaceMock) AggregatedProof(inputProof1 string, inputProof2 string) (*string, error) { + ret := _m.Called(inputProof1, inputProof2) + + if len(ret) == 0 { + panic("no return value specified for AggregatedProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (*string, error)); ok { + return rf(inputProof1, inputProof2) + } + if rf, ok := ret.Get(0).(func(string, string) *string); ok { + r0 = rf(inputProof1, inputProof2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(inputProof1, inputProof2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchProof provides a mock function with given fields: input +func (_m *ProverInterfaceMock) BatchProof(input *prover.StatelessInputProver) (*string, error) { + ret := _m.Called(input) + + if len(ret) == 0 { + panic("no return value specified for BatchProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(*prover.StatelessInputProver) (*string, error)); ok { + return rf(input) + } + if rf, ok := ret.Get(0).(func(*prover.StatelessInputProver) *string); ok { + r0 = rf(input) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(*prover.StatelessInputProver) error); ok { + r1 = rf(input) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalProof provides a mock function with given fields: inputProof, aggregatorAddr +func (_m *ProverInterfaceMock) FinalProof(inputProof string, aggregatorAddr string) (*string, error) { + ret := _m.Called(inputProof, aggregatorAddr) + + if len(ret) == 0 { + panic("no return value specified for FinalProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (*string, error)); ok { + return rf(inputProof, aggregatorAddr) + } + if rf, ok := ret.Get(0).(func(string, string) *string); ok { + r0 = rf(inputProof, aggregatorAddr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(inputProof, aggregatorAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ID provides a mock function with given fields: +func (_m *ProverInterfaceMock) ID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// IsIdle provides a mock function with given fields: +func (_m *ProverInterfaceMock) IsIdle() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsIdle") + } + + var r0 bool 
+ var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Name provides a mock function with given fields: +func (_m *ProverInterfaceMock) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// WaitFinalProof provides a mock function with given fields: ctx, proofID +func (_m *ProverInterfaceMock) WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) { + ret := _m.Called(ctx, proofID) + + if len(ret) == 0 { + panic("no return value specified for WaitFinalProof") + } + + var r0 *prover.FinalProof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*prover.FinalProof, error)); ok { + return rf(ctx, proofID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *prover.FinalProof); ok { + r0 = rf(ctx, proofID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*prover.FinalProof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, proofID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitRecursiveProof provides a mock function with given fields: ctx, proofID +func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, error) { + ret := _m.Called(ctx, proofID) + + if len(ret) == 0 { + panic("no return value specified for WaitRecursiveProof") + } + + var r0 string + var r1 common.Hash + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string) (string, common.Hash, error)); ok { + return rf(ctx, proofID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { + r0 = rf(ctx, proofID) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) common.Hash); ok { + r1 = rf(ctx, proofID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(common.Hash) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + r2 = rf(ctx, proofID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewProverInterfaceMock creates a new instance of ProverInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProverInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ProverInterfaceMock { + mock := &ProverInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go new file mode 100644 index 00000000..8879dd05 --- /dev/null +++ b/aggregator/mocks/mock_state.go @@ -0,0 +1,406 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + pgx "github.com/jackc/pgx/v4" + mock "github.com/stretchr/testify/mock" + + state "github.com/0xPolygon/cdk/state" +) + +// StateInterfaceMock is an autogenerated mock type for the StateInterface type +type StateInterfaceMock struct { + mock.Mock +} + +// AddBatch provides a mock function with given fields: ctx, dbBatch, dbTx +func (_m *StateInterfaceMock) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbBatch, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.DBBatch, pgx.Tx) error); ok { + r0 = rf(ctx, dbBatch, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddGeneratedProof provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddGeneratedProof") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddSequence provides a mock function with given fields: ctx, sequence, dbTx +func (_m *StateInterfaceMock) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { + ret := _m.Called(ctx, sequence, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddSequence") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, state.Sequence, pgx.Tx) error); ok { + r0 = rf(ctx, sequence, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BeginStateTransaction provides a mock function with given fields: ctx +func (_m *StateInterfaceMock) BeginStateTransaction(ctx context.Context) (pgx.Tx, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BeginStateTransaction") + } + + var r0 pgx.Tx + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Tx) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckProofContainsCompleteSequences provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CheckProofContainsCompleteSequences") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) (bool, error)); ok { + return rf(ctx, proof, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) bool); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r1 = rf(ctx, proof, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckProofExistsForBatch provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, 
error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CheckProofExistsForBatch") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (bool, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) bool); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CleanupGeneratedProofs provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CleanupGeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CleanupLockedProofs provides a mock function with given fields: ctx, duration, dbTx +func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) { + ret := _m.Called(ctx, duration, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CleanupLockedProofs") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) (int64, error)); ok { + return rf(ctx, duration, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) int64); ok { + r0 = rf(ctx, duration, dbTx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, pgx.Tx) error); ok { + r1 = rf(ctx, duration, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteBatchesNewerThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteBatchesNewerThanBatchNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteBatchesOlderThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteBatchesOlderThanBatchNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteGeneratedProofs provides a mock function with given fields: ctx, batchNumber, batchNumberFinal, dbTx +func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, batchNumberFinal, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteGeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, 
uint64, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, batchNumberFinal, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteUngeneratedProofs provides a mock function with given fields: ctx, dbTx +func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteUngeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) error); ok { + r0 = rf(ctx, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetBatch provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.DBBatch, error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetBatch") + } + + var r0 *state.DBBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.DBBatch, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.DBBatch); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.DBBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProofReadyToVerify provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx +func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) { + ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetProofReadyToVerify") + } + + var r0 *state.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Proof, error)); ok { + return rf(ctx, lastVerfiedBatchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Proof); ok { + r0 = rf(ctx, lastVerfiedBatchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, lastVerfiedBatchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProofsToAggregate provides a mock function with given fields: ctx, dbTx +func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetProofsToAggregate") + } + + var r0 *state.Proof + var r1 *state.Proof + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (*state.Proof, *state.Proof, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.Proof); ok { + r0 = rf(ctx, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) *state.Proof); ok { + r1 = rf(ctx, dbTx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*state.Proof) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, pgx.Tx) error); ok { + r2 = rf(ctx, dbTx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// UpdateGeneratedProof provides a mock function with given fields: ctx, proof, dbTx +func (_m 
*StateInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateGeneratedProof") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStateInterfaceMock creates a new instance of StateInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StateInterfaceMock { + mock := &StateInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_synchronizer.go b/aggregator/mocks/mock_synchronizer.go new file mode 100644 index 00000000..28811e8c --- /dev/null +++ b/aggregator/mocks/mock_synchronizer.go @@ -0,0 +1,321 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + synchronizer "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" +) + +// SynchronizerInterfaceMock is an autogenerated mock type for the Synchronizer type +type SynchronizerInterfaceMock struct { + mock.Mock +} + +// GetL1BlockByNumber provides a mock function with given fields: ctx, blockNumber +func (_m *SynchronizerInterfaceMock) GetL1BlockByNumber(ctx context.Context, blockNumber uint64) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockByNumber") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.L1Block, error)); ok { + return rf(ctx, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.L1Block); ok { + r0 = rf(ctx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetL1InfoRootPerIndex provides a mock function with given fields: ctx, L1InfoTreeIndex +func (_m *SynchronizerInterfaceMock) GetL1InfoRootPerIndex(ctx context.Context, L1InfoTreeIndex uint32) (common.Hash, error) { + ret := _m.Called(ctx, L1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoRootPerIndex") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (common.Hash, error)); ok { + return rf(ctx, L1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) common.Hash); ok { + r0 = rf(ctx, L1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, L1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetL1InfoTreeLeaves provides a mock function with given fields: ctx, indexLeaves +func (_m *SynchronizerInterfaceMock) GetL1InfoTreeLeaves(ctx context.Context, indexLeaves []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, indexLeaves) + + if len(ret) == 0 { + panic("no 
return value specified for GetL1InfoTreeLeaves") + } + + var r0 map[uint32]synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, indexLeaves) + } + if rf, ok := ret.Get(0).(func(context.Context, []uint32) map[uint32]synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, indexLeaves) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[uint32]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []uint32) error); ok { + r1 = rf(ctx, indexLeaves) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastL1Block provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastL1Block(ctx context.Context) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastL1Block") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*synchronizer.L1Block, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *synchronizer.L1Block); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastestVirtualBatchNumber provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastestVirtualBatchNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastestVirtualBatchNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot +func (_m *SynchronizerInterfaceMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash) ([]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, l1InfoRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLeafsByL1InfoRoot") + } + + var r0 []synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, l1InfoRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, l1InfoRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, l1InfoRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSequenceByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetSequenceByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.SequencedBatches, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetSequenceByBatchNumber") + } + + var r0 *synchronizer.SequencedBatches + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.SequencedBatches, 
error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.SequencedBatches); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.SequencedBatches) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetVirtualBatchByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetVirtualBatchByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.VirtualBatch, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetVirtualBatchByBatchNumber") + } + + var r0 *synchronizer.VirtualBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.VirtualBatch, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.VirtualBatch); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.VirtualBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsSynced provides a mock function with given fields: +func (_m *SynchronizerInterfaceMock) IsSynced() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsSynced") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// SetCallbackOnReorgDone provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnReorgDone(callback func(synchronizer.ReorgExecutionResult)) { + _m.Called(callback) +} + +// SetCallbackOnRollbackBatches provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnRollbackBatches(callback func(synchronizer.RollbackBatchesData)) { + _m.Called(callback) +} + +// Stop provides a mock function with given fields: +func (_m *SynchronizerInterfaceMock) Stop() { + _m.Called() +} + +// Sync provides a mock function with given fields: returnOnSync +func (_m *SynchronizerInterfaceMock) Sync(returnOnSync bool) error { + ret := _m.Called(returnOnSync) + + if len(ret) == 0 { + panic("no return value specified for Sync") + } + + var r0 error + if rf, ok := ret.Get(0).(func(bool) error); ok { + r0 = rf(returnOnSync) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSynchronizerInterfaceMock creates a new instance of SynchronizerInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSynchronizerInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *SynchronizerInterfaceMock { + mock := &SynchronizerInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/profitabilitychecker.go b/aggregator/profitabilitychecker.go index f05799eb..dc91a21e 100644 --- a/aggregator/profitabilitychecker.go +++ b/aggregator/profitabilitychecker.go @@ -18,14 +18,14 @@ const ( // TxProfitabilityCheckerBase checks pol collateral with min reward type TxProfitabilityCheckerBase struct { - State stateInterface + State StateInterface IntervalAfterWhichBatchSentAnyway time.Duration MinReward *big.Int } // NewTxProfitabilityCheckerBase init base tx profitability checker func NewTxProfitabilityCheckerBase( - state stateInterface, interval time.Duration, minReward *big.Int, + state StateInterface, interval time.Duration, minReward *big.Int, ) *TxProfitabilityCheckerBase { return &TxProfitabilityCheckerBase{ State: state, @@ -50,12 +50,12 @@ func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx context.Context, polColla // TxProfitabilityCheckerAcceptAll validate batch anyway and don't check anything type TxProfitabilityCheckerAcceptAll struct { - State stateInterface + State StateInterface IntervalAfterWhichBatchSentAnyway time.Duration } // NewTxProfitabilityCheckerAcceptAll init tx profitability checker that accept all txs -func NewTxProfitabilityCheckerAcceptAll(state stateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { +func NewTxProfitabilityCheckerAcceptAll(state StateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { return &TxProfitabilityCheckerAcceptAll{ State: state, IntervalAfterWhichBatchSentAnyway: interval, @@ -77,7 +77,7 @@ func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, pol } // TODO: now it's impossible to check, when batch got consolidated, bcs it's not saved -// func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface, +// func isConsolidatedBatchAppeared(ctx context.Context, state StateInterface, // intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) { // batch, err := state.GetLastVerifiedBatch(ctx, nil) // if err != nil { diff --git a/test/Makefile b/test/Makefile index b72c101f..a1b51bb1 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,5 +1,7 @@ .PHONY: generate-mocks -generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers generate-mocks-sync generate-mocks-l1infotreesync +generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender \ + generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers \ + generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator .PHONY: generate-mocks-bridgesync @@ -34,8 +36,6 @@ generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mock rm -Rf ../l1infotreesync/mocks export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../l1infotreesync --output ../l1infotreesync/mocks --outpkg mocks_l1infotreesync ${COMMON_MOCKERY_PARAMS} export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go - - .PHONY: generate-mocks-aggoracle generate-mocks-helpers: 
## Generates mocks for helpers , using mockery tool @@ -48,6 +48,16 @@ generate-mocks-sync: ## Generates mocks for sync, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go +.PHONY: generate-mocks-aggregator +generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../aggregator/agglayer --output=../aggregator/mocks --outpkg=mocks --structname=AgglayerClientInterfaceMock --filename=mock_agglayer_client.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop From 72ed0257900d57ecd2392da4d97504004b8ccddd Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Thu, 10 Oct 2024 14:59:46 +0200 Subject: [PATCH 35/53] feat: add versions command (#120) * refactor: not all commands need the same params * feat: versions command --- Cargo.lock | 11 +++++++++ crates/cdk/Cargo.toml | 1 + crates/cdk/src/cli.rs | 49 +++++++++++++++++++++++--------------- crates/cdk/src/helpers.rs | 13 ++++++++++ crates/cdk/src/main.rs | 40 ++++++++++++++++++------------- crates/cdk/src/versions.rs | 47 ++++++++++++++++++++++++++++++++++++ 6 files changed, 125 insertions(+), 36 deletions(-) create mode 100644 crates/cdk/src/helpers.rs create mode 100644 crates/cdk/src/versions.rs diff --git a/Cargo.lock b/Cargo.lock index 83b3b597..b9956840 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -688,6 +688,7 @@ dependencies = [ "anyhow", "cdk-config", "clap", + "colored", "dotenvy", "execute", "reqwest 0.12.8", @@ -851,6 +852,16 @@ version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "combine" version = "4.6.7" diff --git a/crates/cdk/Cargo.toml b/crates/cdk/Cargo.toml index 964d8f83..0c1f8274 100644 --- a/crates/cdk/Cargo.toml +++ b/crates/cdk/Cargo.toml @@ -14,6 +14,7 @@ toml = "0.8.14" tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } url = { workspace = true, features = ["serde"] } +colored = "2.0" cdk-config = { path = "../cdk-config" } diff --git a/crates/cdk/src/cli.rs b/crates/cdk/src/cli.rs index 12acc8a8..5b61bd9e 100644 --- a/crates/cdk/src/cli.rs +++ b/crates/cdk/src/cli.rs @@ -5,25 +5,8 @@ use clap::{Parser, Subcommand, ValueHint}; /// Command line interface. #[derive(Parser)] +#[command(author, version, about, long_about = None)] pub(crate) struct Cli { - /// The path to the configuration file. - #[arg( - long, - short, - value_hint = ValueHint::FilePath, - env = "CDK_CONFIG_PATH" - )] - pub(crate) config: PathBuf, - - /// The path to a chain specification file. - #[arg( - long, - short = 'g', - value_hint = ValueHint::FilePath, - env = "CDK_GENESIS_PATH" - )] - pub(crate) chain: PathBuf, - #[command(subcommand)] pub(crate) cmd: Commands, } @@ -31,6 +14,15 @@ pub(crate) struct Cli { #[derive(Subcommand)] pub(crate) enum Commands { Node { + /// The path to the configuration file. + #[arg( + long, + short, + value_hint = ValueHint::FilePath, + env = "CDK_CONFIG_PATH" + )] + config: PathBuf, + /// Components to run. #[arg( long, @@ -40,5 +32,24 @@ pub(crate) enum Commands { )] components: Option, }, - Erigon, + Erigon { + /// The path to the configuration file. + #[arg( + long, + short, + value_hint = ValueHint::FilePath, + env = "CDK_CONFIG_PATH" + )] + config: PathBuf, + + /// The path to a chain specification file. 
+ #[arg( + long, + short = 'g', + value_hint = ValueHint::FilePath, + env = "CDK_GENESIS_PATH" + )] + chain: PathBuf, + }, + Versions, } diff --git a/crates/cdk/src/helpers.rs b/crates/cdk/src/helpers.rs new file mode 100644 index 00000000..4fac948b --- /dev/null +++ b/crates/cdk/src/helpers.rs @@ -0,0 +1,13 @@ +use std::env; + +const CDK_CLIENT_BIN: &str = "cdk-node"; + +pub(crate) fn get_bin_path() -> String { + // This is to find the binary when running in development mode + // otherwise it will use system path + let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_CLIENT_BIN.into()); + if bin_path != CDK_CLIENT_BIN { + bin_path = format!("{}/../../target/{}", bin_path, CDK_CLIENT_BIN); + } + bin_path +} diff --git a/crates/cdk/src/main.rs b/crates/cdk/src/main.rs index 7f7f3991..61e66eab 100644 --- a/crates/cdk/src/main.rs +++ b/crates/cdk/src/main.rs @@ -4,8 +4,8 @@ use alloy_rpc_client::ReqwestClient; use cdk_config::Config; use clap::Parser; use cli::Cli; +use colored::*; use execute::Execute; -use std::env; use std::path::PathBuf; use std::process::Command; use url::Url; @@ -13,9 +13,10 @@ use url::Url; pub mod allocs_render; mod cli; mod config_render; +mod helpers; mod logging; +mod versions; -const CDK_CLIENT_BIN: &str = "cdk-node"; const CDK_ERIGON_BIN: &str = "cdk-erigon"; #[tokio::main] @@ -24,13 +25,8 @@ async fn main() -> anyhow::Result<()> { let cli = Cli::parse(); - // Read the config - let config = read_config(cli.config.clone())?; - - // Initialize the logger - logging::tracing(&config.log); - println!( + "{}", r#"🐼 _____ _ _____ _____ _ __ | __ \ | | / ____| __ \| |/ / @@ -41,12 +37,13 @@ async fn main() -> anyhow::Result<()> { __/ | __/ | |___/ |___/ "# + .purple() ); match cli.cmd { - cli::Commands::Node { components } => node(cli.config, components)?, - cli::Commands::Erigon {} => erigon(config, cli.chain).await?, - // _ => forward()?, + cli::Commands::Node { config, components } => node(config, components)?, + cli::Commands::Erigon { config, chain } => erigon(config, chain).await?, + cli::Commands::Versions {} => versions::versions(), } Ok(()) @@ -70,12 +67,15 @@ fn read_config(config_path: PathBuf) -> anyhow::Result { /// This function returns on fatal error or after graceful shutdown has /// completed. pub fn node(config_path: PathBuf, components: Option) -> anyhow::Result<()> { - // This is to find the erigon binary when running in development mode + // Read the config + let config = read_config(config_path.clone())?; + + // Initialize the logger + logging::tracing(&config.log); + + // This is to find the binary when running in development mode // otherwise it will use system path - let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_CLIENT_BIN.into()); - if bin_path != CDK_CLIENT_BIN { - bin_path = format!("{}/../../target/{}", bin_path, CDK_CLIENT_BIN); - } + let bin_path = helpers::get_bin_path(); let components_param = match components { Some(components) => format!("-components={}", components), @@ -118,7 +118,13 @@ pub fn node(config_path: PathBuf, components: Option) -> anyhow::Result< /// This is the main erigon entrypoint. /// This function starts everything needed to run an Erigon node. 
-pub async fn erigon(config: Config, genesis_file: PathBuf) -> anyhow::Result<()> { +pub async fn erigon(config_path: PathBuf, genesis_file: PathBuf) -> anyhow::Result<()> { + // Read the config + let config = read_config(config_path.clone())?; + + // Initialize the logger + logging::tracing(&config.log); + // Render configuration files let chain_id = config.aggregator.chain_id.clone(); let rpc_url = Url::parse(&config.sequence_sender.rpc_url).unwrap(); diff --git a/crates/cdk/src/versions.rs b/crates/cdk/src/versions.rs new file mode 100644 index 00000000..77581452 --- /dev/null +++ b/crates/cdk/src/versions.rs @@ -0,0 +1,47 @@ +use colored::*; +use execute::Execute; +use std::io; +use std::process::{Command, Output}; + +fn version() -> Result { + let bin_path = crate::helpers::get_bin_path(); + + // Run the node passing the config file path as argument + let mut command = Command::new(bin_path.clone()); + command.args(&["version"]); + + command.execute_output() +} + +pub(crate) fn versions() { + // Get the version of the cdk-node binary. + let output = version().unwrap(); + let version = String::from_utf8(output.stdout).unwrap(); + + println!("{}", format!("{}", version.trim()).green()); + + let versions = vec![ + ( + "zkEVM Contracts", + "https://github.com/0xPolygonHermez/zkevm-contracts/releases/tag/v8.0.0-rc.4-fork.12", + ), + ("zkEVM Prover", "v8.0.0-RC12"), + ("CDK Erigon", "hermeznetwork/cdk-erigon:0948e33"), + ( + "zkEVM Pool Manager", + "hermeznetwork/zkevm-pool-manager:v0.1.1", + ), + ( + "CDK Data Availability Node", + "0xpolygon/cdk-data-availability:0.0.10", + ), + ]; + + // Multi-line string to print the versions with colors. + let formatted_versions: Vec = versions + .iter() + .map(|(key, value)| format!("{}: {}", key.green(), value.blue())) + .collect(); + + println!("{}", formatted_versions.join("\n")); +} From fe655a6b6ee20675aae5cdc02ae75e697eceaf97 Mon Sep 17 00:00:00 2001 From: rbpol Date: Thu, 10 Oct 2024 18:04:55 +0100 Subject: [PATCH 36/53] feat: Add support for all the contracts on `test/helpers` so it's easy to build E2E tests (#115) --- aggoracle/e2e_test.go | 4 +- bridgesync/e2e_test.go | 44 +- claimsponsor/e2e_test.go | 3 +- .../datacommittee/datacommittee_test.go | 87 +--- l1infotreesync/e2e_test.go | 83 ++-- lastgersync/e2e_test.go | 4 +- reorgdetector/reorgdetector_test.go | 34 +- test/aggoraclehelpers/aggoracle_e2e.go | 199 +++++++++ test/helpers/aggoracle_e2e.go | 419 ------------------ test/helpers/simulated.go | 119 +++++ 10 files changed, 387 insertions(+), 609 deletions(-) create mode 100644 test/aggoraclehelpers/aggoracle_e2e.go delete mode 100644 test/helpers/aggoracle_e2e.go diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go index 25a8a96d..b1506032 100644 --- a/aggoracle/e2e_test.go +++ b/aggoracle/e2e_test.go @@ -8,7 +8,7 @@ import ( gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/test/helpers" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient/simulated" @@ -16,7 +16,7 @@ import ( ) func TestEVM(t *testing.T) { - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) runTest(t, env.GERL1Contract, env.AggOracleSender, env.L1Client, env.AuthL1) } diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index a19afb8d..c0a22484 100644 --- 
a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -8,60 +8,28 @@ import ( "testing" "time" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/require" ) -func newSimulatedClient(t *testing.T, auth *bind.TransactOpts) ( - client *simulated.Backend, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, -) { - t.Helper() - - var err error - balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeAddr, _, bridgeContract, err = polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(auth, client.Client()) - require.NoError(t, err) - client.Commit() - - return -} - func TestBridgeEventE2E(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, bridgeAddr, bridgeSc := newSimulatedClient(t, auth) + + client, setup := helpers.SimulatedBackend(t, nil, 0) rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg}) require.NoError(t, err) go rd.Start(ctx) //nolint:errcheck testClient := helpers.TestClient{ClientRenamed: client.Client()} - syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) + syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) require.NoError(t, err) go syncer.Start(ctx) @@ -71,15 +39,15 @@ func TestBridgeEventE2E(t *testing.T) { for i := 0; i < 100; i++ { bridge := bridgesync.Bridge{ - BlockNum: uint64(2 + i), + BlockNum: uint64(4 + i), Amount: big.NewInt(0), DepositCount: uint32(i), DestinationNetwork: 3, DestinationAddress: common.HexToAddress("f00"), Metadata: []byte{}, } - tx, err := bridgeSc.BridgeAsset( - auth, + tx, err := setup.EBZkevmBridgeContract.BridgeAsset( + setup.UserAuth, bridge.DestinationNetwork, bridge.DestinationAddress, bridge.Amount, diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 8a037a58..b4fce499 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -13,6 +13,7 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -22,7 +23,7 @@ import ( func TestE2EL1toEVML2(t *testing.T) { // start other needed components ctx := context.Background() - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) 
dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0) diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go index fcacef3c..7e2a8d3e 100644 --- a/dataavailability/datacommittee/datacommittee_test.go +++ b/dataavailability/datacommittee/datacommittee_test.go @@ -9,9 +9,9 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygondatacommittee" "github.com/0xPolygon/cdk/log" erc1967proxy "github.com/0xPolygon/cdk/test/contracts/erc1967proxy" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/assert" @@ -20,7 +20,7 @@ import ( func TestUpdateDataCommitteeEvent(t *testing.T) { // Set up testing environment - dac, ethBackend, auth, da := newTestingEnv(t) + dac, ethBackend, da, auth := newSimulatedDacman(t) // Update the committee requiredAmountOfSignatures := big.NewInt(2) @@ -63,82 +63,39 @@ func init() { }) } -// This function prepare the blockchain, the wallet with funds and deploy the smc -func newTestingEnv(t *testing.T) ( - dac *Backend, - ethBackend *simulated.Backend, - auth *bind.TransactOpts, - da *polygondatacommittee.Polygondatacommittee, -) { - t.Helper() - privateKey, err := crypto.GenerateKey() - if err != nil { - log.Fatal(err) - } - auth, err = bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - if err != nil { - log.Fatal(err) - } - dac, ethBackend, da, err = newSimulatedDacman(t, auth) - if err != nil { - log.Fatal(err) - } - - return dac, ethBackend, auth, da -} - // NewSimulatedEtherman creates an etherman that uses a simulated blockchain. It's important to notice that the ChainID of the auth // must be 1337. 
The address that holds the auth will have an initial balance of 10 ETH -func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( - dacman *Backend, - ethBackend *simulated.Backend, - da *polygondatacommittee.Polygondatacommittee, - err error, +func newSimulatedDacman(t *testing.T) ( + *Backend, + *simulated.Backend, + *polygondatacommittee.Polygondatacommittee, + *bind.TransactOpts, ) { t.Helper() - if auth == nil { - // read only client - return &Backend{}, nil, nil, nil - } - // 10000000 ETH in wei - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + + ethBackend, setup := helpers.SimulatedBackend(t, nil, 0) // DAC Setup - addr, _, _, err := smcparis.DeployPolygondatacommittee(auth, client.Client()) - if err != nil { - return &Backend{}, nil, nil, err - } - client.Commit() - proxyAddr, err := deployDACProxy(auth, client.Client(), addr) - if err != nil { - return &Backend{}, nil, nil, err - } + addr, _, _, err := smcparis.DeployPolygondatacommittee(setup.UserAuth, ethBackend.Client()) + require.NoError(t, err) + ethBackend.Commit() - client.Commit() - da, err = polygondatacommittee.NewPolygondatacommittee(proxyAddr, client.Client()) - if err != nil { - return &Backend{}, nil, nil, err - } + proxyAddr, err := deployDACProxy(setup.UserAuth, ethBackend.Client(), addr) + require.NoError(t, err) + ethBackend.Commit() - _, err = da.SetupCommittee(auth, big.NewInt(0), []string{}, []byte{}) - if err != nil { - return &Backend{}, nil, nil, err - } - client.Commit() + da, err := polygondatacommittee.NewPolygondatacommittee(proxyAddr, ethBackend.Client()) + require.NoError(t, err) + + _, err = da.SetupCommittee(setup.UserAuth, big.NewInt(0), []string{}, []byte{}) + require.NoError(t, err) + ethBackend.Commit() c := &Backend{ dataCommitteeContract: da, } - return c, client, da, nil + return c, ethBackend, da, setup.UserAuth } func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImpl common.Address) (common.Address, error) { diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index c522c73a..94596f23 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -2,7 +2,6 @@ package l1infotreesync_test import ( "context" - "errors" "fmt" "math/big" "path" @@ -16,6 +15,7 @@ import ( "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/contracts/verifybatchesmock" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -25,61 +25,44 @@ import ( "github.com/stretchr/testify/require" ) -func newSimulatedClient(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - verifyAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - verifyContract *verifybatchesmock.Verifybatchesmock, - err error, +func newSimulatedClient(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + common.Address, + *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + *verifybatchesmock.Verifybatchesmock, ) { + t.Helper() + ctx := context.Background() - balance, _ := 
new(big.Int).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + client, setup := helpers.SimulatedBackend(t, nil, 0) - nonce, err := client.Client().PendingNonceAt(ctx, auth.From) - if err != nil { - return - } - precalculatedAddr := crypto.CreateAddress(auth.From, nonce+1) - verifyAddr, _, verifyContract, err = verifybatchesmock.DeployVerifybatchesmock(auth, client.Client(), precalculatedAddr) - if err != nil { - return - } + nonce, err := client.Client().PendingNonceAt(ctx, setup.UserAuth.From) + require.NoError(t, err) + + precalculatedAddr := crypto.CreateAddress(setup.UserAuth.From, nonce+1) + verifyAddr, _, verifyContract, err := verifybatchesmock.DeployVerifybatchesmock(setup.UserAuth, client.Client(), precalculatedAddr) + require.NoError(t, err) client.Commit() - gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(auth, client.Client(), verifyAddr, auth.From) - if err != nil { - return - } + gerAddr, _, gerContract, err := polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(setup.UserAuth, client.Client(), verifyAddr, setup.UserAuth.From) + require.NoError(t, err) client.Commit() - if precalculatedAddr != gerAddr { - err = errors.New("error calculating addr") - } + require.Equal(t, precalculatedAddr, gerAddr) - return + return client, setup.UserAuth, gerAddr, verifyAddr, gerContract, verifyContract } func TestE2E(t *testing.T) { ctx := context.Background() dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) + rdm := l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) @@ -165,15 +148,13 @@ func TestWithReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) 
@@ -285,15 +266,13 @@ func TestStressAndReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file:TestStressAndReorgs:memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index 979d55a2..e4d5e407 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -9,7 +9,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/lastgersync" - "github.com/0xPolygon/cdk/test/helpers" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -17,7 +17,7 @@ import ( func TestE2E(t *testing.T) { ctx := context.Background() - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathSyncer := t.TempDir() syncer, err := lastgersync.New( ctx, diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go index 7efe0892..c99bb484 100644 --- a/reorgdetector/reorgdetector_test.go +++ b/reorgdetector/reorgdetector_test.go @@ -2,47 +2,21 @@ package reorgdetector import ( "context" - big "math/big" "testing" "time" cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/0xPolygon/cdk/test/helpers" "github.com/stretchr/testify/require" ) -func newSimulatedL1(t *testing.T, auth *bind.TransactOpts) *simulated.Backend { - t.Helper() - - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - - blockGasLimit := uint64(999999999999999999) - client := simulated.NewBackend(map[common.Address]types.Account{ - auth.From: { - Balance: balance, - }, - }, simulated.WithBlockGasLimit(blockGasLimit)) - client.Commit() - - return client -} - func Test_ReorgDetector(t *testing.T) { const subID = "test" ctx := context.Background() // Simulated L1 - privateKeyL1, err := crypto.GenerateKey() - require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) - require.NoError(t, err) - clientL1 := newSimulatedL1(t, authL1) - require.NoError(t, err) + clientL1, _ := helpers.SimulatedBackend(t, nil, 0) // Create test DB dir testDir := t.TempDir() @@ -92,6 +66,6 @@ func Test_ReorgDetector(t *testing.T) { headersList, ok := reorgDetector.trackedBlocks[subID] reorgDetector.trackedBlocksLock.Unlock() require.True(t, ok) - require.Equal(t, 1, headersList.len()) // Only block 2 left - require.Equal(t, remainingHeader.Hash(), 
headersList.get(2).Hash) + require.Equal(t, 1, headersList.len()) // Only block 3 left + require.Equal(t, remainingHeader.Hash(), headersList.get(4).Hash) } diff --git a/test/aggoraclehelpers/aggoracle_e2e.go b/test/aggoraclehelpers/aggoracle_e2e.go new file mode 100644 index 00000000..be362ccc --- /dev/null +++ b/test/aggoraclehelpers/aggoracle_e2e.go @@ -0,0 +1,199 @@ +package aggoraclehelpers + +import ( + "context" + "path" + "testing" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" + gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" + gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" + "github.com/0xPolygon/cdk/aggoracle" + "github.com/0xPolygon/cdk/aggoracle/chaingersender" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" + "github.com/0xPolygon/cdk/test/helpers" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/stretchr/testify/require" +) + +const ( + NetworkIDL2 = uint32(1) + syncBlockChunkSize = 10 + retries = 3 + periodRetry = time.Millisecond * 100 +) + +type AggoracleWithEVMChainEnv struct { + L1Client *simulated.Backend + L2Client *simulated.Backend + L1InfoTreeSync *l1infotreesync.L1InfoTreeSync + GERL1Contract *gerContractL1.Globalexitrootnopush0 + GERL1Addr common.Address + GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 + GERL2Addr common.Address + AuthL1 *bind.TransactOpts + AuthL2 *bind.TransactOpts + AggOracle *aggoracle.AggOracle + AggOracleSender aggoracle.ChainSender + ReorgDetector *reorgdetector.ReorgDetector + BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL1Addr common.Address + BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL2Addr common.Address + NetworkIDL2 uint32 + EthTxManMockL2 *helpers.EthTxManagerMock +} + +func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { + t.Helper() + + ctx := context.Background() + l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) + sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) + oracle, err := aggoracle.New( + log.GetDefaultLogger(), sender, + l1Client.Client(), syncer, + etherman.LatestBlock, time.Millisecond*20) //nolint:mnd + require.NoError(t, err) + go oracle.Start(ctx) + + return &AggoracleWithEVMChainEnv{ + L1Client: l1Client, + L2Client: l2Client, + L1InfoTreeSync: syncer, + GERL1Contract: gerL1Contract, + GERL1Addr: gerL1Addr, + GERL2Contract: gerL2Contract, + GERL2Addr: gerL2Addr, + AuthL1: authL1, + AuthL2: authL2, + AggOracle: oracle, + AggOracleSender: sender, + ReorgDetector: rd, + BridgeL1Contract: bridgeL1Contract, + BridgeL1Addr: bridgeL1Addr, + BridgeL2Contract: bridgeL2Contract, + BridgeL2Addr: bridgeL2Addr, + NetworkIDL2: NetworkIDL2, + EthTxManMockL2: ethTxManMockL2, + } +} + +func CommonSetup(t *testing.T) ( + *simulated.Backend, + *l1infotreesync.L1InfoTreeSync, + *gerContractL1.Globalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, + *bind.TransactOpts, + *reorgdetector.ReorgDetector, +) { + t.Helper() + + // Config and 
spin up + ctx := context.Background() + + // Simulated L1 + l1Client, authL1, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract := newSimulatedL1(t) + + // Reorg detector + dbPathReorgDetector := t.TempDir() + reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) + require.NoError(t, err) + + // Syncer + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, + gerL1Addr, common.Address{}, + syncBlockChunkSize, etherman.LatestBlock, + reorg, l1Client.Client(), + time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go syncer.Start(ctx) + + return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg +} + +func EVMSetup(t *testing.T) ( + aggoracle.ChainSender, + *simulated.Backend, + *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, + *bind.TransactOpts, + *helpers.EthTxManagerMock, +) { + t.Helper() + + l2Client, authL2, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc := newSimulatedEVMAggSovereignChain(t) + ethTxManMock := helpers.NewEthTxManMock(t, l2Client, authL2) + sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), + gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd + require.NoError(t, err) + + return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock +} + +func newSimulatedL1(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + *gerContractL1.Globalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, +) { + t.Helper() + + client, setup := helpers.SimulatedBackend(t, nil, 0) + + precalculatedAddr := crypto.CreateAddress(setup.DeployerAuth.From, 2) //nolint:mnd + + gerAddr, _, gerContract, err := gerContractL1.DeployGlobalexitrootnopush0(setup.DeployerAuth, client.Client(), + setup.UserAuth.From, setup.EBZkevmBridgeProxyAddr) + require.NoError(t, err) + client.Commit() + + require.Equal(t, precalculatedAddr, gerAddr) + + return client, setup.UserAuth, gerAddr, gerContract, setup.EBZkevmBridgeProxyAddr, setup.EBZkevmBridgeProxyContract +} + +func newSimulatedEVMAggSovereignChain(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, +) { + t.Helper() + + client, setup := helpers.SimulatedBackend(t, nil, NetworkIDL2) + + precalculatedAddr := crypto.CreateAddress(setup.DeployerAuth.From, 2) //nolint:mnd + + gerAddr, _, gerContract, err := gerContractEVMChain.DeployPessimisticglobalexitrootnopush0( + setup.DeployerAuth, client.Client(), setup.UserAuth.From) + require.NoError(t, err) + client.Commit() + + globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") + _, err = gerContract.GrantRole(setup.DeployerAuth, globalExitRootSetterRole, setup.UserAuth.From) + require.NoError(t, err) + client.Commit() + + hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, setup.UserAuth.From) + require.True(t, hasRole) + require.Equal(t, precalculatedAddr, gerAddr) + + return client, setup.UserAuth, gerAddr, gerContract, setup.EBZkevmBridgeProxyAddr, setup.EBZkevmBridgeProxyContract +} diff --git 
a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go deleted file mode 100644 index a19cfd42..00000000 --- a/test/helpers/aggoracle_e2e.go +++ /dev/null @@ -1,419 +0,0 @@ -package helpers - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" - gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" - gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" - "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/aggoracle/chaingersender" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/stretchr/testify/require" -) - -const ( - NetworkIDL2 = uint32(1) - chainID = 1337 - initialBalance = "10000000000000000000000000" - blockGasLimit = uint64(999999999999999999) - syncBlockChunkSize = 10 - retries = 3 - periodRetry = time.Millisecond * 100 -) - -type AggoracleWithEVMChainEnv struct { - L1Client *simulated.Backend - L2Client *simulated.Backend - L1InfoTreeSync *l1infotreesync.L1InfoTreeSync - GERL1Contract *gerContractL1.Globalexitrootnopush0 - GERL1Addr common.Address - GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 - GERL2Addr common.Address - AuthL1 *bind.TransactOpts - AuthL2 *bind.TransactOpts - AggOracle *aggoracle.AggOracle - AggOracleSender aggoracle.ChainSender - ReorgDetector *reorgdetector.ReorgDetector - BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 - BridgeL1Addr common.Address - BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 - BridgeL2Addr common.Address - NetworkIDL2 uint32 - EthTxManMockL2 *EthTxManagerMock -} - -func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { - t.Helper() - - ctx := context.Background() - l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) - sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) - oracle, err := aggoracle.New( - log.GetDefaultLogger(), sender, - l1Client.Client(), syncer, - etherman.LatestBlock, time.Millisecond*20) //nolint:mnd - require.NoError(t, err) - go oracle.Start(ctx) - - return &AggoracleWithEVMChainEnv{ - L1Client: l1Client, - L2Client: l2Client, - L1InfoTreeSync: syncer, - GERL1Contract: gerL1Contract, - GERL1Addr: gerL1Addr, - GERL2Contract: gerL2Contract, - GERL2Addr: gerL2Addr, - AuthL1: authL1, - AuthL2: authL2, - AggOracle: oracle, - AggOracleSender: sender, - ReorgDetector: rd, - BridgeL1Contract: bridgeL1Contract, - BridgeL1Addr: bridgeL1Addr, - BridgeL2Contract: bridgeL2Contract, - BridgeL2Addr: bridgeL2Addr, - NetworkIDL2: NetworkIDL2, - EthTxManMockL2: ethTxManMockL2, - } -} - -func CommonSetup(t *testing.T) ( - *simulated.Backend, - *l1infotreesync.L1InfoTreeSync, - *gerContractL1.Globalexitrootnopush0, - common.Address, - *polygonzkevmbridgev2.Polygonzkevmbridgev2, - common.Address, - *bind.TransactOpts, - *reorgdetector.ReorgDetector, -) { - t.Helper() - - // 
Config and spin up - ctx := context.Background() - // Simulated L1 - privateKeyL1, err := crypto.GenerateKey() - require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - require.NoError(t, err) - l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1) - require.NoError(t, err) - // Reorg detector - dbPathReorgDetector := t.TempDir() - reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) - require.NoError(t, err) - // Syncer - dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, - gerL1Addr, common.Address{}, - syncBlockChunkSize, etherman.LatestBlock, - reorg, l1Client.Client(), - time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) - go syncer.Start(ctx) - - return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg -} - -func EVMSetup(t *testing.T) ( - aggoracle.ChainSender, - *simulated.Backend, - *gerContractEVMChain.Pessimisticglobalexitrootnopush0, - common.Address, - *polygonzkevmbridgev2.Polygonzkevmbridgev2, - common.Address, - *bind.TransactOpts, - *EthTxManagerMock, -) { - t.Helper() - - privateKeyL2, err := crypto.GenerateKey() - require.NoError(t, err) - authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(chainID)) - require.NoError(t, err) - l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) - require.NoError(t, err) - ethTxManMock := NewEthTxManMock(t, l2Client, authL2) - sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), - gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd - require.NoError(t, err) - - return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock -} - -func newSimulatedL1(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - gerContract *gerContractL1.Globalexitrootnopush0, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - - privateKeyL1, err := crypto.GenerateKey() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) - } - - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) - } - - balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - authDeployer.From: { - Balance: balance, - }, - } - - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy bridge implementation: %w", err) - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) - } - precalculatedAddr := 
crypto.CreateAddress(authDeployer.From, nonce+1) - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - - dataCallProxy, err := bridgeABI.Pack("initialize", - uint32(0), // networkIDMainnet - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to pack data for proxy initialization: %w", err) - } - - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) - } - client.Commit() - - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to create bridge contract instance: %w", err) - } - - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false}) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to get Global Exit Root Manager: %w", err) - } - if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error deploying bridge, unexpected GER addr. Expected %s. Actual %s", - precalculatedAddr.Hex(), checkGERAddr.Hex(), - ) - } - - gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), - auth.From, bridgeAddr) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) - } - client.Commit() - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error calculating GER address. Expected %s. 
Actual %s", - precalculatedAddr.Hex(), gerAddr.Hex(), - ) - } - - return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil -} - -func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - - privateKeyL1, err := crypto.GenerateKey() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) - } - - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) - } - - balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd - address := auth.From - precalculatedBridgeAddr := crypto.CreateAddress(authDeployer.From, 1) - - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - authDeployer.From: { - Balance: balance, - }, - precalculatedBridgeAddr: { - Balance: balance, - }, - } - - const blockGasLimit = uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy bridge implementation: %w", err) - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) - } - precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - - dataCallProxy, err := bridgeABI.Pack("initialize", - NetworkIDL2, - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to pack data for proxy initialization: %w", err) - } - - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) - } - if bridgeAddr != precalculatedBridgeAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error calculating bridge addr. Expected: %s. 
Actual: %s", - precalculatedBridgeAddr, bridgeAddr, - ) - } - client.Commit() - - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to create bridge contract instance: %w", err) - } - - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to get Global Exit Root Manager: %w", err) - } - if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, errors.New( - "error deploying bridge, unexpected GER Manager address", - ) - } - - gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0( - authDeployer, client.Client(), auth.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) - } - client.Commit() - - globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") - _, err = gerContract.GrantRole(authDeployer, globalExitRootSetterRole, auth.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to grant role to GER contract: %w", err) - } - client.Commit() - - hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, auth.From) - if !hasRole { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to set role for GER contract") - } - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("error calculating GER address") - } - - return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil -} diff --git a/test/helpers/simulated.go b/test/helpers/simulated.go index eb4cab20..d85baf92 100644 --- a/test/helpers/simulated.go +++ b/test/helpers/simulated.go @@ -1,8 +1,24 @@ package helpers import ( + "math/big" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" + "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" +) + +const ( + defaultBlockGasLimit = uint64(999999999999999999) + defaultBalance = "10000000000000000000000000" + chainID = 1337 ) type ClientRenamed simulated.Client @@ -14,3 +30,106 @@ type TestClient struct { func (tc TestClient) Client() *rpc.Client { return nil } + +// SimulatedBackendSetup defines the setup for a simulated backend. +type SimulatedBackendSetup struct { + UserAuth *bind.TransactOpts + DeployerAuth *bind.TransactOpts + EBZkevmBridgeAddr common.Address + EBZkevmBridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + EBZkevmBridgeProxyAddr common.Address + EBZkevmBridgeProxyContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 +} + +// SimulatedBackend creates a simulated backend with two accounts: user and deployer. 
+func SimulatedBackend( + t *testing.T, + balances map[common.Address]types.Account, + ebZkevmBridgeNetwork uint32, +) (*simulated.Backend, *SimulatedBackendSetup) { + t.Helper() + + // Define default balance + balance, ok := new(big.Int).SetString(defaultBalance, 10) //nolint:mnd + require.Truef(t, ok, "failed to set balance") + + // Create user + userPK, err := crypto.GenerateKey() + require.NoError(t, err) + userAuth, err := bind.NewKeyedTransactorWithChainID(userPK, big.NewInt(chainID)) + require.NoError(t, err) + + // Create deployer + deployerPK, err := crypto.GenerateKey() + require.NoError(t, err) + deployerAuth, err := bind.NewKeyedTransactorWithChainID(deployerPK, big.NewInt(chainID)) + require.NoError(t, err) + precalculatedBridgeAddr := crypto.CreateAddress(deployerAuth.From, 1) + + // Define balances map + if balances == nil { + balances = make(map[common.Address]types.Account) + } + balances[userAuth.From] = types.Account{Balance: balance} + balances[deployerAuth.From] = types.Account{Balance: balance} + balances[precalculatedBridgeAddr] = types.Account{Balance: balance} + + client := simulated.NewBackend(balances, simulated.WithBlockGasLimit(defaultBlockGasLimit)) + + // Mine the first block + client.Commit() + + // MUST BE DEPLOYED FIRST + // Deploy zkevm bridge contract + ebZkevmBridgeAddr, _, ebZkevmBridgeContract, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(deployerAuth, client.Client()) + require.NoError(t, err) + client.Commit() + + // Create proxy contract for the bridge + var ebZkevmBridgeProxyAddr common.Address + var ebZkevmBridgeProxyContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + { + precalculatedAddr := crypto.CreateAddress(deployerAuth.From, 2) //nolint:mnd + + bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() + require.NoError(t, err) + require.NotNil(t, bridgeABI) + + dataCallProxy, err := bridgeABI.Pack("initialize", + ebZkevmBridgeNetwork, + common.Address{}, // gasTokenAddressMainnet + uint32(0), // gasTokenNetworkMainnet + precalculatedAddr, + common.Address{}, + []byte{}, // gasTokenMetadata + ) + require.NoError(t, err) + + ebZkevmBridgeProxyAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( + deployerAuth, + client.Client(), + ebZkevmBridgeAddr, + deployerAuth.From, + dataCallProxy, + ) + require.NoError(t, err) + require.Equal(t, precalculatedBridgeAddr, ebZkevmBridgeProxyAddr) + client.Commit() + + ebZkevmBridgeProxyContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(ebZkevmBridgeProxyAddr, client.Client()) + require.NoError(t, err) + + checkGERAddr, err := ebZkevmBridgeProxyContract.GlobalExitRootManager(&bind.CallOpts{}) + require.NoError(t, err) + require.Equal(t, precalculatedAddr, checkGERAddr) + } + + return client, &SimulatedBackendSetup{ + UserAuth: userAuth, + DeployerAuth: deployerAuth, + EBZkevmBridgeAddr: ebZkevmBridgeAddr, + EBZkevmBridgeContract: ebZkevmBridgeContract, + EBZkevmBridgeProxyAddr: ebZkevmBridgeProxyAddr, + EBZkevmBridgeProxyContract: ebZkevmBridgeProxyContract, + } +} From ef6849a01cf70a399150ce306bbf334a83a6c599 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Fri, 11 Oct 2024 15:18:59 +0200 Subject: [PATCH 37/53] fix: shorthand and add descriptions (#123) * fix: shorthand and add descriptions * ci: bump cdk-erigon --- crates/cdk/src/cli.rs | 11 +++++++---- test/combinations/fork11-rollup.yml | 2 +- test/combinations/fork12-cdk-validium.yml | 2 +- test/combinations/fork12-rollup.yml | 2 +- test/combinations/fork9-cdk-validium.yml | 2 +- 5 
files changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/cdk/src/cli.rs b/crates/cdk/src/cli.rs index 5b61bd9e..d108543d 100644 --- a/crates/cdk/src/cli.rs +++ b/crates/cdk/src/cli.rs @@ -13,11 +13,12 @@ pub(crate) struct Cli { #[derive(Subcommand)] pub(crate) enum Commands { + /// Run the cdk-node with the provided configuration Node { - /// The path to the configuration file. + /// The path to the configuration file #[arg( long, - short, + short = 'C', value_hint = ValueHint::FilePath, env = "CDK_CONFIG_PATH" )] @@ -32,11 +33,12 @@ pub(crate) enum Commands { )] components: Option, }, + /// Run cdk-erigon node with the provided default configuration Erigon { - /// The path to the configuration file. + /// The path to the configuration file #[arg( long, - short, + short = 'C', value_hint = ValueHint::FilePath, env = "CDK_CONFIG_PATH" )] @@ -51,5 +53,6 @@ pub(crate) enum Commands { )] chain: PathBuf, }, + /// Output the corresponding versions of the components Versions, } diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index 89f2f6cd..4fea8d70 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -2,7 +2,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.0-RC31-fork.11 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index cc0a34dc..345d37ca 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index ba8bb440..fe0389b8 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index 13f16d48..17f99890 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -3,7 +3,7 @@ args: zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk - cdk_erigon_node_image: hermeznetwork/cdk-erigon:0948e33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: From 3abdb5ae6d98c651017dadc3b0e2ba6183106de7 Mon Sep 17 00:00:00 2001 From: rbpol Date: Mon, 14 Oct 2024 15:02:27 +0100 Subject: [PATCH 38/53] feat: L1 Info Tree sync testing (#124) --- l1infotreesync/e2e_test.go 
| 7 +- l1infotreesync/processor.go | 5 +- l1infotreesync/processor_test.go | 146 +++++++++++++++++++++++++++++++ sync/evmdriver.go | 3 + 4 files changed, 156 insertions(+), 5 deletions(-) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 94596f23..61e7ff28 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -56,7 +56,7 @@ func newSimulatedClient(t *testing.T) ( } func TestE2E(t *testing.T) { - ctx := context.Background() + ctx, cancelCtx := context.WithCancel(context.Background()) dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") rdm := l1infotreesync.NewReorgDetectorMock(t) @@ -96,6 +96,11 @@ func TestE2E(t *testing.T) { require.Equal(t, common.Hash(expectedRoot), actualRoot.Hash) } + // Restart syncer + cancelCtx() + ctx = context.Background() + go syncer.Start(ctx) + // Update 3 rollups (verify batches event) 3 times for rollupID := uint32(1); rollupID < 3; rollupID++ { for i := 0; i < 3; i++ { diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c6a4ef1a..e7115a60 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -248,10 +248,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return err } - if err := tx.Commit(); err != nil { - return err - } - return nil + return tx.Commit() } // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index b31d2237..52a81ce8 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "golang.org/x/net/context" @@ -121,3 +122,148 @@ func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { _, err = sut.GetLatestInfoUntilBlock(ctx, 1) require.Equal(t, db.ErrNotFound, err) } + +func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + getProcessor func(t *testing.T) *processor + idx uint32 + expectedRoot types.Root + expectedErr error + }{ + { + name: "empty tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_1?mode=memory&cache=shared") + require.NoError(t, err) + + return p + }, + idx: 0, + expectedErr: db.ErrNotFound, + }, + { + name: "single leaf tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_2?mode=memory&cache=shared") + require.NoError(t, err) + + info := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + err = p.ProcessBlock(context.Background(), sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info}, + }, + }) + require.NoError(t, err) + + return p + }, + idx: 0, + expectedRoot: types.Root{ + Hash: common.HexToHash("beef"), + Index: 0, + BlockNum: 1, + BlockPosition: 0, + }, + }, + } + + for _, tt := range testTable { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + p := tt.getProcessor(t) + proof, root, err := p.GetL1InfoTreeMerkleProof(context.Background(), tt.idx) + if tt.expectedErr != nil { + require.Equal(t, 
tt.expectedErr, err) + } else { + require.NoError(t, err) + require.NotEmpty(t, proof) + require.NotEmpty(t, root.Hash) + require.Equal(t, tt.expectedRoot.Index, root.Index) + require.Equal(t, tt.expectedRoot.BlockNum, root.BlockNum) + require.Equal(t, tt.expectedRoot.BlockPosition, root.BlockPosition) + } + }) + } +} + +func Test_processor_Reorg(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + getProcessor func(t *testing.T) *processor + reorgBlock uint64 + expectedErr error + }{ + { + name: "empty tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_Reorg_1?mode=memory&cache=shared") + require.NoError(t, err) + return p + }, + reorgBlock: 0, + expectedErr: nil, + }, + { + name: "single leaf tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_Reorg_2?mode=memory&cache=shared") + require.NoError(t, err) + + info := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + err = p.ProcessBlock(context.Background(), sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info}, + }, + }) + require.NoError(t, err) + + return p + }, + reorgBlock: 1, + }, + } + + for _, tt := range testTable { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + p := tt.getProcessor(t) + err := p.Reorg(context.Background(), tt.reorgBlock) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 7865f645..52eaaaae 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -92,6 +92,9 @@ reset: for { select { + case <-ctx.Done(): + d.log.Info("sync stopped due to context done") + return case b := <-downloadCh: d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) d.handleNewBlock(ctx, b) From 8e2015f7acf3a2279f2a6e5cc4df536723fdf6a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:23:30 +0200 Subject: [PATCH 39/53] feat: warning on agglayer rate limit (#122) * feat: retry on agglayer rate limit exceeded --- aggregator/agglayer/agglayer_client.go | 7 +++++++ aggregator/aggregator.go | 7 +++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/aggregator/agglayer/agglayer_client.go b/aggregator/agglayer/agglayer_client.go index dbe48fb2..a5222571 100644 --- a/aggregator/agglayer/agglayer_client.go +++ b/aggregator/agglayer/agglayer_client.go @@ -13,6 +13,10 @@ import ( "github.com/ethereum/go-ethereum/common" ) +const errCodeAgglayerRateLimitExceeded int = -10007 + +var ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") + // AgglayerClientInterface is the interface that defines the methods that the AggLayerClient will implement type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) @@ -39,6 +43,9 @@ func (c *AggLayerClient) SendTx(signedTx SignedTx) (common.Hash, error) { } if response.Error != nil { + if response.Error.Code == errCodeAgglayerRateLimitExceeded { + return common.Hash{}, ErrAgglayerRateLimitExceeded + } return common.Hash{}, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) } diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 4d887136..2003f0e2 100644 --- 
a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -945,9 +945,12 @@ func (a *Aggregator) settleWithAggLayer( a.logger.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) txHash, err := a.aggLayerClient.SendTx(*signedTx) if err != nil { - a.logger.Errorf("failed to send tx to the agglayer: %v", err) + if errors.Is(err, agglayer.ErrAgglayerRateLimitExceeded) { + a.logger.Errorf("%s. Config param VerifyProofInterval should match the agglayer configured rate limit.", err) + } else { + a.logger.Errorf("failed to send tx to the agglayer: %v", err) + } a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) - return false } From 99dc3570cd9aba32ecc533ab10519b6d529f61f7 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Tue, 15 Oct 2024 15:12:30 +0200 Subject: [PATCH 40/53] chore: bump erigon (#127) --- test/combinations/fork11-rollup.yml | 2 +- test/combinations/fork12-cdk-validium.yml | 2 +- test/combinations/fork12-rollup.yml | 2 +- test/combinations/fork9-cdk-validium.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index 4fea8d70..aff80b2e 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -2,7 +2,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.0-RC31-fork.11 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index 345d37ca..300abb84 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index fe0389b8..f0e1a969 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,7 +1,7 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index 17f99890..481b45c9 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -3,7 +3,7 @@ args: zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk - cdk_erigon_node_image: hermeznetwork/cdk-erigon:57cda33 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: From 549b10203009c1bc59bd014a9946ab2df2b7458f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Negovanovi=C4=87?= 
<93934272+Stefan-Ethernal@users.noreply.github.com> Date: Wed, 16 Oct 2024 16:24:07 +0200 Subject: [PATCH 41/53] chore: sequence sender improvements (#117) * chore: reduce map indexing in syncEthTxResults and propagate an error * chore: handle the context.Done and rely on ticker instead of time.Sleep in sequenceSending fn * chore: introduce IsStopped func * fix: failing unit tests * chore: remove the mutex from updateLatestVirtualBatch * chore: simplify the for loop when iterating through batches * chore: simplify the marginTimeElapsed func * chore: simplify waiting for time margin to elapse * chore: simplify purge logic * fix: wait for margin for latest l1 block --------- Co-authored-by: Goran Rojovic --- sequencesender/ethtx.go | 26 ++- sequencesender/sequencesender.go | 236 ++++++++++++++------------ sequencesender/sequencesender_test.go | 42 +++-- 3 files changed, 167 insertions(+), 137 deletions(-) diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go index 32bc62b4..a88cff60 100644 --- a/sequencesender/ethtx.go +++ b/sequencesender/ethtx.go @@ -115,7 +115,7 @@ func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *com // purgeEthTx purges transactions from memory structures func (s *SequenceSender) purgeEthTx(ctx context.Context) { // If sequence sending is stopped, do not purge - if atomic.LoadUint32(&s.seqSendingStopped) == 1 { + if s.IsStopped() { return } @@ -162,28 +162,30 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) { } // syncEthTxResults syncs results from L1 for transactions in the memory structure -func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam +func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { s.mutexEthTx.Lock() var ( txPending uint64 txSync uint64 ) - for hash, data := range s.ethTransactions { - if data.Status == types.MonitoredTxStatusFinalized.String() { + for hash, tx := range s.ethTransactions { + if tx.Status == types.MonitoredTxStatusFinalized.String() { continue } err := s.getResultAndUpdateEthTx(ctx, hash) if err != nil { log.Errorf("error getting result for tx %v: %v", hash, err) + return 0, err } + txSync++ - txStatus := s.ethTransactions[hash].Status + txStatus := types.MonitoredTxStatus(tx.Status) // Count if it is not in a final state - if s.ethTransactions[hash].OnMonitor && - txStatus != types.MonitoredTxStatusFailed.String() && - txStatus != types.MonitoredTxStatusSafe.String() && - txStatus != types.MonitoredTxStatusFinalized.String() { + if tx.OnMonitor && + txStatus != types.MonitoredTxStatusFailed && + txStatus != types.MonitoredTxStatusSafe && + txStatus != types.MonitoredTxStatusFinalized { txPending++ } } @@ -193,6 +195,7 @@ func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { err := s.saveSentSequencesTransactions(ctx) if err != nil { log.Errorf("error saving tx sequence, error: %v", err) + return 0, err } log.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) @@ -388,3 +391,8 @@ func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) erro return nil } + +// IsStopped returns true in case seqSendingStopped is set to 1, otherwise false +func (s *SequenceSender) IsStopped() bool { + return atomic.LoadUint32(&s.seqSendingStopped) == 1 +} diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 468866c2..0a044356 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -69,7 +69,6 @@ type 
SequenceSender struct { validStream bool // Not valid while receiving data before the desired batch seqSendingStopped uint32 // If there is a critical error TxBuilder txbuilder.TxBuilder - latestVirtualBatchLock sync.Mutex } type sequenceData struct { @@ -124,7 +123,7 @@ func (s *SequenceSender) Start(ctx context.Context) { go s.ethTxManager.Start() // Get latest virtual state batch from L1 - err := s.getLatestVirtualBatch() + err := s.updateLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } @@ -220,16 +219,27 @@ func (s *SequenceSender) populateSequenceData(rpcBatch *rpcbatch.RPCBatch, batch // sequenceSending starts loop to check if there are sequences to send and sends them if it's convenient func (s *SequenceSender) sequenceSending(ctx context.Context) { + // Create a ticker that fires every WaitPeriodSendSequence + ticker := time.NewTicker(s.cfg.WaitPeriodSendSequence.Duration) + defer ticker.Stop() + for { - s.tryToSendSequence(ctx) - time.Sleep(s.cfg.WaitPeriodSendSequence.Duration) + select { + case <-ctx.Done(): + s.logger.Info("context canceled, stopping sequence sending") + return + + case <-ticker.C: + // Trigger the sequence sending when the ticker fires + s.tryToSendSequence(ctx) + } } } // purgeSequences purges batches from memory structures func (s *SequenceSender) purgeSequences() { // If sequence sending is stopped, do not purge - if atomic.LoadUint32(&s.seqSendingStopped) == 1 { + if s.IsStopped() { return } @@ -238,8 +248,7 @@ func (s *SequenceSender) purgeSequences() { defer s.mutexSequence.Unlock() truncateUntil := 0 toPurge := make([]uint64, 0) - for i := 0; i < len(s.sequenceList); i++ { - batchNumber := s.sequenceList[i] + for i, batchNumber := range s.sequenceList { if batchNumber <= atomic.LoadUint64(&s.latestVirtualBatchNumber) { truncateUntil = i + 1 toPurge = append(toPurge, batchNumber) @@ -249,16 +258,10 @@ func (s *SequenceSender) purgeSequences() { if len(toPurge) > 0 { s.sequenceList = s.sequenceList[truncateUntil:] - var firstPurged uint64 - var lastPurged uint64 - for i := 0; i < len(toPurge); i++ { - if i == 0 { - firstPurged = toPurge[i] - } - if i == len(toPurge)-1 { - lastPurged = toPurge[i] - } - delete(s.sequenceData, toPurge[i]) + firstPurged := toPurge[0] + lastPurged := toPurge[len(toPurge)-1] + for _, batchNum := range toPurge { + delete(s.sequenceData, batchNum) } s.logger.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) } @@ -268,27 +271,27 @@ func (s *SequenceSender) purgeSequences() { func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Update latest virtual batch s.logger.Infof("updating virtual batch") - err := s.getLatestVirtualBatch() + err := s.updateLatestVirtualBatch() if err != nil { return } - // Update state of transactions - s.logger.Infof("updating tx results") - countPending, err := s.syncEthTxResults(ctx) - if err != nil { + // Check if the sequence sending is stopped + if s.IsStopped() { + s.logger.Warnf("sending is stopped!") return } - // Check if the sequence sending is stopped - if atomic.LoadUint32(&s.seqSendingStopped) == 1 { - s.logger.Warnf("sending is stopped!") + // Update state of transactions + s.logger.Infof("updating tx results") + pendingTxsCount, err := s.syncEthTxResults(ctx) + if err != nil { return } // Check if reached the maximum number of pending transactions - if countPending >= s.cfg.MaxPendingTx { - s.logger.Infof("max number of pending txs (%d) reached. 
Waiting for some to be completed", countPending) + if pendingTxsCount >= s.cfg.MaxPendingTx { + s.logger.Infof("max number of pending txs (%d) reached. Waiting for some to be completed", pendingTxsCount) return } @@ -305,7 +308,6 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Send sequences to L1 firstBatch := sequence.FirstBatch() lastBatch := sequence.LastBatch() - lastL2BlockTimestamp := lastBatch.LastL2BLockTimestamp() s.logger.Debugf(sequence.String()) s.logger.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) @@ -313,55 +315,27 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp // of the last L2 block in the sequence timeMargin := int64(s.cfg.L1BlockTimestampMargin.Seconds()) - for { - // Get header of the last L1 block - lastL1BlockHeader, err := s.etherman.GetLatestBlockHeader(ctx) - if err != nil { - s.logger.Errorf("failed to get last L1 block timestamp, err: %v", err) - return - } - elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin) + err = s.waitForMargin(ctx, lastBatch, timeMargin, "L1 block block timestamp", + func() (uint64, error) { + lastL1BlockHeader, err := s.etherman.GetLatestBlockHeader(ctx) + if err != nil { + return 0, err + } - if !elapsed { - s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+ - "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", - waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, - lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin, - ) - time.Sleep(time.Duration(waitTime) * time.Second) - } else { - s.logger.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) "+ - "in the sequence is greater than %d seconds", - lastL1BlockHeader.Number, - lastL1BlockHeader.Time, - lastBatch.BatchNumber, - lastL2BlockTimestamp, - timeMargin, - ) - break - } + return lastL1BlockHeader.Time, nil + }) + if err != nil { + s.logger.Errorf("error waiting for L1 block time margin: %v", err) + return } - // Sanity check: Wait also until current time is L1BlockTimestampMargin seconds above the - // timestamp of the last L2 block in the sequence - for { - currentTime := uint64(time.Now().Unix()) - - elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, currentTime, timeMargin) - - // Wait if the time difference is less than L1BlockTimestampMargin - if !elapsed { - s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+ - "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", - waitTime, currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) - time.Sleep(time.Duration(waitTime) * time.Second) - } else { - s.logger.Infof("sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+ - "in the sequence is also greater than %d seconds", - currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) - break - } + // Sanity check: Wait until the current time is also L1BlockTimestampMargin seconds above the last L2 block timestamp + err = s.waitForMargin(ctx, lastBatch, timeMargin, "current time", + func() (uint64, error) { return uint64(time.Now().Unix()), nil }) + if err != nil { + s.logger.Errorf("error waiting for current time margin: %v", err) + return } // Send 
sequences to L1 @@ -375,20 +349,14 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Get latest virtual state batch from L1 - err = s.getLatestVirtualBatch() + err = s.updateLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } sequence.SetLastVirtualBatchNumber(atomic.LoadUint64(&s.latestVirtualBatchNumber)) - txToEstimateGas, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) - if err != nil { - s.logger.Errorf("error building sequenceBatches tx to estimate gas: %v", err) - return - } - - gas, err := s.etherman.EstimateGas(ctx, s.cfg.SenderAddress, tx.To(), nil, txToEstimateGas.Data()) + gas, err := s.etherman.EstimateGas(ctx, s.cfg.SenderAddress, tx.To(), nil, tx.Data()) if err != nil { s.logger.Errorf("error estimating gas: ", err) return @@ -404,14 +372,71 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { s.purgeSequences() } +// waitForMargin ensures that the time difference between the last L2 block and the current +// timestamp exceeds the time margin before proceeding. It checks immediately, and if not +// satisfied, it waits using a ticker and rechecks periodically. +// +// Params: +// - ctx: Context to handle cancellation. +// - lastBatch: The last batch in the sequence. +// - timeMargin: Required time difference in seconds. +// - description: A description for logging purposes. +// - getTimeFn: Function to get the current time (e.g., L1 block time or current time). +func (s *SequenceSender) waitForMargin(ctx context.Context, lastBatch seqsendertypes.Batch, + timeMargin int64, description string, getTimeFn func() (uint64, error)) error { + referentTime, err := getTimeFn() + if err != nil { + return err + } + + lastL2BlockTimestamp := lastBatch.LastL2BLockTimestamp() + elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, referentTime, timeMargin) + if elapsed { + s.logger.Infof("time difference for %s exceeds %d seconds, proceeding (batch number: %d, last l2 block ts: %d)", + description, timeMargin, lastBatch.BatchNumber(), lastL2BlockTimestamp) + return nil + } + + s.logger.Infof("waiting %d seconds for %s, margin less than %d seconds (batch number: %d, last l2 block ts: %d)", + waitTime, description, timeMargin, lastBatch.BatchNumber(), lastL2BlockTimestamp) + ticker := time.NewTicker(time.Duration(waitTime) * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + s.logger.Infof("context canceled during %s wait (batch number: %d, last l2 block ts: %d)", + description, lastBatch.BatchNumber(), lastL2BlockTimestamp) + return ctx.Err() + + case <-ticker.C: + referentTime, err = getTimeFn() + if err != nil { + return err + } + + elapsed, waitTime = marginTimeElapsed(lastL2BlockTimestamp, referentTime, timeMargin) + if elapsed { + s.logger.Infof("time margin for %s now exceeds %d seconds, proceeding (batch number: %d, last l2 block ts: %d)", + description, timeMargin, lastBatch.BatchNumber(), lastL2BlockTimestamp) + return nil + } + + s.logger.Infof( + "waiting another %d seconds for %s, margin still less than %d seconds (batch number: %d, last l2 block ts: %d)", + waitTime, description, timeMargin, lastBatch.BatchNumber(), lastL2BlockTimestamp) + ticker.Reset(time.Duration(waitTime) * time.Second) + } + } +} + func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes.Sequence, error) { // Add sequences until too big for a single L1 tx or last batch is reached s.mutexSequence.Lock() defer s.mutexSequence.Unlock() var 
prevCoinbase common.Address sequenceBatches := make([]seqsendertypes.Batch, 0) - for i := 0; i < len(s.sequenceList); i++ { - batchNumber := s.sequenceList[i] + for _, batchNumber := range s.sequenceList { if batchNumber <= atomic.LoadUint64(&s.latestVirtualBatchNumber) || batchNumber <= atomic.LoadUint64(&s.latestSentToL1Batch) { continue @@ -419,7 +444,7 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // Check if the next batch belongs to a new forkid, in this case we need to stop sequencing as we need to // wait the upgrade of forkid is completed and s.cfg.NumBatchForkIdUpgrade is disabled (=0) again - if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber + 1)) { + if s.cfg.ForkUpgradeBatchNumber != 0 && batchNumber == (s.cfg.ForkUpgradeBatchNumber+1) { return nil, fmt.Errorf( "aborting sequencing process as we reached the batch %d where a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber+1, @@ -452,7 +477,7 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes // Check if the current batch is the last before a change to a new forkid // In this case we need to close and send the sequence to L1 - if (s.cfg.ForkUpgradeBatchNumber != 0) && (batchNumber == (s.cfg.ForkUpgradeBatchNumber)) { + if s.cfg.ForkUpgradeBatchNumber != 0 && batchNumber == s.cfg.ForkUpgradeBatchNumber { s.logger.Infof("sequence should be sent to L1, as we have reached the batch %d "+ "from which a new forkid is applied (upgrade)", s.cfg.ForkUpgradeBatchNumber, @@ -476,14 +501,9 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes return nil, nil } -// getLatestVirtualBatch queries the value in L1 and updates the latest virtual batch field -func (s *SequenceSender) getLatestVirtualBatch() error { - s.latestVirtualBatchLock.Lock() - defer s.latestVirtualBatchLock.Unlock() - +// updateLatestVirtualBatch queries the value in L1 and updates the latest virtual batch field +func (s *SequenceSender) updateLatestVirtualBatch() error { // Get latest virtual state batch from L1 - var err error - latestVirtualBatchNumber, err := s.etherman.GetLatestBatchNumber() if err != nil { s.logger.Errorf("error getting latest virtual batch, error: %v", err) @@ -491,7 +511,6 @@ func (s *SequenceSender) getLatestVirtualBatch() error { } atomic.StoreUint64(&s.latestVirtualBatchNumber, latestVirtualBatchNumber) - s.logger.Infof("latest virtual batch is %d", latestVirtualBatchNumber) return nil @@ -509,29 +528,20 @@ func (s *SequenceSender) logFatalf(template string, args ...interface{}) { // marginTimeElapsed checks if the time between currentTime and l2BlockTimestamp is greater than timeMargin. // If it's greater returns true, otherwise it returns false and the waitTime needed to achieve this timeMargin -func marginTimeElapsed( - l2BlockTimestamp uint64, currentTime uint64, timeMargin int64, -) (bool, int64) { - // Check the time difference between L2 block and currentTime - var timeDiff int64 - if l2BlockTimestamp >= currentTime { - // L2 block timestamp is above currentTime, negative timeDiff. 
We do in this way to avoid uint64 overflow - timeDiff = int64(-(l2BlockTimestamp - currentTime)) - } else { - timeDiff = int64(currentTime - l2BlockTimestamp) - } - - // Check if the time difference is less than timeMargin (L1BlockTimestampMargin) +func marginTimeElapsed(l2BlockTimestamp uint64, currentTime uint64, timeMargin int64) (bool, int64) { + if int64(l2BlockTimestamp)-timeMargin > int64(currentTime) { + return true, 0 + } + + timeDiff := int64(currentTime) - int64(l2BlockTimestamp) + + // If the difference is less than the required margin, return false and calculate the remaining wait time if timeDiff < timeMargin { - var waitTime int64 - if timeDiff < 0 { // L2 block timestamp is above currentTime - waitTime = timeMargin + (-timeDiff) - } else { - waitTime = timeMargin - timeDiff - } + // Calculate the wait time needed to reach the timeMargin + waitTime := timeMargin - timeDiff return false, waitTime } - // timeDiff is greater than timeMargin + // Time difference is greater than or equal to timeMargin, no need to wait return true, 0 } diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go index 432c5d39..3db4a803 100644 --- a/sequencesender/sequencesender_test.go +++ b/sequencesender/sequencesender_test.go @@ -145,8 +145,9 @@ func Test_Start(t *testing.T) { etherman: tt.getEtherman(t), ethTxManager: tt.getEthTxManager(t), cfg: Config{ - SequencesTxFileName: tmpFile.Name() + ".tmp", - GetBatchWaitInterval: tt.batchWaitDuration, + SequencesTxFileName: tmpFile.Name() + ".tmp", + GetBatchWaitInterval: tt.batchWaitDuration, + WaitPeriodSendSequence: types2.NewDuration(1 * time.Millisecond), }, logger: log.GetDefaultLogger(), } @@ -356,8 +357,9 @@ func Test_tryToSendSequence(t *testing.T) { etherman: tt.getEtherman(t), TxBuilder: tt.getTxBuilder(t), cfg: Config{ - SequencesTxFileName: tmpFile.Name() + ".tmp", - MaxPendingTx: tt.maxPendingTxn, + SequencesTxFileName: tmpFile.Name() + ".tmp", + MaxPendingTx: tt.maxPendingTxn, + WaitPeriodSendSequence: types2.NewDuration(time.Millisecond), }, sequenceList: tt.sequenceList, latestSentToL1Batch: tt.latestSentToL1Batch, @@ -555,10 +557,10 @@ func Test_marginTimeElapsed(t *testing.T) { timeMargin int64 } tests := []struct { - name string - args args - want bool - want1 int64 + name string + args args + expectedIsElapsed bool + expectedWaitTime int64 }{ { name: "time elapsed", @@ -567,8 +569,8 @@ func Test_marginTimeElapsed(t *testing.T) { currentTime: 200, timeMargin: 50, }, - want: true, - want1: 0, + expectedIsElapsed: true, + expectedWaitTime: 0, }, { name: "time not elapsed", @@ -577,8 +579,18 @@ func Test_marginTimeElapsed(t *testing.T) { currentTime: 200, timeMargin: 150, }, - want: false, - want1: 50, + expectedIsElapsed: false, + expectedWaitTime: 50, + }, + { + name: "l2 block in the future (time margin not enough)", + args: args{ + l2BlockTimestamp: 300, + currentTime: 200, + timeMargin: 50, + }, + expectedIsElapsed: true, + expectedWaitTime: 0, }, } @@ -588,9 +600,9 @@ func Test_marginTimeElapsed(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - got, got1 := marginTimeElapsed(tt.args.l2BlockTimestamp, tt.args.currentTime, tt.args.timeMargin) - require.Equal(t, tt.want, got, "marginTimeElapsed() got = %v, want %v", got, tt.want) - require.Equal(t, tt.want1, got1, "marginTimeElapsed() got1 = %v, want %v", got1, tt.want1) + isElapsed, waitTime := marginTimeElapsed(tt.args.l2BlockTimestamp, tt.args.currentTime, tt.args.timeMargin) + require.Equal(t, tt.expectedIsElapsed, isElapsed, 
"marginTimeElapsed() isElapsed = %t, want %t", isElapsed, tt.expectedIsElapsed) + require.Equal(t, tt.expectedWaitTime, waitTime, "marginTimeElapsed() got1 = %v, want %v", waitTime, tt.expectedWaitTime) }) } } From 67b110eb4b39a8cb473b77530f2e41f097fa0029 Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Wed, 16 Oct 2024 21:52:52 +0530 Subject: [PATCH 42/53] test: L1infotree and Merkletree (#129) * test: l1infotree * test: merkle tree * test: refactor * fix: lint --- l1infotree/tree_test.go | 131 +++++++++ merkletree/key_test.go | 127 ++++++++ merkletree/split_test.go | 276 ++++++++++++++++++ test/testutils/utils.go | 26 ++ test/vectors/l1infotree.go | 21 ++ .../l1-info-tree/proof-vectors.json | 158 ++++++++++ .../l1-info-tree/root-vectors.json | 35 +++ .../src/merkle-tree/smt-hash-bytecode.json | 46 +++ .../merkle-tree/smt-key-contract-code.json | 17 ++ .../merkle-tree/smt-key-contract-length.json | 17 ++ .../merkle-tree/smt-key-contract-storage.json | 38 +++ .../src/merkle-tree/smt-key-eth-balance.json | 42 +++ .../src/merkle-tree/smt-key-eth-nonce.json | 42 +++ 13 files changed, 976 insertions(+) create mode 100644 l1infotree/tree_test.go create mode 100644 merkletree/key_test.go create mode 100644 merkletree/split_test.go create mode 100644 test/testutils/utils.go create mode 100644 test/vectors/l1infotree.go create mode 100644 test/vectors/src/merkle-tree/l1-info-tree/proof-vectors.json create mode 100644 test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json create mode 100644 test/vectors/src/merkle-tree/smt-hash-bytecode.json create mode 100644 test/vectors/src/merkle-tree/smt-key-contract-code.json create mode 100644 test/vectors/src/merkle-tree/smt-key-contract-length.json create mode 100644 test/vectors/src/merkle-tree/smt-key-contract-storage.json create mode 100644 test/vectors/src/merkle-tree/smt-key-eth-balance.json create mode 100644 test/vectors/src/merkle-tree/smt-key-eth-nonce.json diff --git a/l1infotree/tree_test.go b/l1infotree/tree_test.go new file mode 100644 index 00000000..6af4b8b3 --- /dev/null +++ b/l1infotree/tree_test.go @@ -0,0 +1,131 @@ +package l1infotree_test + +import ( + "encoding/hex" + "encoding/json" + "os" + "testing" + + "github.com/0xPolygon/cdk/l1infotree" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/test/vectors" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestComputeTreeRoot(t *testing.T) { + data, err := os.ReadFile("../test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json") + require.NoError(t, err) + var mtTestVectors []vectors.L1InfoTree + err = json.Unmarshal(data, &mtTestVectors) + require.NoError(t, err) + for _, testVector := range mtTestVectors { + input := testVector.PreviousLeafValues + mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), [][32]byte{}) + require.NoError(t, err) + + var leaves [][32]byte + for _, v := range input { + leaves = append(leaves, v) + } + + if len(leaves) != 0 { + root, err := mt.BuildL1InfoRoot(leaves) + require.NoError(t, err) + require.Equal(t, testVector.CurrentRoot, root) + } + + leaves = append(leaves, testVector.NewLeafValue) + newRoot, err := mt.BuildL1InfoRoot(leaves) + require.NoError(t, err) + require.Equal(t, testVector.NewRoot, newRoot) + } +} + +func TestComputeMerkleProof(t *testing.T) { + logger := log.GetDefaultLogger() + mt, err := l1infotree.NewL1InfoTree(logger, uint8(32), [][32]byte{}) + require.NoError(t, err) + leaves := [][32]byte{ + 
common.HexToHash("0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d"), + common.HexToHash("0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d"), + common.HexToHash("0x0349657c7850dc9b2b73010501b01cd6a38911b6a2ad2167c164c5b2a5b344de"), + common.HexToHash("0xb32f96fad8af99f3b3cb90dfbb4849f73435dbee1877e4ac2c213127379549ce"), + common.HexToHash("0x79ffa1294bf48e0dd41afcb23b2929921e4e17f2f81b7163c23078375b06ba4f"), + common.HexToHash("0x0004063b5c83f56a17f580db0908339c01206cdf8b59beb13ce6f146bb025fe2"), + common.HexToHash("0x68e4f2c517c7f60c3664ac6bbe78f904eacdbe84790aa0d15d79ddd6216c556e"), + common.HexToHash("0xf7245f4d84367a189b90873e4563a000702dbfe974b872fdb13323a828c8fb71"), + common.HexToHash("0x0e43332c71c6e2f4a48326258ea17b75d77d3063a4127047dd32a4cb089e62a4"), + common.HexToHash("0xd35a1dc90098c0869a69891094c119eb281cee1a7829d210df1bf8afbea08adc"), + common.HexToHash("0x13bffd0da370d1e80a470821f1bee9607f116881feb708f1ec255da1689164b3"), + common.HexToHash("0x5fa79a24c9bc73cd507b02e5917cef9782529080aa75eacb2bf4e1d45fda7f1d"), + common.HexToHash("0x975b5bbc67345adc6ee6d1d67d1d5cd2a430c231d93e5a8b5a6f00b0c0862215"), + common.HexToHash("0x0d0fa887c045a53ec6212dee58964d0ae89595b7d11745a05c397240a4dceb20"), + common.HexToHash("0xa6ae5bc494a2ee0e5173d0e0b546533973104e0031c69d0cd65cdc7bb4d64670"), + common.HexToHash("0x21ccc18196a8fd74e720c6c129977d80bb804d3331673d6411871df14f7e7ae4"), + common.HexToHash("0xf8b1b98ac75bea8dbed034d0b3cd08b4c9275644c2242781a827e53deb2386c3"), + common.HexToHash("0x26401c418ef8bc5a80380f25f16dfc78b7053a26c0ca425fda294b1678b779fc"), + common.HexToHash("0xc53fd99005361738fc811ce87d194deed34a7f06ebd5371b19a008e8d1e8799f"), + common.HexToHash("0x570bd643e35fbcda95393994812d9212335e6bd4504b3b1dc8f3c6f1eeb247b2"), + common.HexToHash("0xb21ac971d007810540583bd3c0d4f35e0c2f4b62753e51c104a5753c6372caf8"), + common.HexToHash("0xb8dae305b34c749cbbd98993bfd71ec2323e8364861f25b4c5e0ac3c9587e16d"), + common.HexToHash("0x57c7fabd0f70e0059e871953fcb3dd43c6b8a5f348dbe771190cc8b0320336a5"), + common.HexToHash("0x95b0d23c347e2a88fc8e2ab900b09212a1295ab8f169075aa27e8719557d9b06"), + common.HexToHash("0x95b0d23c347e2a88fc8e2ab900b09212a1295ab8f169075aa27e8719557d9b06"), + common.HexToHash("0x95b0d23c347e2a88fc8e2ab900b09212a1295ab8f169075aa27e8719557d9b06"), + } + require.Equal(t, 26, len(leaves)) + siblings, root, err := mt.ComputeMerkleProof(1, leaves) + require.NoError(t, err) + require.Equal(t, "0x4ed479841384358f765966486782abb598ece1d4f834a22474050d66a18ad296", root.String()) + expectedProof := []string{"0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d", "0x2815e0bbb1ec18b8b1bc64454a86d072e12ee5d43bb559b44059e01edff0af7a", "0x7fb6cc0f2120368a845cf435da7102ff6e369280f787bc51b8a989fc178f7252", "0x407db5edcdc0ddd4f7327f208f46db40c4c4dbcc46c94a757e1d1654acbd8b72", "0xce2cdd1ef2e87e82264532285998ff37024404ab3a2b77b50eb1ad856ae83e14", "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", 
"0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"} + for i := 0; i < len(siblings); i++ { + require.Equal(t, expectedProof[i], "0x"+hex.EncodeToString(siblings[i][:])) + } +} + +func TestAddLeaf(t *testing.T) { + data, err := os.ReadFile("../test/vectors/src/merkle-tree/l1-info-tree/proof-vectors.json") + require.NoError(t, err) + var mtTestVectors []vectors.L1InfoTreeProof + err = json.Unmarshal(data, &mtTestVectors) + require.NoError(t, err) + testVector := mtTestVectors[3] + var leaves [][32]byte + mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), leaves) + require.NoError(t, err) + for _, leaf := range testVector.Leaves { + _, count, _ := mt.GetCurrentRootCountAndSiblings() + _, err := mt.AddLeaf(count, leaf) + require.NoError(t, err) + } + log.Debugf("%d leaves added successfully", len(testVector.Leaves)) + root, _, _ := mt.GetCurrentRootCountAndSiblings() + require.Equal(t, testVector.Root, root) + log.Debug("Final root: ", root) +} + +func TestAddLeaf2(t *testing.T) { + data, err := os.ReadFile("../test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json") + require.NoError(t, err) + var mtTestVectors []vectors.L1InfoTree + err = json.Unmarshal(data, &mtTestVectors) + require.NoError(t, err) + for _, testVector := range mtTestVectors { + input := testVector.PreviousLeafValues + + var leaves [][32]byte + for _, v := range input { + leaves = append(leaves, v) + } + mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), leaves) + require.NoError(t, err) + + initialRoot, count, _ := mt.GetCurrentRootCountAndSiblings() + require.Equal(t, testVector.CurrentRoot, initialRoot) + + newRoot, err := mt.AddLeaf(count, testVector.NewLeafValue) + require.NoError(t, err) + require.Equal(t, testVector.NewRoot, newRoot) + } +} diff --git a/merkletree/key_test.go b/merkletree/key_test.go new file mode 100644 index 00000000..fab056f9 --- /dev/null +++ b/merkletree/key_test.go @@ -0,0 +1,127 @@ +package merkletree + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "os" + "path" + "runtime" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type testVectorKey struct { + EthAddr string `json:"ethAddr"` 
+ StoragePosition string `json:"storagePosition"` + ExpectedKey string `json:"expectedKey"` +} + +type bytecodeTest struct { + Bytecode string `json:"bytecode"` + ExpectedHash string `json:"expectedHash"` +} + +func init() { + // Change dir to project root + // This is important because we have relative paths to files containing test vectors + _, filename, _, _ := runtime.Caller(0) //nolint:dogsled + dir := path.Join(path.Dir(filename), "../") + + if err := os.Chdir(dir); err != nil { + panic(err) + } +} + +func Test_CommonKeys(t *testing.T) { + tcs := []struct { + description string + testVectorFile string + keyFunc func(common.Address) ([]byte, error) + }{ + { + description: "keyEthAddressBalance", + testVectorFile: "test/vectors/src/merkle-tree/smt-key-eth-balance.json", + keyFunc: KeyEthAddrBalance, + }, + { + description: "keyEthAddressNonce", + testVectorFile: "test/vectors/src/merkle-tree/smt-key-eth-nonce.json", + keyFunc: KeyEthAddrNonce, + }, + { + description: "keyContractCode", + testVectorFile: "test/vectors/src/merkle-tree/smt-key-contract-code.json", + keyFunc: KeyContractCode, + }, + { + description: "keyCodeLength", + testVectorFile: "test/vectors/src/merkle-tree/smt-key-contract-length.json", + keyFunc: KeyCodeLength, + }, + } + for _, tc := range tcs { + tc := tc + + data, err := os.ReadFile(tc.testVectorFile) + require.NoError(t, err) + + var testVectors []testVectorKey + err = json.Unmarshal(data, &testVectors) + require.NoError(t, err) + + for ti, testVector := range testVectors { + t.Run(fmt.Sprintf("%s, test vector %d", tc.description, ti), func(t *testing.T) { + key, err := tc.keyFunc(common.HexToAddress(testVector.EthAddr)) + require.NoError(t, err) + require.Equal(t, len(key), maxBigIntLen) + + expected, _ := new(big.Int).SetString(testVector.ExpectedKey, 10) + assert.Equal(t, hex.EncodeToString(expected.Bytes()), hex.EncodeToString(key)) + }) + } + } +} + +func Test_KeyContractStorage(t *testing.T) { + data, err := os.ReadFile("test/vectors/src/merkle-tree/smt-key-contract-storage.json") + require.NoError(t, err) + + var testVectors []testVectorKey + err = json.Unmarshal(data, &testVectors) + require.NoError(t, err) + + for ti, testVector := range testVectors { + t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { + storagePosition, ok := new(big.Int).SetString(testVector.StoragePosition, 10) + require.True(t, ok) + key, err := KeyContractStorage(common.HexToAddress(testVector.EthAddr), storagePosition.Bytes()) + require.NoError(t, err) + require.Equal(t, len(key), maxBigIntLen) + + expected, _ := new(big.Int).SetString(testVector.ExpectedKey, 10) + assert.Equal(t, hex.EncodeToString(expected.Bytes()), hex.EncodeToString(key)) + }) + } +} + +func Test_byteCodeHash(t *testing.T) { + data, err := os.ReadFile("test/vectors/src/merkle-tree/smt-hash-bytecode.json") + require.NoError(t, err) + + var testVectors []bytecodeTest + err = json.Unmarshal(data, &testVectors) + require.NoError(t, err) + + for ti, testVector := range testVectors { + t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { + hash, err := HashContractBytecode(common.Hex2Bytes(testVector.Bytecode)) + require.NoError(t, err) + assert.Equal(t, common.HexToHash(testVector.ExpectedHash), common.HexToHash(H4ToString(hash))) + }) + } +} diff --git a/merkletree/split_test.go b/merkletree/split_test.go new file mode 100644 index 00000000..eff615db --- /dev/null +++ b/merkletree/split_test.go @@ -0,0 +1,276 @@ +package merkletree + +import ( + "fmt" + "math/big" + "reflect" + "testing" + + 
"github.com/0xPolygon/cdk/hex" + "github.com/0xPolygon/cdk/test/testutils" + "github.com/stretchr/testify/require" +) + +func TestScalar2Fea(t *testing.T) { + tests := []struct { + name string + input string + expected []uint64 + }{ + { + name: "Zero value", + input: "0", + expected: []uint64{0, 0, 0, 0, 0, 0, 0, 0}, + }, + { + name: "Single 32-bit value", + input: "FFFFFFFF", + expected: []uint64{0xFFFFFFFF, 0, 0, 0, 0, 0, 0, 0}, + }, + { + name: "Mixed bits across chunks (128-bit)", + input: "1234567890ABCDEF1234567890ABCDEF", + expected: []uint64{0x90ABCDEF, 0x12345678, 0x90ABCDEF, 0x12345678, 0, 0, 0, 0}, + }, + { + name: "All bits set in each 32-bit chunk (256-bit)", + input: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + expected: []uint64{0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + inputVal, success := new(big.Int).SetString(tt.input, 16) + if !success { + t.Fatalf("Invalid input value: %s", tt.input) + } + + result := scalar2fea(inputVal) + + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("scalar2fea(%s) = %v, want %v", tt.input, result, tt.expected) + } + }) + } +} + +func Test_h4ToScalar(t *testing.T) { + tcs := []struct { + input []uint64 + expected string + }{ + { + input: []uint64{0, 0, 0, 0}, + expected: "0", + }, + { + input: []uint64{0, 1, 2, 3}, + expected: "18831305206160042292187933003464876175252262292329349513216", + }, + } + + for i, tc := range tcs { + tc := tc + t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + actual := h4ToScalar(tc.input) + expected, ok := new(big.Int).SetString(tc.expected, 10) + require.True(t, ok) + require.Equal(t, expected, actual) + }) + } +} + +func Test_scalarToh4(t *testing.T) { + tcs := []struct { + input string + expected []uint64 + }{ + { + input: "0", + expected: []uint64{0, 0, 0, 0}, + }, + { + input: "18831305206160042292187933003464876175252262292329349513216", + expected: []uint64{0, 1, 2, 3}, + }, + } + + for i, tc := range tcs { + tc := tc + t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + bi, ok := new(big.Int).SetString(tc.input, 10) + require.True(t, ok) + + actual := scalarToh4(bi) + require.Equal(t, tc.expected, actual) + }) + } +} + +func Test_h4ToString(t *testing.T) { + tcs := []struct { + input []uint64 + expected string + }{ + { + input: []uint64{0, 0, 0, 0}, + expected: "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + input: []uint64{0, 1, 2, 3}, + expected: "0x0000000000000003000000000000000200000000000000010000000000000000", + }, + } + + for i, tc := range tcs { + tc := tc + t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + actual := H4ToString(tc.input) + require.Equal(t, tc.expected, actual) + }) + } +} + +func Test_Conversions(t *testing.T) { + tcs := []struct { + input []uint64 + }{ + { + input: []uint64{0, 0, 0, 0}, + }, + { + input: []uint64{0, 1, 2, 3}, + }, + } + + for i, tc := range tcs { + tc := tc + t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + resScalar := h4ToScalar(tc.input) + init := scalarToh4(resScalar) + require.Equal(t, tc.input, init) + }) + } +} + +func Test_stringToh4(t *testing.T) { + tcs := []struct { + description string + input string + expected []uint64 + expectedErr bool + expectedErrMsg string + }{ + { + description: "happy path", + input: "cafe", + expected: []uint64{51966, 0, 0, 0}, + }, + { + description: "0x prefix is allowed", + input: "0xcafe", + expected: 
[]uint64{51966, 0, 0, 0}, + }, + + { + description: "non hex input causes error", + input: "yu74", + expectedErr: true, + expectedErrMsg: "could not convert \"yu74\" into big int", + }, + { + description: "empty input causes error", + input: "", + expectedErr: true, + expectedErrMsg: "could not convert \"\" into big int", + }, + } + + for _, tc := range tcs { + tc := tc + t.Run(tc.description, func(t *testing.T) { + actual, err := StringToh4(tc.input) + require.NoError(t, testutils.CheckError(err, tc.expectedErr, tc.expectedErrMsg)) + + require.Equal(t, tc.expected, actual) + }) + } +} + +func Test_ScalarToFilledByteSlice(t *testing.T) { + tcs := []struct { + input string + expected string + }{ + { + input: "0", + expected: "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + input: "256", + expected: "0x0000000000000000000000000000000000000000000000000000000000000100", + }, + { + input: "235938498573495379548793890390932048239042839490238", + expected: "0x0000000000000000000000a16f882ee8972432c0a71c5e309ad5f7215690aebe", + }, + { + input: "4309593458485959083095843905390485089430985490434080439904305093450934509490", + expected: "0x098724b9a1bc97eee674cf5b6b56b8fafd83ac49c3da1f2c87c822548bbfdfb2", + }, + { + input: "98999023430240239049320492430858334093493024832984092384902398409234090932489", + expected: "0xdadf762a31e865f150a1456d7db7963c91361b771c8381a3fb879cf5bf91b909", + }, + } + + for i, tc := range tcs { + tc := tc + t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { + input, ok := big.NewInt(0).SetString(tc.input, 10) + require.True(t, ok) + + actualSlice := ScalarToFilledByteSlice(input) + + actual := hex.EncodeToHex(actualSlice) + + require.Equal(t, tc.expected, actual) + }) + } +} + +func Test_h4ToFilledByteSlice(t *testing.T) { + tcs := []struct { + input []uint64 + expected string + }{ + { + input: []uint64{0, 0, 0, 0}, + expected: "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + input: []uint64{0, 1, 2, 3}, + expected: "0x0000000000000003000000000000000200000000000000010000000000000000", + }, + { + input: []uint64{55345354959, 991992992929, 2, 3}, + expected: "0x00000000000000030000000000000002000000e6f763d4a10000000ce2d718cf", + }, + { + input: []uint64{8398349845894398543, 3485942349435495945, 734034022234249459, 5490434584389534589}, + expected: "0x4c31f12a390ec37d0a2fd00ddc52d8f330608e18f597e609748ceeb03ffe024f", + }, + } + + for i, tc := range tcs { + tc := tc + t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { + actualSlice := h4ToFilledByteSlice(tc.input) + + actual := hex.EncodeToHex(actualSlice) + + require.Equal(t, tc.expected, actual) + }) + } +} diff --git a/test/testutils/utils.go b/test/testutils/utils.go new file mode 100644 index 00000000..cda8de71 --- /dev/null +++ b/test/testutils/utils.go @@ -0,0 +1,26 @@ +package testutils + +import ( + "fmt" + "strings" +) + +// CheckError checks the given error taking into account if it was expected and +// potentially the message it should carry. 
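+// For example (illustrative, based on the implementation below): CheckError(err, false, "")
+// returns nil only when err is nil, while CheckError(err, true, "could not convert") returns
+// nil only when err is non-nil and err.Error() starts with "could not convert".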
+func CheckError(err error, expected bool, msg string) error { + if !expected && err != nil { + return fmt.Errorf("unexpected error %w", err) + } + if expected { + if err == nil { + return fmt.Errorf("expected error didn't happen") + } + if msg == "" { + return fmt.Errorf("expected error message not defined") + } + if !strings.HasPrefix(err.Error(), msg) { + return fmt.Errorf("wrong error, expected %q, got %q", msg, err.Error()) + } + } + return nil +} diff --git a/test/vectors/l1infotree.go b/test/vectors/l1infotree.go new file mode 100644 index 00000000..fe675aa0 --- /dev/null +++ b/test/vectors/l1infotree.go @@ -0,0 +1,21 @@ +package vectors + +import ( + "github.com/ethereum/go-ethereum/common" +) + +// L1InfoTree holds the test vector for the merkle tree +type L1InfoTree struct { + PreviousLeafValues []common.Hash `json:"previousLeafValues"` + CurrentRoot common.Hash `json:"currentRoot"` + NewLeafValue common.Hash `json:"newLeafValue"` + NewRoot common.Hash `json:"newRoot"` +} + +// L1InfoTree holds the test vector for the merkle tree +type L1InfoTreeProof struct { + Leaves []common.Hash `json:"leaves"` + Index uint `json:"index"` + Proof []common.Hash `json:"proof"` + Root common.Hash `json:"root"` +} diff --git a/test/vectors/src/merkle-tree/l1-info-tree/proof-vectors.json b/test/vectors/src/merkle-tree/l1-info-tree/proof-vectors.json new file mode 100644 index 00000000..b390a540 --- /dev/null +++ b/test/vectors/src/merkle-tree/l1-info-tree/proof-vectors.json @@ -0,0 +1,158 @@ +[ + { + "leaves": ["0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5","0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f","0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d","0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242"], + "index": 0, + "proof": [ + "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", + "0xb48c8301099f75206bc93b1512c7b3855b60b4f8cbaedf8679a184d1d450a4f1", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + 
"0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" + }, + { + "leaves": ["0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5","0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f","0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d","0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242"], + "index": 1, + "proof": [ + "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", + "0xb48c8301099f75206bc93b1512c7b3855b60b4f8cbaedf8679a184d1d450a4f1", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" + }, + { + "leaves": 
["0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5","0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f","0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d","0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242"], + "index": 2, + "proof": [ + "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242", + "0x653142d4a4d6f7985a3f33cad31e011dbee8909846b34c38c7b235ca08828521", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" + }, + { + "leaves": ["0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5","0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f","0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d","0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242"], + "index": 3, + "proof": [ + "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d", + "0x653142d4a4d6f7985a3f33cad31e011dbee8909846b34c38c7b235ca08828521", + "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", + "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", + "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", + "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", + "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", + 
"0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", + "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", + "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", + "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", + "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", + "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", + "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", + "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", + "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", + "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", + "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", + "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", + "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", + "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", + "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", + "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", + "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", + "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", + "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", + "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", + "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", + "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", + "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", + "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", + "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" + ], + "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" + } + ] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json b/test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json new file mode 100644 index 00000000..6d29f5fe --- /dev/null +++ b/test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json @@ -0,0 +1,35 @@ +[ + { + "previousLeafValues": [], + "currentRoot": "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757", + "newLeafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", + "newRoot": "0xbf7ddbb59aa018a4c74e061f5172973ff09e4cb7f58405af117fc521f1ca46aa" + }, + { + "previousLeafValues": [ + "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" + ], + "currentRoot": "0xbf7ddbb59aa018a4c74e061f5172973ff09e4cb7f58405af117fc521f1ca46aa", + "newLeafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", + "newRoot": "0xa7042a3ce14f384bbff63f1cee6ee5579193c2d7002e0034854963322cda6128" + }, + { + "previousLeafValues": [ + "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", + "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" + ], + "currentRoot": "0xa7042a3ce14f384bbff63f1cee6ee5579193c2d7002e0034854963322cda6128", + "newLeafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d", + "newRoot": "0x88e652896cb1de5962a0173a222059f51e6b943a2ba6dfc9acbff051ceb1abb5" + }, + { + "previousLeafValues": [ + "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", + "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", + "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" + ], + 
"currentRoot": "0x88e652896cb1de5962a0173a222059f51e6b943a2ba6dfc9acbff051ceb1abb5", + "newLeafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242", + "newRoot": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/smt-hash-bytecode.json b/test/vectors/src/merkle-tree/smt-hash-bytecode.json new file mode 100644 index 00000000..cadb27fc --- /dev/null +++ b/test/vectors/src/merkle-tree/smt-hash-bytecode.json @@ -0,0 +1,46 @@ +[ + { + "bytecode": "dead", + "expectedHash": "0x2549d1fb0dc984e3098f235473637bd9e40aab1692c87e0afaf58720d2fbb8cd" + }, + { + "bytecode": "123456789abcde123456789abcde123456789abcde123456789abcde123456789abcde123456789abcde123456789abcde123456789abcdeff", + "expectedHash": "0xb26e257fb87ad0976c69af4af03c9ee20449d18b0be000aa749b5b342a445308" + }, + { + "bytecode": "8231e0e8e502600b14bb0a2c9689f7d93d10e9f5451f18f0a9b6f123", + "expectedHash": "0x31cd3428959051f652c12f729473d52c0956368643ff086514f983595c034067" + }, + { + "bytecode": "ce0e8e502600b14bb0a2c9689f7d93d10e9f5451f18f030ec3bb6c5001", + "expectedHash": "0xa29092cb3f80b471d45d2e1bcca7fdcdb1083370e5952b56166cf03e73f24d31" + }, + { + "bytecode": "34665289b71a2cb8bf4c289ae6d17d845457c48bfc18623ca39e141b2e40c5d3", + "expectedHash": "0x26aa5d09e2046f5ab7e311b32c6e34fa52a6dc8257a34b494af84fe1471c589c" + }, + { + "bytecode": "3211bcce6a7d8132020223eef1a03385ba6bd4966b295c2e2211a8d6d9e389fe6bf08f21497774456be2e47fdb6740aa571338c71c38c0a6d7f703007569e64031633ec7c8ef2ba25ad6a248403deb697457fae8a4a7f4525d73a3d4cd93334a894efbb20d0a6391df0aae46bc32005834ed084aeb08887e08eb67cde004fea6f8036b061fa8cb7246af2458a4cef79c648b13ef8ac50d9a8863be1c58a7a9a5940006022611ca35508b993656cf3fd0175579c6983414701134cc0becc51364289d4775b71b67f269a16fe653a00ab75885924777feaa990cce9c561802581b9092e9be2a0d03fd86361e427b94d8600a7edc67c263b35a0be6837e750175b50314c7d4642534b3233c963e397f63e6d7187b114eef1346412de83993cb79bc80e9a921fa59ccccda30e57025ccaa0830e1eb1ea5c87ca6fc887aedabdab1bb4cf6022440960b0e03f5de85137d48392873851d13f8035b67e6a5f5c5bac7598fe2f91673f3875b40faad43357862b76e9c6062b3342f199bec165e3093b8c25e21ac626d718e8aaa0d8aacc034a2da4a6ff3de36891ddd30b22abedf0f72b493e9f16aaa65fddeff83612b1d07989e1d6d1ba7600123645c5920f55678cf518d8f58d73d6227e710bcf6dfcfe309c5d67e4f51fbb18aa3922c07c35e5fefa66c0c57553d5ab9e323591031ecfb0b84", + "expectedHash": "0x41d68cbcc953afc92898502cb7d4464f9674ee5d7484a221670f4d20df59e8d9" + }, + { + "bytecode": "e7190c27f2adfb643bdbaa686b8372df9e8132d079640d43f218bfe3b8fafacfe0d7855d77cc195e05d855d2d828e627be305b34f4de52894d3672515f2e1dcfd4e6909a5406d5dbdff31e38d2400299dc6a5f2509052c76393d57a786afc51c18d63c9bd2edd70b8d82f867c5269556a636d1c8c7fb45884adf264c9ae64731dfac21efb29dcddb5bef66149f7af55f0263dc16f7cd8f58f3eb1f1b5246a76ae64ccf7731df0e17963efa4b786d3365a2f2adfd87767d3bdfa104c443c2c0eb79d408cf0469b592f2863988bee8b42e9955255c3d632edf1a3de2a305f573112575f4958192f1b433089fd928dc8f38263f43be6d7cb83acbe4ac2bc0d44f219edbfcafaa29fabda4c8b41add2525fa38982649fc22ef1221273441e65ce62ea200ed951a1619ee6c053793096788c09406b2f9bd09d579dc1fef5f44ba91460f93aaf278bf3f4d25536c1ecf3af64f83a7a04ec049e54ffa007721cfc1a336089824bff3a23f39421234ba1a5f6113eae0bbfbf6a9295d5d473838a6dae3e34620bf365ff588a1ebce6c5bf9c46038a5f323d23ba7ec5d9afc48127612eec3620fff7472dc342f4e56d5e36fd910a66ec8d95ead3f06eb47f612063ea4d64b90bcf5f199684e99f98732029478d99505ca73b86e0cf4b51c63d9cdb24fb4f54908e4cb98aec0af7587b2a4b9477061f2", + "expectedHash": 
"0xcab4081cbdc6f8b378ac8a4b82ea15913dcc9de2305b1c8af52446dec891f0ff" + }, + { + "bytecode": "8246af280f1863b4eabf37a548ce764a296fd81be5b23c4fc04ee3540c81a765c795c8beedc4a6a1d67a53fcc9525bc2e2d31369fc64e0f547a4de44bd312af1288604fd183dbb52bbdd445ce870c24b829007162a438eb22fbfb08939f4b314f86f264d17126cfd1cd50028eec5aabd9bb1f5c759938b02f0d9d1cbccc4655735b65cf80cad3ce3fab37fac5833652d68d633a3d12ce024e9d16d1fa8a0c5ba8072286e855594471255dffe96a87568501813bac166a92f356ab38032097e5a68406bc22faee6db58f5ae36f24e877ea72a5dc6978c4a5c7f671e635ec4430da1fd2e9e9587b2a8b64b841108f3f5c7af3be9fef9e940478b021352055320d55bb2bf292a1e796154c5e530284a16dda5aeb29baca584f76487cf20f5bb409cfb6247a6918ebf8df1c854551a2184cd06df5706b2fb4d92a70cece8eb4bcee91934f09c0310efcbff2dbc9aa5b6ea818d96f471f9025cb46e4acd98a4ecc6dd2d647282f05a2c586ffe4b94ac12f36bd65cf6f3903d0228f92df340afb425acc5df8433407de697a44a7780506896799c5dca139ed9498880c5b17739859c30b4caa7945984d3d7d4c8450ad1c0f37c50f280fce45f791ffe12b199eab24eca47223efcdc86d764b57fb3afcdc4b01b6f35a3773f0331912bda3ba36d705a8ea506c99d573255233f13eba6d88280d4bf26d24419fc9b93139b74880d407cb796c2d6d1385b6a456dd7ab5755a8465972ae1d2e34d50a302bb1617cd75251f83af0f2be482d29b78b9b4320accf1c45407bc1b2c6dbbd26a8d8811ca95bd88e59bed19a163ab88a5d9b3e286eb3b4cf7ea388d32d613e0f16331570b93fe79ebc221e8fa8eb22e08b205237435e5395198bef4d264953b3fbd72c761603f1b343e62363369dd3f1c382487655fdeb6aba314f500466fea3293ef6feef458bfaec77f6f1f3ce3525b7fe2df433b07330d179eb427739782c95c8767ab113444208a30a4e78e6e18869972a412ef28f3925bcb857e0716e66814dc31abc37bf20219eb9c60f35ef4e1f10b73889c9094867d813a158d0acb38239da0e3f6d1d8865fe49099dfdb6cb7c55160ddb81d0d0a431eec0ecc19f878cf92f2a9a58d951e5b4a8e3b8e87756577b157aec0b3911fa38814fc752c9377b98bc4477172ba2a9823f33a00d50fde16b148c9815f70e7057e181c757ecded9b0e31d35d2c7707dde7d855e6cf2b5f5496229", + "expectedHash": "0xa0cd4752397ce4e1e50cabbe0ab6f47c8953cf1804b0f53c84fd97179367f7ed" + }, + { + "bytecode": 
"24bccf22f7178476bc30c4cb690bb1df362e258bb07a0e862910568907815b5e276b7e3df3e94d45865f37ba13af512f42afe02a5bffc775e85f3483bf26a60779985d3713030544c9881a54e81549fdf0efa37529b3fc4416fead3efbaa921b88d182ca2cae3f0552a22533e1be3663708e4ef91ad82a54aa3b3f3bb6a24a2a296f320086b6e9a10dec3e5f67b8cc358ca5086d8f3a84fb0ca90167964a9cea26aa31fab862d86263221f67e52ee5fc4a29a0b6e04472f718e76b6a19f6493d540da2513a5547f9649a4f77dc99e0bb0b1e680da69fa2dd9ec2b3b11e375bc993d300adfcd7fc4fe875aa4d29780015fb77c7bd135aa49e62917043147a3188ece587f88da06fe698793d1e3b1d7cedd93b12664f53defeebccdb816f91fad2fff1214c97e61d1ac6178f278273d656a209a4eb1f532fc34ce808932ade305c27a21bed0bfe4595364f6e6462732f75fc8566bb8d1d6fec34228f510cbb1f72293c5d3cd9cb38b1f77db7d67fef1032f392b156c3a1c8368307bcc1843b1a7661630417289c61d44457ad6455c84d73db4517e771ea8a2041113e9910c9dc64bfbac9e7f6563014c65dce227f4f11b8071385dab30bc5c4e3553c4919a1f14302b491106844ce5b7b5874d879fed8e87ff3edf3ae04f2b77ad4a18051317cac773c3800bdfacf59a027a876e471b8c35d348a6690d6241d47119fb0c44448f03e2030c6999a48f3010e6d85503564d3af954671d0921fcd64d37b937a40d9070ca1b9e41f4cfc1e29b9946fc103418beab9fad91201cf00c13a5900ea2aa4d174fc7bbbbef1cd34b8b4f5853ebed9457fed88645775c8b3f466a474746338bd2a37263e518a538db50e74c9f4ce996e3e6ccd5153d3f2637f51aeb27018f0faf9ee01766c14ba860a689502459878e7383a9e314921237dcd968ea03083c8e66856789dd5e9a8d0b1dcdca37c9262391bf5943f5fcd8fc24637e23b91ccefe485dc19c4ea645a14c5f586be606488eea01307c886b7c8f976071c58c2e8f5c4e494a4f437d5696271fdd7ff0d85da03f0d151e5061676887641a2d178cac44af38e0588bfacfb71c797761d72fdd2a25ee1a181548f95d410cdbab2968fee3b35aee8cdb9fefc344f8cc554384c2aaf25b8277ab965d2fc27651361c7a23608277a2fb22230a15cfe69860d219603dd37f3d819f5877aa732ea67d36f6457614246f8ff11a3ef18d640578f2f95887a4dcc1152e02e51aff0b1a807c6973bb47eb315d922a8fbb00eb926b4c46827f9c48190fbb94d4795e3dfbe1b878e15b5d8dddace7f82451de45cf564a85beadf1768ee25bba3e1f7f41ea85f07534e67e2", + "expectedHash": "0x1e8f01efcb58de6363e4b781cbe9c328439154260c97d5bc810b564297719d81" + }, + { + "bytecode": 
"ab8ffd3224ad21c4009f6e67a9047209f338f88f4f29bcffcb769695d0963b859c591f85b570858c2eb16262a3c07ca323ed0ae762c6772e4f3494036c82a71f1b5569be153d1b069ceecd5fd88f2055cda4d9a651b70c7f736a4fb5a8eb383db80530e608ce01fe52956ba5ebe2801ae60309c4d84021839544933559178ca84b9cbb53cff9a831d6098d75ed580352f70577479c266443686923596280a36e8b811b858f69d79966d2548ab2d830fd10010b968889abbecb865af6d9f6ae6e95353900c2c260a72f3ae32660c14491ba7a97f9896f63f7f29d021212a4c18148a386ef60b6b16d834645e2fe6c9279fabc0056168aab2c023ea3443bc4ea62185f03730121e2df8cd015cadae0b1dd95f723f738af08c9f8d7d47f35d64e580ad7d6525a5b9648543fd752d5c27e3c5eefde55a3d867d98107da151399d6b8718cf662e07562025447b37ebf3854edee2fb74c51273d775ad4a4ece04ec840ffb3af5e184927ee644ce5a3c872ca354bef7187797062206e8b1d97b599401aca9b401d72e2611ccfdd73c594a7c4aa8cabd3d431212ccc1512efd0cca758d9650b53fc3cda49c74f6a048b947a3f48e92c2702a2f1d585817820a9ecefe3fc90332a718851c96a761d61395d84bf7866862ea57c8d49a4e417afdae540b8f607eac7bc4450a3ec30e28c8889bd50a036c03e633735b793dd6030880661e0a1756937ff9556a52de804ef5fc381b29b73f8bfbd13a8b2155ad919bca8ee1f5ce6d10a608154e6f79faca4d5eda068bf71bf31f944ca5c98baca4a5d7a141d9001dcb18037e30667eed18e39a1eb2ad0124c0dc6b4d3afe421baea72d2c15a658c5651c150b67e79c599839ca00af436833f65db98635959157f70446ddf578ec1a71c485f0bfbb071b23226a1ca34da34cf66c09d969dd01adc65efdf9222d075e7873053e029850bab855cdc4f15180a5571d7579c8bc5f470fa4b2a9ed154c505a513e5f0867cd302d8b6e649237079b7ee0640fe2b5ac8de694da87221a7177dc316c2ea47c1edb6a7699f3651a99f43ce2a885146d006bcb82864508223213d930e963b1305b7255807bc5f93498d832f5ed224c385253aed3b8354c511c3dafea13d3cd5e00523f00a7bf05c2a65fa23c0ac915d7098254ddc821a6f3df7b4d8aa79b4da56b4617382ed11dc8b0561fb037a10c5f9917a68ea5d1d8e7b0fec11628b8ce493b8ff7426cd6c43edc58e80b98b381be0399a51a146041f52db2eb1c4fde3cca60ede53050c9bd72e67ca41155b1ac1508ab4055b4053064af44b8e31c162d447935ef5d821ea108536733d949363e28980d78b0476a6092968f7", + "expectedHash": "0x962f4e833fdddfd0158516a4d4ab0c1936eadeb6f8b4adacc995d73e72afcda4" + }, + { + "bytecode": 
"60806040523480156200001157600080fd5b5060405162002103380380620021038339810160408190526200003491620002e1565b6200003f3362000125565b60005b620000506001602062000329565b811015620000eb57602181602081106200006e576200006e62000343565b01546021826020811062000086576200008662000343565b0154604080516020810193909352820152606001604051602081830303815290604052805190602001206021826001620000c1919062000359565b60208110620000d457620000d462000343565b015580620000e28162000374565b91505062000042565b50604680546001600160a01b0319166001600160a01b0383161790556200011162000175565b6043556200011e62000250565b50620003b5565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6041546000908190815b602081101562000248578160011660011415620001e05760018160208110620001ac57620001ac62000343565b0154604080516020810192909252810184905260600160405160208183030381529060405280519060200120925062000224565b8260218260208110620001f757620001f762000343565b01546040805160208101939093528201526060016040516020818303038152906040528051906020012092505b6200023160028362000392565b9150806200023f8162000374565b9150506200017f565b509092915050565b60458054906000620002628362000374565b909155505060435460425460408051602081019390935282015260600160408051808303601f19018152828252805160209182012060455460009081526044835283902055604354604254908452908301527f61014378f82a0d809aefaf87a8ac9505b89c321808287a6e7810f29304c1fce3910160405180910390a1565b600060208284031215620002f457600080fd5b81516001600160a01b03811681146200030c57600080fd5b9392505050565b634e487b7160e01b600052601160045260246000fd5b6000828210156200033e576200033e62000313565b500390565b634e487b7160e01b600052603260045260246000fd5b600082198211156200036f576200036f62000313565b500190565b60006000198214156200038b576200038b62000313565b5060010190565b600082620003b057634e487b7160e01b600052601260045260246000fd5b500490565b611d3e80620003c56000396000f3fe60806040526004361061010e5760003560e01c806355f6bc57116100a55780638da5cb5b11610074578063a71d644411610059578063a71d6444146102fb578063ed6be5c91461032b578063f2fde38b1461035557600080fd5b80638da5cb5b146102b0578063a5392cf6146102db57600080fd5b806355f6bc57146101f95780635ec6a8df14610219578063715018a61461026b5780637d8f04691461028057600080fd5b8063319cf735116100e1578063319cf7351461017d5780633381fe90146101935780633ae05047146101c05780633ed691ef146101d557600080fd5b806301fd904414610113578063029f27931461013c5780630e21fbd7146101525780632dfdf0b514610167575b600080fd5b34801561011f57600080fd5b5061012960425481565b6040519081526020015b60405180910390f35b34801561014857600080fd5b5061012960455481565b61016561016036600461170e565b610375565b005b34801561017357600080fd5b5061012960415481565b34801561018957600080fd5b5061012960435481565b34801561019f57600080fd5b506101296101ae36600461175f565b60446020526000908152604090205481565b3480156101cc57600080fd5b50610129610574565b3480156101e157600080fd5b50604554600090815260446020526040902054610129565b34801561020557600080fd5b5061016561021436600461175f565b610641565b34801561022557600080fd5b506046546102469073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610133565b34801561027757600080fd5b506101656106f8565b34801561028c57600080fd5b506102a061029b36600461186d565b610785565b6040519015158152602001610133565b3480156102bc57600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610246565b3480156102e757600080fd5b506101656102f6366004611915565b610931565b34801561030757600080fd5b506102a061031636600461175f565b60476020526000908152604090205460ff1681565b
34801561033757600080fd5b50610340600081565b60405163ffffffff9091168152602001610133565b34801561036157600080fd5b506101656103703660046119d2565b610e4d565b73ffffffffffffffffffffffffffffffffffffffff841661042957823414610424576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f4272696467653a3a6465706f7369743a20414d4f554e545f444f45535f4e4f5460448201527f5f4d415443485f4d53475f56414c55450000000000000000000000000000000060648201526084015b60405180910390fd5b61044b565b61044b73ffffffffffffffffffffffffffffffffffffffff8516333086610f7a565b63ffffffff82166104de576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602c60248201527f4272696467653a3a6465706f7369743a2044455354494e4154494f4e5f43414e60448201527f545f42455f4d41494e4e45540000000000000000000000000000000000000000606482015260840161041b565b6041546040805173ffffffffffffffffffffffffffffffffffffffff87811682526020820187905263ffffffff8681168385015290851660608301529092166080830152517f0a37f8bae6de7e960aeedce45875d5a75681918316c4bd81f4691152910f8e329181900360a00190a161055b848460008585611056565b610563610574565b60435561056e61125e565b50505050565b6041546000908190815b60208110156106395781600116600114156105d957600181602081106105a6576105a66119ef565b0154604080516020810192909252810184905260600160405160208183030381529060405280519060200120925061061a565b82602182602081106105ed576105ed6119ef565b01546040805160208101939093528201526060016040516020818303038152906040528051906020012092505b610625600283611a4d565b91508061063181611a88565b91505061057e565b509092915050565b60465473ffffffffffffffffffffffffffffffffffffffff1633146106e8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602960248201527f4272696467653a3a757064617465526f6c6c757045786974526f6f743a204f4e60448201527f4c595f524f4c4c55500000000000000000000000000000000000000000000000606482015260840161041b565b60428190556106f561125e565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610779576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015260640161041b565b610783600061130b565b565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e088811b821660208401527fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608c811b82166024860152603885018c90529189901b909216605884015286901b16605c8201526000908190607001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190528051602090910120905067ffffffffffffffff841660005b60208110156109205781600116600114156108b357868181518110610873576108736119ef565b602002602001015183604051602001610896929190918252602082015260400190565b604051602081830303815290604052805190602001209250610901565b828782815181106108c6576108c66119ef565b60200260200101516040516020016108e8929190918252602082015260400190565b6040516020818303038152906040528051906020012092505b61090c600283611a4d565b91508061091881611a88565b91505061084c565b505090911498975050505050505050565b67ffffffffffffffff841660009081526047602052604090205460ff16156109db576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f4272696467653a3a77697468647261773a20414c52454144595f434c41494d4560448201527f445f574954484452415700000000000000000000000000000000000000000000606482015260840161041b565b63ffffffff871615610a6f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603160248201527f4
272696467653a3a77697468647261773a2044455354494e4154494f4e5f4e4560448201527f54574f524b5f4e4f545f4d41494e4e4554000000000000000000000000000000606482015260840161041b565b63ffffffff881615610b03576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602c60248201527f4272696467653a3a77697468647261773a204f524947494e5f4e4554574f524b60448201527f5f4e4f545f4d41494e4e45540000000000000000000000000000000000000000606482015260840161041b565b6000838152604460209081526040918290205482519182018590529181018390526060016040516020818303038152906040528051906020012014610bca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603160248201527f4272696467653a3a77697468647261773a20474c4f42414c5f455849545f524f60448201527f4f545f444f45535f4e4f545f4d41544348000000000000000000000000000000606482015260840161041b565b610bda8a8a8a8a8a8a8a88610785565b610c40576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f4272696467653a3a77697468647261773a20534d545f494e56414c4944000000604482015260640161041b565b67ffffffffffffffff8416600090815260476020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905573ffffffffffffffffffffffffffffffffffffffff8a16610dad576040805160008082526020820190925273ffffffffffffffffffffffffffffffffffffffff8816908b90604051610cd49190611aed565b60006040518083038185875af1925050503d8060008114610d11576040519150601f19603f3d011682016040523d82523d6000602084013e610d16565b606091505b5050905080610da7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f4272696467653a3a77697468647261773a204554485f5452414e534645525f4660448201527f41494c4544000000000000000000000000000000000000000000000000000000606482015260840161041b565b50610dce565b610dce73ffffffffffffffffffffffffffffffffffffffff8b16878b611380565b6040805167ffffffffffffffff8616815263ffffffff8a16602082015273ffffffffffffffffffffffffffffffffffffffff8c811682840152606082018c90528816608082015290517f8932892d010aea7e4fdefb3764910523c321e06bb52577dc2439501196bf72559181900360a00190a150505050505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610ece576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015260640161041b565b73ffffffffffffffffffffffffffffffffffffffff8116610f71576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f6464726573730000000000000000000000000000000000000000000000000000606482015260840161041b565b6106f58161130b565b60405173ffffffffffffffffffffffffffffffffffffffff8085166024830152831660448201526064810182905261056e9085907f23b872dd00000000000000000000000000000000000000000000000000000000906084015b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909316929092179091526113db565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085811b821660208401527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606089811b82166024860152603885018990529186901b909216605884015283901b16605c8201526000906070016040516020818303038152906040528051906020012090506001602060026110fd9190611c2b565b6111079190611c37565b60415410611197
576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f4465706f736974436f6e74726163743a5f6465706f7369743a204d45524b4c4560448201527f5f545245455f46554c4c00000000000000000000000000000000000000000000606482015260840161041b565b6001604160008282546111aa9190611c4e565b909155505060415460005b602081101561124b5781600116600114156111e95782600182602081106111de576111de6119ef565b015550611257915050565b600181602081106111fc576111fc6119ef565b015460408051602081019290925281018490526060016040516020818303038152906040528051906020012092506002826112379190611a4d565b91508061124381611a88565b9150506111b5565b50611254611c66565b50505b5050505050565b6045805490600061126e83611a88565b9091555050604354604254604080516020810193909352820152606001604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018152828252805160209182012060455460009081526044835283902055604354604254908452908301527f61014378f82a0d809aefaf87a8ac9505b89c321808287a6e7810f29304c1fce3910160405180910390a1565b6000805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b60405173ffffffffffffffffffffffffffffffffffffffff83166024820152604481018290526113d69084907fa9059cbb0000000000000000000000000000000000000000000000000000000090606401610fd4565b505050565b600061143d826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65648152508573ffffffffffffffffffffffffffffffffffffffff166114e79092919063ffffffff16565b8051909150156113d6578080602001905181019061145b9190611c95565b6113d6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e60448201527f6f74207375636365656400000000000000000000000000000000000000000000606482015260840161041b565b60606114f68484600085611500565b90505b9392505050565b606082471015611592576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c0000000000000000000000000000000000000000000000000000606482015260840161041b565b843b6115fa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161041b565b6000808673ffffffffffffffffffffffffffffffffffffffff1685876040516116239190611aed565b60006040518083038185875af1925050503d8060008114611660576040519150601f19603f3d011682016040523d82523d6000602084013e611665565b606091505b5091509150611675828286611680565b979650505050505050565b6060831561168f5750816114f9565b82511561169f5782518084602001fd5b816040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161041b9190611cb7565b73ffffffffffffffffffffffffffffffffffffffff811681146106f557600080fd5b803563ffffffff8116811461170957600080fd5b919050565b6000806000806080858703121561172457600080fd5b843561172f816116d3565b935060208501359250611744604086016116f5565b91506060850135611754816116d3565b939692955090935050565b60006020828403121561177157600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f8301126117b857600080fd5b8135602067ffffffffffffffff808311156117d5576117d5611778565b8260051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8
301168101818110848211171561181857611818611778565b60405293845285810183019383810192508785111561183657600080fd5b83870191505b848210156116755781358352918301919083019061183c565b803567ffffffffffffffff8116811461170957600080fd5b600080600080600080600080610100898b03121561188a57600080fd5b8835611895816116d3565b9750602089013596506118aa60408a016116f5565b95506118b860608a016116f5565b945060808901356118c8816116d3565b935060a089013567ffffffffffffffff8111156118e457600080fd5b6118f08b828c016117a7565b9350506118ff60c08a01611855565b915060e089013590509295985092959890939650565b6000806000806000806000806000806101408b8d03121561193557600080fd5b8a35611940816116d3565b995060208b0135985061195560408c016116f5565b975061196360608c016116f5565b965060808b0135611973816116d3565b955060a08b013567ffffffffffffffff81111561198f57600080fd5b61199b8d828e016117a7565b9550506119aa60c08c01611855565b935060e08b013592506101008b013591506101208b013590509295989b9194979a5092959850565b6000602082840312156119e457600080fd5b81356114f9816116d3565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600082611a83577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611aba57611aba611a1e565b5060010190565b60005b83811015611adc578181015183820152602001611ac4565b8381111561056e5750506000910152565b60008251611aff818460208701611ac1565b9190910192915050565b600181815b80851115611b6257817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611b4857611b48611a1e565b80851615611b5557918102915b93841c9390800290611b0e565b509250929050565b600082611b7957506001611c25565b81611b8657506000611c25565b8160018114611b9c5760028114611ba657611bc2565b6001915050611c25565b60ff841115611bb757611bb7611a1e565b50506001821b611c25565b5060208310610133831016604e8410600b8410161715611be5575081810a611c25565b611bef8383611b09565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611c2157611c21611a1e565b0290505b92915050565b60006114f98383611b6a565b600082821015611c4957611c49611a1e565b500390565b60008219821115611c6157611c61611a1e565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b600060208284031215611ca757600080fd5b815180151581146114f957600080fd5b6020815260008251806020840152611cd6816040850160208701611ac1565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016040019291505056fea264697066735822122086e50005a89e4c2272c1cba11ff802c1213e2b08c8a20c742826166345c5ee3264736f6c63430008090033", + "expectedHash": "0x83c54e65ccd57395ecb7be6ed8d10ea5505844d1a3e514486affcd8fc8d11a54" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/smt-key-contract-code.json b/test/vectors/src/merkle-tree/smt-key-contract-code.json new file mode 100644 index 00000000..4630e2f8 --- /dev/null +++ b/test/vectors/src/merkle-tree/smt-key-contract-code.json @@ -0,0 +1,17 @@ +[ + { + "leafType": 2, + "ethAddr": "0x0000000000000000000000000000000000000000", + "expectedKey": "72618736525103033809705966741823173469010530487114812728907809351129229387686" + }, + { + "leafType": 2, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "expectedKey": "100339618010685329502920959863456851722741867804653471565599858216996781583185" + }, + { + "leafType": 2, + "ethAddr": "0xEEF9f339514298C6A857EfCfC1A762aF84438dEE", + "expectedKey": 
"37702541001567369137011480863022602456875150323680555331519352316148423991760" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/smt-key-contract-length.json b/test/vectors/src/merkle-tree/smt-key-contract-length.json new file mode 100644 index 00000000..4fd72832 --- /dev/null +++ b/test/vectors/src/merkle-tree/smt-key-contract-length.json @@ -0,0 +1,17 @@ +[ + { + "leafType": 4, + "ethAddr": "0x0000000000000000000000000000000000000000", + "expectedKey": "41007279171909826356801898715236946089777777871690100429699594563988270638848" + }, + { + "leafType": 4, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "expectedKey": "34646114882128150922895390038184820825657559006046553149154512997547296886401" + }, + { + "leafType": 4, + "ethAddr": "0xEEF9f339514298C6A857EfCfC1A762aF84438dEE", + "expectedKey": "22692912702510785895734212421419794952797782834275353854560810371481244756741" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/smt-key-contract-storage.json b/test/vectors/src/merkle-tree/smt-key-contract-storage.json new file mode 100644 index 00000000..c743dba0 --- /dev/null +++ b/test/vectors/src/merkle-tree/smt-key-contract-storage.json @@ -0,0 +1,38 @@ +[ + { + "leafType": 3, + "ethAddr": "0x0000000000000000000000000000000000000000", + "storagePosition": "0", + "expectedKey": "12534214928306848758475099215268104288950840610411881349004256209866079801855" + }, + { + "leafType": 3, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "storagePosition": "115792089237316195423570985008687907853269984665640564039457584007913129639935", + "expectedKey": "33137250487625679402353497751179166536828572303855484396343252601253270796565" + }, + { + "leafType": 3, + "ethAddr": "0x0000000000000000000000000000000000000000", + "storagePosition": "0", + "expectedKey": "12534214928306848758475099215268104288950840610411881349004256209866079801855" + }, + { + "leafType": 3, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "storagePosition": "115792089237316195423570985008687907853269984665640564039457584007913129639935", + "expectedKey": "33137250487625679402353497751179166536828572303855484396343252601253270796565" + }, + { + "leafType": 3, + "ethAddr": "0xEEF9f339514298C6A857EfCfC1A762aF84438dEE", + "storagePosition": "7264", + "expectedKey": "83856652265636757733606490452162031059172593606140564507822531867780274877175" + }, + { + "leafType": 3, + "ethAddr": "0xEEF9f339514298C6A857EfCfC1A762aF84438dEE", + "storagePosition": "7264", + "expectedKey": "83856652265636757733606490452162031059172593606140564507822531867780274877175" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/smt-key-eth-balance.json b/test/vectors/src/merkle-tree/smt-key-eth-balance.json new file mode 100644 index 00000000..8c236b71 --- /dev/null +++ b/test/vectors/src/merkle-tree/smt-key-eth-balance.json @@ -0,0 +1,42 @@ +[ + { + "leafType": 0, + "ethAddr": "0x0000000000000000000000000000000000000000", + "expectedKey": "26833593870529421166492422877314944811724038685477806060577465163426988022737" + }, + { + "leafType": 0, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "expectedKey": "40127382331240911157907914324528741813743915534659976972051985426947551260673" + }, + { + "leafType": 0, + "ethAddr": "0x0000000000000000000000000000000000000000", + "expectedKey": "26833593870529421166492422877314944811724038685477806060577465163426988022737" + }, + { + "leafType": 0, + "ethAddr": 
"0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "expectedKey": "40127382331240911157907914324528741813743915534659976972051985426947551260673" + }, + { + "leafType": 0, + "ethAddr": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", + "expectedKey": "45511135140510043104127159862914553230179041101689561753801876001524381766893" + }, + { + "leafType": 0, + "ethAddr": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", + "expectedKey": "43741542307756197806968621038083986403476477276779293271513030919451674021857" + }, + { + "leafType": 0, + "ethAddr": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", + "expectedKey": "45511135140510043104127159862914553230179041101689561753801876001524381766893" + }, + { + "leafType": 0, + "ethAddr": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", + "expectedKey": "43741542307756197806968621038083986403476477276779293271513030919451674021857" + } +] \ No newline at end of file diff --git a/test/vectors/src/merkle-tree/smt-key-eth-nonce.json b/test/vectors/src/merkle-tree/smt-key-eth-nonce.json new file mode 100644 index 00000000..4deb93a5 --- /dev/null +++ b/test/vectors/src/merkle-tree/smt-key-eth-nonce.json @@ -0,0 +1,42 @@ +[ + { + "leafType": 1, + "ethAddr": "0x0000000000000000000000000000000000000000", + "expectedKey": "28358077366816831193326378002625509892290580640052650926129802772493521696670" + }, + { + "leafType": 1, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "expectedKey": "46601686036392058419641250406383581470536953255400137235565480209972996937304" + }, + { + "leafType": 1, + "ethAddr": "0x0000000000000000000000000000000000000000", + "expectedKey": "28358077366816831193326378002625509892290580640052650926129802772493521696670" + }, + { + "leafType": 1, + "ethAddr": "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "expectedKey": "46601686036392058419641250406383581470536953255400137235565480209972996937304" + }, + { + "leafType": 1, + "ethAddr": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", + "expectedKey": "98790850219450538256104290071020195215871038788594574188656606927809546484197" + }, + { + "leafType": 1, + "ethAddr": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", + "expectedKey": "45555081975440947041848992181135418706314820385341877748019327868663035195938" + }, + { + "leafType": 1, + "ethAddr": "0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D", + "expectedKey": "98790850219450538256104290071020195215871038788594574188656606927809546484197" + }, + { + "leafType": 1, + "ethAddr": "0x4d5Cf5032B2a844602278b01199ED191A86c93ff", + "expectedKey": "45555081975440947041848992181135418706314820385341877748019327868663035195938" + } +] \ No newline at end of file From e42a5deb8f9f0ef46cc35bdf156770a14525ee66 Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Wed, 16 Oct 2024 21:54:21 +0530 Subject: [PATCH 43/53] test: added test for tryGenerateBatchProof (#125) --- aggregator/aggregator_test.go | 546 +++++++++++++++++++++++++++++++++- 1 file changed, 538 insertions(+), 8 deletions(-) diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index f906ebbb..657a34cf 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -6,6 +6,7 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -38,12 +39,24 @@ var ( proverID = "proverID" ) +const ( + ownerProver = "prover" + ownerAggregator = "aggregator" + + // changeL2Block + deltaTimeStamp + indexL1InfoTree + codedL2BlockHeader = "0b73e6af6f00000001" + // 2 x [ tx 
coded in RLP + r,s,v,efficiencyPercentage] + codedRLP2Txs1 = "ee02843b9aca00830186a0944d5cf5032b2a844602278b01199ed191a86c93ff88016345785d8a0000808203e88080bff0e780ba7db409339fd3f71969fa2cbf1b8535f6c725a1499d3318d3ef9c2b6340ddfab84add2c188f9efddb99771db1fe621c981846394ea4f035c85bcdd51bffee03843b9aca00830186a0944d5cf5032b2a844602278b01199ed191a86c93ff88016345785d8a0000808203e880805b346aa02230b22e62f73608de9ff39a162a6c24be9822209c770e3685b92d0756d5316ef954eefc58b068231ccea001fb7ac763ebe03afd009ad71cab36861e1bff" + codedL2Block1 = codedL2BlockHeader + codedRLP2Txs1 +) + type mox struct { stateMock *mocks.StateInterfaceMock ethTxManager *mocks.EthTxManagerClientMock etherman *mocks.EthermanMock proverMock *mocks.ProverInterfaceMock aggLayerClientMock *mocks.AgglayerClientInterfaceMock + synchronizerMock *mocks.SynchronizerInterfaceMock } func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { @@ -828,9 +841,9 @@ func Test_tryBuildFinalProof(t *testing.T) { BatchNumberFinal: uint64(456), } - proverCtx := context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck - matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } - matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + proverCtx := context.WithValue(context.Background(), "owner", ownerProver) //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerProver } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerAggregator } testCases := []struct { name string proof *state.Proof @@ -1046,7 +1059,7 @@ func Test_tryBuildFinalProof(t *testing.T) { finalProof: make(chan finalProofMsg), } - aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) m := mox{ stateMock: stateMock, @@ -1092,9 +1105,9 @@ func Test_tryAggregateProofs(t *testing.T) { } recursiveProof := "recursiveProof" - proverCtx := context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck - matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } - matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + proverCtx := context.WithValue(context.Background(), "owner", ownerProver) //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerProver } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerAggregator } batchNum := uint64(23) batchNumFinal := uint64(42) proof1 := state.Proof{ @@ -1550,7 +1563,7 @@ func Test_tryAggregateProofs(t *testing.T) { timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), } - aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) m := mox{ stateMock: stateMock, @@ -1571,3 +1584,520 @@ func Test_tryAggregateProofs(t *testing.T) { }) } } + +func Test_tryGenerateBatchProof(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + from := common.BytesToAddress([]byte("from")) + cfg := Config{ + 
VerifyProofInterval: types.Duration{Duration: time.Duration(10000000)}, + TxProfitabilityCheckerType: ProfitabilityAcceptAll, + SenderAddress: from.Hex(), + IntervalAfterWhichBatchConsolidateAnyway: types.Duration{Duration: time.Second * 1}, + } + lastVerifiedBatchNum := uint64(22) + batchNum := uint64(23) + batchToProve := state.Batch{ + BatchNumber: batchNum, + } + proofID := "proofId" + proverName := "proverName" + proverID := "proverID" + recursiveProof := "recursiveProof" + errTest := errors.New("test error") + proverCtx := context.WithValue(context.Background(), "owner", ownerProver) //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerProver } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerAggregator } + fixedTimestamp := time.Date(2023, 10, 13, 15, 0, 0, 0, time.UTC) + l1InfoTreeLeaf := []synchronizer.L1InfoTreeLeaf{ + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + } + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + }{ + { + name: "getAndLockBatchToProve returns generic error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "getAndLockBatchToProve returns ErrNotFound", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "BatchProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + batch := state.Batch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: l1InfoRoot, + Timestamp: time.Now(), + Coinbase: common.Address{}, + ChainID: uint64(1), + ForkID: uint64(12), + } + dbBatch := state.DBBatch{ + Witness: []byte("witness"), + Batch: batch, + } + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), 
lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil).Twice() + + oldDBBatch := state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + } + m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + require.NoError(err) + + m.proverMock.On("BatchProof", expectedInputProver).Return(nil, errTest).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + //nolint:dupl + { + name: "WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + batch := state.Batch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: l1InfoRoot, + Timestamp: time.Now(), + Coinbase: common.Address{}, + ChainID: uint64(1), + ForkID: uint64(12), + } + dbBatch := state.DBBatch{ + Witness: []byte("witness"), + Batch: batch, + } + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + 
assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil).Twice() + + oldDBBatch := state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + } + m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + require.NoError(err) + + m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, //nolint:dupl + //nolint:dupl + { + name: "DeleteBatchProofs error after WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + batch := state.Batch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: l1InfoRoot, + Timestamp: time.Now(), + Coinbase: common.Address{}, + ChainID: uint64(1), + ForkID: uint64(12), + } + dbBatch := state.DBBatch{ + Witness: []byte("witness"), + Batch: batch, + } + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, 
l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil).Twice() + + oldDBBatch := state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + } + m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + require.NoError(err) + + m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, //nolint:dupl + { + name: "not time to send final ok", + setup: func(m mox, a *Aggregator) { + a.cfg.BatchProofSanityCheckEnabled = false + m.proverMock.On("Name").Return(proverName).Times(3) + m.proverMock.On("ID").Return(proverID).Times(3) + m.proverMock.On("Addr").Return("addr").Times(3) + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + batch := state.Batch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: l1InfoRoot, + Timestamp: time.Now(), + Coinbase: common.Address{}, + ChainID: uint64(1), + ForkID: uint64(12), + } + dbBatch := state.DBBatch{ + Witness: []byte("witness"), + Batch: batch, + } + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil).Twice() + + oldDBBatch := state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + } + m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, 
nil).Return(&oldDBBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + require.NoError(err) + + m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal("", proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.Nil(proof.GeneratingSince) + }, + ).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + }, + { + name: "time to send final, state error ok", + setup: func(m mox, a *Aggregator) { + a.cfg.VerifyProofInterval = types.NewDuration(0) + a.cfg.BatchProofSanityCheckEnabled = false + m.proverMock.On("Name").Return(proverName).Times(3) + m.proverMock.On("ID").Return(proverID).Times(3) + m.proverMock.On("Addr").Return("addr").Times(3) + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + batch := state.Batch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: l1InfoRoot, + Timestamp: time.Now(), + Coinbase: common.Address{}, + ChainID: uint64(1), + ForkID: uint64(12), + } + dbBatch := state.DBBatch{ + Witness: []byte("witness"), + Batch: batch, + } + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + m.stateMock.On("GetBatch", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1, nil).Return(&dbBatch, nil).Once() + m.stateMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil).Twice() + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil).Twice() + + oldDBBatch := state.DBBatch{ + Batch: 
state.Batch{ + AccInputHash: common.Hash{}, + }, + } + m.stateMock.On("GetBatch", mock.Anything, lastVerifiedBatchNum, nil).Return(&oldDBBatch, nil).Twice() + expectedInputProver, err := a.buildInputProver(context.Background(), &batch, dbBatch.Witness) + require.NoError(err) + + m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal("", proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.Nil(proof.GeneratingSince) + }, + ).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + synchronizerMock := mocks.NewSynchronizerInterfaceMock(t) + + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + profitabilityChecker: NewTxProfitabilityCheckerAcceptAll(stateMock, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration), + l1Syncr: synchronizerMock, + } + aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + synchronizerMock: synchronizerMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + a.resetVerifyProofTime() + + result, err := a.tryGenerateBatchProof(proverCtx, proverMock) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + }) + } +} From 00bc136b998f6ddb81912ce5c936b43d93cb0b15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:29:17 +0200 Subject: [PATCH 44/53] feat: currentStreamBatchRaw sanity check (#131) --- aggregator/aggregator.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 2003f0e2..7106b615 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -425,6 +425,12 @@ func (a *Aggregator) handleReceivedDataStream( switch entry.Type { case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START): + // Check currentStreamBatchRaw is empty as sanity check + if len(a.currentStreamBatchRaw.Blocks) > 0 { + a.logger.Errorf("currentStreamBatchRaw should be empty, "+ + "but it contains %v blocks", len(a.currentStreamBatchRaw.Blocks)) + a.resetCurrentBatchData() + } 
batch := &datastream.BatchStart{} err := proto.Unmarshal(entry.Data, batch) if err != nil { From 08888a1a6848c5fdf2d9288fd6dc3dcd36f189da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:36:01 +0200 Subject: [PATCH 45/53] feat: currentStreamBatchRaw sanity check (#131) (#132) --- aggregator/aggregator.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 2003f0e2..7106b615 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -425,6 +425,12 @@ func (a *Aggregator) handleReceivedDataStream( switch entry.Type { case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START): + // Check currentStreamBatchRaw is empty as sanity check + if len(a.currentStreamBatchRaw.Blocks) > 0 { + a.logger.Errorf("currentStreamBatchRaw should be empty, "+ + "but it contains %v blocks", len(a.currentStreamBatchRaw.Blocks)) + a.resetCurrentBatchData() + } batch := &datastream.BatchStart{} err := proto.Unmarshal(entry.Data, batch) if err != nil { From 04f3f3ad1b4e7f3f2172c41cb3eb238064dc6dd8 Mon Sep 17 00:00:00 2001 From: rbpol Date: Thu, 17 Oct 2024 14:44:30 +0100 Subject: [PATCH 46/53] fix: Fixing L1 info tree sync tests (#130) * fix: Fix l1infotreesync tests --- db/meddler.go | 4 ++-- db/sqlite.go | 1 - l1infotreesync/e2e_test.go | 2 ++ sync/evmdriver.go | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/db/meddler.go b/db/meddler.go index fb632fb4..8dd17fe8 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -13,8 +13,8 @@ import ( "github.com/russross/meddler" ) -// initMeddler registers tags to be used to read/write from SQL DBs using meddler -func initMeddler() { +// init registers tags to be used to read/write from SQL DBs using meddler +func init() { meddler.Default = meddler.SQLite meddler.Register("bigint", BigIntMeddler{}) meddler.Register("merkleproof", MerkleProofMeddler{}) diff --git a/db/sqlite.go b/db/sqlite.go index ba8faefb..df0c1d28 100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -17,7 +17,6 @@ var ( // NewSQLiteDB creates a new SQLite DB func NewSQLiteDB(dbPath string) (*sql.DB, error) { - initMeddler() db, err := sql.Open("sqlite3", dbPath) if err != nil { return nil, err diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 61e7ff28..70986cbf 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -62,10 +62,12 @@ func TestE2E(t *testing.T) { rdm := l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) + go syncer.Start(ctx) // Update GER 3 times diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 52eaaaae..4e195af2 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -94,6 +94,7 @@ reset: select { case <-ctx.Done(): d.log.Info("sync stopped due to context done") + cancel() return case b := <-downloadCh: d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) From bccc190c2526862b3515d42068205fc0508969b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= 
<58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 17 Oct 2024 16:29:43 +0200 Subject: [PATCH 47/53] feat: update DS lib (#133) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cd587fb2..baa547b0 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/0xPolygon/cdk-data-availability v0.0.10 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 - github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 + github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 diff --git a/go.sum b/go.sum index 3de7966e..9bbafe6a 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 h1:73sYxRQ9cOmtYBEyHePgEwrVULR+YruSQxVXCt/SmzU= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 h1:+ZbyEpaBZu88jWtov/7iBWvwgBMu5cxlvAFDxsPrnGQ= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= From c040288a616cc006a21f4a0b21bee6533c154cca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Thu, 17 Oct 2024 16:34:53 +0200 Subject: [PATCH 48/53] fix: Fixing L1 info tree sync tests (#130) (#134) * fix: Fix l1infotreesync tests Co-authored-by: rbpol --- db/meddler.go | 4 ++-- db/sqlite.go | 1 - l1infotreesync/e2e_test.go | 2 ++ sync/evmdriver.go | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/db/meddler.go b/db/meddler.go index fb632fb4..8dd17fe8 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -13,8 +13,8 @@ import ( "github.com/russross/meddler" ) -// initMeddler registers tags to be used to read/write from SQL DBs using meddler -func initMeddler() { +// init registers tags to be used to read/write from SQL DBs using meddler +func init() { meddler.Default = meddler.SQLite meddler.Register("bigint", BigIntMeddler{}) meddler.Register("merkleproof", MerkleProofMeddler{}) diff --git a/db/sqlite.go b/db/sqlite.go index ba8faefb..df0c1d28 100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -17,7 +17,6 @@ var ( // NewSQLiteDB creates a new SQLite DB func NewSQLiteDB(dbPath string) (*sql.DB, error) { - initMeddler() db, err := sql.Open("sqlite3", dbPath) if err != nil { return nil, err diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 61e7ff28..70986cbf 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -62,10 +62,12 @@ func TestE2E(t *testing.T) { rdm := 
l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) + go syncer.Start(ctx) // Update GER 3 times diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 52eaaaae..4e195af2 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -94,6 +94,7 @@ reset: select { case <-ctx.Done(): d.log.Info("sync stopped due to context done") + cancel() return case b := <-downloadCh: d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) From e76404074784e0f5e0728c1df10904ee542f1a45 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Thu, 17 Oct 2024 17:59:33 +0200 Subject: [PATCH 49/53] ci: bump kurtosis version (#128) * ci: bump kurtosis version Also bump necessary versions in combinations. * test: compute gas-price * test: remove txspammer --- .github/workflows/test-e2e.yml | 2 +- scripts/local_config | 2 +- test/bridge-e2e.bats | 1 + test/combinations/fork11-rollup.yml | 5 ++--- test/combinations/fork12-cdk-validium.yml | 8 +++----- test/combinations/fork12-rollup.yml | 8 +++----- test/combinations/fork9-cdk-validium.yml | 3 +-- test/helpers/common-setup.bash | 4 ++-- test/helpers/common.bash | 24 ++++++++++++++++++++-- test/helpers/lxly-bridge-test.bash | 10 ++++++++- test/run-e2e.sh | 2 +- test/scripts/batch_verification_monitor.sh | 2 +- test/scripts/env.sh | 2 +- 13 files changed, 48 insertions(+), 25 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index a4886613..787d2301 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -70,7 +70,7 @@ jobs: with: repository: 0xPolygon/kurtosis-cdk path: "kurtosis-cdk" - ref: "v0.2.11" + ref: "v0.2.14" - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 diff --git a/scripts/local_config b/scripts/local_config index 08d960db..9a1f55cf 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -198,7 +198,7 @@ function export_portnum_from_kurtosis_or_fail(){ ############################################################################### function export_ports_from_kurtosis(){ export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc - export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 http-rpc rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 rpc rpc export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index fcea86d9..f5391d1c 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -33,6 +33,7 @@ setup() { readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') + gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") } @test "Run deposit" { diff
--git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index aff80b2e..653adc9d 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -1,12 +1,11 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 - zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.0-RC31-fork.11 + zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.2-fork.11 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: - pless_zkevm_node - - tx_spammer data_availability_mode: rollup sequencer_type: erigon diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index 300abb84..ed618754 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,10 +1,8 @@ args: - zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 - zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 + zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 cdk_node_image: cdk zkevm_use_gas_token_contract: true - additional_services: - - tx_spammer data_availability_mode: cdk-validium sequencer_type: erigon diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index f0e1a969..c97a25cf 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,10 +1,8 @@ args: - zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.3-fork.12 - zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC10-fork.12 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 + zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 cdk_node_image: cdk zkevm_use_gas_token_contract: true - additional_services: - - tx_spammer data_availability_mode: rollup sequencer_type: erigon diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index 481b45c9..f60fec9c 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -1,13 +1,12 @@ args: zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk - cdk_erigon_node_image: hermeznetwork/cdk-erigon:ab3013d cdk_node_image: cdk zkevm_use_gas_token_contract: true additional_services: - pless_zkevm_node - - tx_spammer data_availability_mode: cdk-validium sequencer_type: erigon diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index 7cb4dec7..dac81beb 100644 --- a/test/helpers/common-setup.bash +++ b/test/helpers/common-setup.bash @@ -18,9 +18,9 @@ _common_setup() { # Kurtosis enclave and service identifiers - readonly enclave=${KURTOSIS_ENCLAVE:-cdk-v1} + readonly enclave=${KURTOSIS_ENCLAVE:-cdk} readonly contracts_container=${KURTOSIS_CONTRACTS:-contracts-001} readonly 
contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} - readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node http-rpc)"} + readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node rpc)"} } diff --git a/test/helpers/common.bash b/test/helpers/common.bash index 821a1f59..a5ed751c 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -33,8 +33,15 @@ function deploy_contract() { fi # Send the transaction and capture the output + gas_price=$(cast gas-price --rpc-url "$rpc_url") + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi local cast_output=$(cast send --rpc-url "$rpc_url" \ --private-key "$private_key" \ + --gas-price $comp_gas_price \ --legacy \ --create "$bytecode" \ 2>&1) @@ -126,7 +133,14 @@ function send_eoa_transaction() { # Send transaction via cast local cast_output tx_hash - cast_output=$(cast send --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" --value "$value" --legacy 2>&1) + gas_price=$(cast gas-price --rpc-url "$rpc_url") + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi + echo "cast send --gas-price $comp_gas_price --rpc-url $rpc_url --private-key $private_key $receiver_addr --value $value --legacy" >&3 + cast_output=$(cast send --gas-price $comp_gas_price --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" --value "$value" --legacy 2>&1) if [[ $? -ne 0 ]]; then echo "Error: Failed to send transaction. Output:" echo "$cast_output" @@ -159,7 +173,13 @@ function send_smart_contract_transaction() { # Send the smart contract interaction using cast local cast_output tx_hash - cast_output=$(cast send "$receiver_addr" --rpc-url "$rpc_url" --private-key "$private_key" --legacy "$function_sig" "${params[@]}" 2>&1) + gas_price=$(cast gas-price --rpc-url "$rpc_url") + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi + cast_output=$(cast send "$receiver_addr" --rpc-url "$rpc_url" --private-key "$private_key" --gas-price $comp_gas_price --legacy "$function_sig" "${params[@]}" 2>&1) if [[ $? -ne 0 ]]; then echo "Error: Failed to send transaction. Output:" echo "$cast_output" diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index c1b43533..700e7ad2 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -45,6 +45,7 @@ function claim() { echo "We have $claimable_count claimable deposits on network $destination_net. Let's get this party started." 
>&3 readonly current_deposit=$(mktemp) readonly current_proof=$(mktemp) + local gas_price_factor=1 while read deposit_idx; do echo "Starting claim for tx index: "$deposit_idx >&3 echo "Deposit info:" >&3 @@ -70,7 +71,14 @@ function claim() { cast calldata $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata cast call --rpc-url $l2_rpc_url $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata else - cast send --legacy --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi + + echo "cast send --legacy --gas-price $comp_gas_price --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr \"$claim_sig\" \"$in_merkle_proof\" \"$in_rollup_merkle_proof\" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata" >&3 + cast send --legacy --gas-price $comp_gas_price --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr "$claim_sig" "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata fi done < <(seq 0 $((claimable_count - 1))) diff --git a/test/run-e2e.sh b/test/run-e2e.sh index d6a27a19..08a6b2cd 100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -27,4 +27,4 @@ fi kurtosis clean --all echo "Override cdk config file" cp $BASE_FOLDER/config/kurtosis-cdk-node-config.toml.template $KURTOSIS_FOLDER/templates/trusted-node/cdk-node-config.toml -kurtosis run --enclave cdk-v1 --args-file "combinations/$FORK-$DATA_AVAILABILITY_MODE.yml" --image-download always $KURTOSIS_FOLDER +kurtosis run --enclave cdk --args-file "combinations/$FORK-$DATA_AVAILABILITY_MODE.yml" --image-download always $KURTOSIS_FOLDER diff --git a/test/scripts/batch_verification_monitor.sh b/test/scripts/batch_verification_monitor.sh index 9dc18e64..9c923888 100755 --- a/test/scripts/batch_verification_monitor.sh +++ b/test/scripts/batch_verification_monitor.sh @@ -17,7 +17,7 @@ timeout="$2" start_time=$(date +%s) end_time=$((start_time + timeout)) -rpc_url="$(kurtosis port print cdk-v1 cdk-erigon-node-001 http-rpc)" +rpc_url="$(kurtosis port print cdk cdk-erigon-node-001 rpc)" while true; do verified_batches="$(cast to-dec "$(cast rpc --rpc-url "$rpc_url" zkevm_verifiedBatchNumber | sed 's/"//g')")" diff --git a/test/scripts/env.sh b/test/scripts/env.sh index 2afb2af4..063b7d61 100644 --- a/test/scripts/env.sh +++ b/test/scripts/env.sh @@ -1,6 +1,6 @@ #!/bin/bash ### Common variables -KURTOSIS_ENCLAVE=cdk-v1 +KURTOSIS_ENCLAVE=cdk TMP_CDK_FOLDER=tmp/cdk DEST_KURTOSIS_PARAMS_YML=../$TMP_CDK_FOLDER/e2e-params.yml KURTOSIS_FOLDER=../kurtosis-cdk From 9539f5019cf5b9bd8fdb4da548ae936be49fe981 Mon Sep 17 00:00:00 2001 From: Victor Castell <0x@vcastellm.xyz> Date: Fri, 18 Oct 2024 07:06:21 +0000 Subject: [PATCH 50/53] fix: merge conflict --- sequencesender/sequencesender.go | 4 
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index bf56f923..0a044356 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -271,7 +271,7 @@ func (s *SequenceSender) purgeSequences() { func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Update latest virtual batch s.logger.Infof("updating virtual batch") - err := s.getLatestVirtualBatch() + err := s.updateLatestVirtualBatch() if err != nil { return } @@ -349,7 +349,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Get latest virtual state batch from L1 - err = s.getLatestVirtualBatch() + err = s.updateLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } From 9e5bea7870002a4d597e39b0a01fc1b33d06b3e0 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Fri, 18 Oct 2024 10:39:45 +0200 Subject: [PATCH 51/53] feat: Merging config-files with variables (#121) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Allow multiple config files on the command line (`-cfg`); they are merged. This can be used to read `genesis.json` together with a specific `cdk-node.toml` file - Added new command line flags: - `-save-config-path` to save the 'rendered' config file to a specific path. It stores: - `cdk_config.toml`: the final config file with all vars set and environment vars applied - `cdk_config.toml.merged`: the merged config files (the command-line ones and the internal ones) - `-disable-default-config-vars`: forces all mandatory params to be set in config files / environment vars - `-allow-deprecated-fields`: allows config files to contain deprecated values (to keep compatibility) --------- Co-authored-by: Stefan Negovanović Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> Co-authored-by: Stefan Negovanović <93934272+Stefan-Ethernal@users.noreply.github.com> --- cmd/main.go | 25 +- cmd/run.go | 21 +- config/config.go | 280 ++++++++++------ config/config_render.go | 306 ++++++++++++++++++ config/config_render_test.go | 283 ++++++++++++++++ config/config_test.go | 124 ++++++- config/default.go | 199 ++++++++---- config/network.go | 114 ------- go.mod | 13 +- go.sum | 22 ++ .../kurtosis-cdk-node-config.toml.template | 188 +++-------- 11 files changed, 1126 insertions(+), 449 deletions(-) create mode 100644 config/config_render.go create mode 100644 config/config_render_test.go diff --git a/cmd/main.go b/cmd/main.go index 300851e7..23c01783 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -18,10 +18,10 @@ const ( ) var ( - configFileFlag = cli.StringFlag{ + configFileFlag = cli.StringSliceFlag{ Name: config.FlagCfg, Aliases: []string{"c"}, - Usage: "Configuration `FILE`", + Usage: "Configuration file(s)", Required: true, } customNetworkFlag = cli.StringFlag{ @@ -43,6 +43,24 @@ var ( Required: false, Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, common.AGGORACLE, common.RPC), } + saveConfigFlag = cli.StringFlag{ + Name: config.FlagSaveConfigPath, + Aliases: []string{"s"}, + Usage: "Save final configuration to the indicated path (name: cdk-node-config.toml)", + Required: false, + } + disableDefaultConfigVars = cli.BoolFlag{ + Name: config.FlagDisableDefaultConfigVars, + Aliases: []string{"d"}, + Usage: "Disable default configuration variables, all of them must be defined on config files", + Required: false, +
} + + allowDeprecatedFields = cli.BoolFlag{ + Name: config.FlagAllowDeprecatedFields, + Usage: "Allow that config-files contains deprecated fields", + Required: false, + } ) func main() { @@ -53,6 +71,9 @@ func main() { &configFileFlag, &yesFlag, &componentsFlag, + &saveConfigFlag, + &disableDefaultConfigVars, + &allowDeprecatedFields, } app.Commands = []*cli.Command{ { diff --git a/cmd/run.go b/cmd/run.go index b113c06e..4bd4dd0d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -150,20 +150,17 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool) } // READ CHAIN ID FROM POE SC - l2ChainID, err := etherman.GetL2ChainID() - if err != nil { - logger.Fatal(err) - } - - st := newState(&c, l2ChainID, stateSQLDB) - c.Aggregator.ChainID = l2ChainID + if c.Aggregator.ChainID == 0 { + l2ChainID, err := etherman.GetL2ChainID() + if err != nil { + logger.Fatal(err) + } + log.Infof("Autodiscover L2ChainID: %d", l2ChainID) + c.Aggregator.ChainID = l2ChainID + } - // Populate Network config - c.Aggregator.Synchronizer.Etherman.Contracts.GlobalExitRootManagerAddr = - c.NetworkConfig.L1Config.GlobalExitRootManagerAddr - c.Aggregator.Synchronizer.Etherman.Contracts.RollupManagerAddr = c.NetworkConfig.L1Config.RollupManagerAddr - c.Aggregator.Synchronizer.Etherman.Contracts.ZkEVMAddr = c.NetworkConfig.L1Config.ZkEVMAddr + st := newState(&c, c.Aggregator.ChainID, stateSQLDB) aggregator, err := aggregator.New(ctx, c.Aggregator, logger, st, etherman) if err != nil { diff --git a/config/config.go b/config/config.go index 720a11e4..b21ba971 100644 --- a/config/config.go +++ b/config/config.go @@ -3,7 +3,8 @@ package config import ( "bytes" "errors" - "path/filepath" + "fmt" + "os" "strings" jRPC "github.com/0xPolygon/cdk-rpc/rpc" @@ -18,8 +19,8 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/sequencesender" - "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" "github.com/mitchellh/mapstructure" + "github.com/pelletier/go-toml/v2" "github.com/spf13/viper" "github.com/urfave/cli/v2" ) @@ -50,51 +51,82 @@ const ( FlagOutputFile = "output" // FlagMaxAmount is the flag to avoid to use the flag FlagAmount FlagMaxAmount = "max-amount" + // FlagSaveConfigPath is the flag to save the final configuration file + FlagSaveConfigPath = "save-config-path" + // FlagDisableDefaultConfigVars is the flag to force all variables to be set on config-files + FlagDisableDefaultConfigVars = "disable-default-config-vars" + // FlagAllowDeprecatedFields is the flag to allow deprecated fields + FlagAllowDeprecatedFields = "allow-deprecated-fields" deprecatedFieldSyncDB = "Aggregator.Synchronizer.DB is deprecated. Use Aggregator.Synchronizer.SQLDB instead." deprecatedFieldPersistenceFilename = "EthTxManager.PersistenceFilename is deprecated." + " Use EthTxManager.StoragePath instead." 
+ + EnvVarPrefix = "CDK" + ConfigType = "toml" + SaveConfigFileName = "cdk_config.toml" + + DefaultCreationFilePermissions = os.FileMode(0600) ) -type ForbiddenField struct { - FieldName string - Reason string +type DeprecatedFieldsError struct { + // key is the rule and the value is the field's name that matches the rule + Fields map[DeprecatedField][]string +} + +func NewErrDeprecatedFields() *DeprecatedFieldsError { + return &DeprecatedFieldsError{ + Fields: make(map[DeprecatedField][]string), + } +} + +func (e *DeprecatedFieldsError) AddDeprecatedField(fieldName string, rule DeprecatedField) { + p := e.Fields[rule] + e.Fields[rule] = append(p, fieldName) +} + +func (e *DeprecatedFieldsError) Error() string { + res := "found deprecated fields:" + for rule, fieldsMatches := range e.Fields { + res += fmt.Sprintf("\n\t- %s: %s", rule.Reason, strings.Join(fieldsMatches, ", ")) + } + return res +} + +type DeprecatedField struct { + // If the field name ends with a dot means that match a section + FieldNamePattern string + Reason string } var ( - forbiddenFieldsOnConfig = []ForbiddenField{ + deprecatedFieldsOnConfig = []DeprecatedField{ { - FieldName: "aggregator.synchronizer.db.", - Reason: deprecatedFieldSyncDB, + FieldNamePattern: "sequencesender.ethtxmanager.persistencefilename", + Reason: deprecatedFieldPersistenceFilename, }, { - FieldName: "sequencesender.ethtxmanager.persistencefilename", - Reason: deprecatedFieldPersistenceFilename, + FieldNamePattern: "aggregator.synchronizer.db.", + Reason: deprecatedFieldSyncDB, }, + { - FieldName: "aggregator.ethtxmanager.persistencefilename", - Reason: deprecatedFieldPersistenceFilename, + FieldNamePattern: "aggregator.ethtxmanager.persistencefilename", + Reason: deprecatedFieldPersistenceFilename, }, } ) /* -Config represents the configuration of the entire Hermez Node +Config represents the configuration of the entire CDK Node The file is [TOML format] -You could find some examples: - - `config/environments/local/local.node.config.toml`: running a permisionless node - - `config/environments/mainnet/node.config.toml` - - `config/environments/public/node.config.toml` - - `test/config/test.node.config.toml`: configuration for a trusted node used in CI [TOML format]: https://en.wikipedia.org/wiki/TOML */ type Config struct { // Configuration of the etherman (client for access L1) Etherman ethermanconfig.Config - // Configuration for ethereum transaction manager - EthTxManager ethtxmanager.Config // Configuration of the aggregator Aggregator aggregator.Config // Configure Log level for all the services, allow also to store the logs in a file @@ -132,115 +164,179 @@ type Config struct { LastGERSync lastgersync.Config } -// Default parses the default configuration values. 
-func Default() (*Config, error) { - var cfg Config - viper.SetConfigType("toml") - - err := viper.ReadConfig(bytes.NewBuffer([]byte(DefaultValues))) +// Load loads the configuration +func Load(ctx *cli.Context) (*Config, error) { + configFilePath := ctx.StringSlice(FlagCfg) + filesData, err := readFiles(configFilePath) if err != nil { - return nil, err + return nil, fmt.Errorf("error reading files: Err:%w", err) } + saveConfigPath := ctx.String(FlagSaveConfigPath) + defaultConfigVars := !ctx.Bool(FlagDisableDefaultConfigVars) + allowDeprecatedFields := ctx.Bool(FlagAllowDeprecatedFields) + return LoadFile(filesData, saveConfigPath, defaultConfigVars, allowDeprecatedFields) +} - err = viper.Unmarshal(&cfg, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) - if err != nil { - return nil, err +func readFiles(files []string) ([]FileData, error) { + result := make([]FileData, 0, len(files)) + for _, file := range files { + fileContent, err := readFileToString(file) + if err != nil { + return nil, fmt.Errorf("error reading file content: %s. Err:%w", file, err) + } + fileExtension := getFileExtension(file) + if fileExtension != ConfigType { + fileContent, err = convertFileToToml(fileContent, fileExtension) + if err != nil { + return nil, fmt.Errorf("error converting file: %s from %s to TOML. Err:%w", file, fileExtension, err) + } + } + result = append(result, FileData{Name: file, Content: fileContent}) } - - return &cfg, nil + return result, nil } -func Load(ctx *cli.Context) (*Config, error) { - configFilePath := ctx.String(FlagCfg) - return LoadFile(configFilePath) + +func getFileExtension(fileName string) string { + return fileName[strings.LastIndex(fileName, ".")+1:] } // Load loads the configuration -func LoadFile(configFilePath string) (*Config, error) { - cfg, err := Default() +func LoadFileFromString(configFileData string, configType string) (*Config, error) { + cfg := &Config{} + err := loadString(cfg, configFileData, configType, true, EnvVarPrefix) if err != nil { - return nil, err + return cfg, err } - expectedKeys := viper.AllKeys() - if configFilePath != "" { - dirName, fileName := filepath.Split(configFilePath) + return cfg, nil +} - fileExtension := strings.TrimPrefix(filepath.Ext(fileName), ".") - fileNameWithoutExtension := strings.TrimSuffix(fileName, "."+fileExtension) +func SaveConfigToFile(cfg *Config, saveConfigPath string) error { + marshaled, err := toml.Marshal(cfg) + if err != nil { + log.Errorf("Can't marshal config to toml. Err: %w", err) + return err + } + return SaveDataToFile(saveConfigPath, "final config file", marshaled) +} - viper.AddConfigPath(dirName) - viper.SetConfigName(fileNameWithoutExtension) - viper.SetConfigType(fileExtension) +func SaveDataToFile(fullPath, reason string, data []byte) error { + log.Infof("Writing %s to: %s", reason, fullPath) + err := os.WriteFile(fullPath, data, DefaultCreationFilePermissions) + if err != nil { + err = fmt.Errorf("error writing %s to file %s. 
Err: %w", reason, fullPath, err) + log.Error(err) + return err } + return nil +} + +// Load loads the configuration +func LoadFile(files []FileData, saveConfigPath string, + setDefaultVars bool, allowDeprecatedFields bool) (*Config, error) { + log.Infof("Loading configuration: saveConfigPath: %s, setDefaultVars: %t, allowDeprecatedFields: %t", + saveConfigPath, setDefaultVars, allowDeprecatedFields) + fileData := make([]FileData, 0) + if setDefaultVars { + log.Info("Setting default vars") + fileData = append(fileData, FileData{Name: "default_mandatory_vars", Content: DefaultMandatoryVars}) + } + fileData = append(fileData, FileData{Name: "default_vars", Content: DefaultVars}) + fileData = append(fileData, FileData{Name: "default_values", Content: DefaultValues}) + fileData = append(fileData, files...) - viper.AutomaticEnv() - replacer := strings.NewReplacer(".", "_") - viper.SetEnvKeyReplacer(replacer) - viper.SetEnvPrefix("CDK") + merger := NewConfigRender(fileData, EnvVarPrefix) - err = viper.ReadInConfig() + renderedCfg, err := merger.Render() if err != nil { - var configNotFoundError viper.ConfigFileNotFoundError - if errors.As(err, &configNotFoundError) { - log.Error("config file not found") - } else { - log.Errorf("error reading config file: ", err) + return nil, err + } + if saveConfigPath != "" { + fullPath := saveConfigPath + "/" + SaveConfigFileName + ".merged" + err = SaveDataToFile(fullPath, "merged config file", []byte(renderedCfg)) + if err != nil { return nil, err } } - - decodeHooks := []viper.DecoderConfigOption{ - // this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3" - viper.DecodeHook( - mapstructure.ComposeDecodeHookFunc( - mapstructure.TextUnmarshallerHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), - ), + cfg, err := LoadFileFromString(renderedCfg, ConfigType) + // If allowDeprecatedFields is true, we ignore the deprecated fields + if err != nil && allowDeprecatedFields { + var customErr *DeprecatedFieldsError + if errors.As(err, &customErr) { + log.Warnf("detected deprecated fields: %s", err.Error()) + err = nil + } } - err = viper.Unmarshal(&cfg, decodeHooks...) 
if err != nil { return nil, err } - if expectedKeys != nil { - configKeys := viper.AllKeys() - unexpectedFields := getUnexpectedFields(configKeys, expectedKeys) - for _, field := range unexpectedFields { - forbbidenInfo := getForbiddenField(field) - if forbbidenInfo != nil { - log.Warnf("forbidden field %s in config file: %s", field, forbbidenInfo.Reason) - } else { - log.Debugf("field %s in config file doesnt have a default value", field) - } + if saveConfigPath != "" { + fullPath := saveConfigPath + "/" + SaveConfigFileName + err = SaveConfigToFile(cfg, fullPath) + if err != nil { + return nil, err } } return cfg, nil } -func getForbiddenField(fieldName string) *ForbiddenField { - for _, forbiddenField := range forbiddenFieldsOnConfig { - if forbiddenField.FieldName == fieldName || strings.HasPrefix(fieldName, forbiddenField.FieldName) { - return &forbiddenField - } +// Load loads the configuration +func loadString(cfg *Config, configData string, configType string, + allowEnvVars bool, envPrefix string) error { + viper.SetConfigType(configType) + if allowEnvVars { + replacer := strings.NewReplacer(".", "_") + viper.SetEnvKeyReplacer(replacer) + viper.SetEnvPrefix(envPrefix) + viper.AutomaticEnv() } + err := viper.ReadConfig(bytes.NewBuffer([]byte(configData))) + if err != nil { + return err + } + decodeHooks := []viper.DecoderConfigOption{ + // this allows arrays to be decoded from env var separated by ",", example: MY_VAR="value1,value2,value3" + viper.DecodeHook(mapstructure.ComposeDecodeHookFunc( + mapstructure.TextUnmarshallerHookFunc(), mapstructure.StringToSliceHookFunc(","))), + } + + err = viper.Unmarshal(&cfg, decodeHooks...) + if err != nil { + return err + } + configKeys := viper.AllKeys() + err = checkDeprecatedFields(configKeys) + if err != nil { + return err + } + return nil } -func getUnexpectedFields(keysOnFile, expectedConfigKeys []string) []string { - wrongFields := make([]string, 0) - for _, key := range keysOnFile { - if !contains(expectedConfigKeys, key) { - wrongFields = append(wrongFields, key) +func checkDeprecatedFields(keysOnConfig []string) error { + err := NewErrDeprecatedFields() + for _, key := range keysOnConfig { + forbbidenInfo := getDeprecatedField(key) + if forbbidenInfo != nil { + err.AddDeprecatedField(key, *forbbidenInfo) } } - return wrongFields + if len(err.Fields) > 0 { + return err + } + return nil } -func contains(keys []string, key string) bool { - for _, k := range keys { - if k == key { - return true +func getDeprecatedField(fieldName string) *DeprecatedField { + for _, deprecatedField := range deprecatedFieldsOnConfig { + if deprecatedField.FieldNamePattern == fieldName { + return &deprecatedField + } + // If the field name ends with a dot, it means FieldNamePattern* + if deprecatedField.FieldNamePattern[len(deprecatedField.FieldNamePattern)-1] == '.' 
&& + strings.HasPrefix(fieldName, deprecatedField.FieldNamePattern) { + return &deprecatedField } } - return false + return nil } diff --git a/config/config_render.go b/config/config_render.go new file mode 100644 index 00000000..10a0a673 --- /dev/null +++ b/config/config_render.go @@ -0,0 +1,306 @@ +package config + +import ( + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/0xPolygon/cdk/log" + "github.com/knadh/koanf/parsers/json" + "github.com/knadh/koanf/parsers/toml" + "github.com/knadh/koanf/providers/rawbytes" + "github.com/knadh/koanf/v2" + "github.com/valyala/fasttemplate" +) + +const ( + startTag = "{{" + endTag = "}}" +) + +var ( + ErrCycleVars = fmt.Errorf("cycle vars") + ErrMissingVars = fmt.Errorf("missing vars") + ErrUnsupportedConfigFileType = fmt.Errorf("unsupported config file type") +) + +type FileData struct { + Name string + Content string +} + +type ConfigRender struct { + // 0: default, 1: specific + FilesData []FileData + // Function to resolve environment variables typically: Os.LookupEnv + LookupEnvFunc func(key string) (string, bool) + EnvinormentPrefix string +} + +func NewConfigRender(filesData []FileData, envinormentPrefix string) *ConfigRender { + return &ConfigRender{ + FilesData: filesData, + LookupEnvFunc: os.LookupEnv, + EnvinormentPrefix: envinormentPrefix, + } +} + +// - Merge all files +// - Resolve all variables inside +func (c *ConfigRender) Render() (string, error) { + mergedData, err := c.Merge() + if err != nil { + return "", fmt.Errorf("fail to merge files. Err: %w", err) + } + return c.ResolveVars(mergedData) +} + +func (c *ConfigRender) Merge() (string, error) { + k := koanf.New(".") + for _, data := range c.FilesData { + dataToml := c.convertVarsToStrings(data.Content) + err := k.Load(rawbytes.Provider([]byte(dataToml)), toml.Parser()) + if err != nil { + log.Errorf("error loading file %s. Err:%v.FileData: %v", data.Name, err, dataToml) + return "", fmt.Errorf("fail to load converted template %s to toml. Err: %w", data.Name, err) + } + } + marshaled, err := k.Marshal(toml.Parser()) + if err != nil { + return "", fmt.Errorf("fail to marshal to toml. Err: %w", err) + } + out2 := RemoveQuotesForVars(string(marshaled)) + return out2, err +} + +func (c *ConfigRender) ResolveVars(fullConfigData string) (string, error) { + // Read values, the values that are indirections get the var string "{{tag}}" + // this step doesn't resolve any var + tpl, valuesDefined, err := c.ReadTemplateAdnDefinedValues(fullConfigData) + if err != nil { + return "", err + } + // It fills the defined vars, if a var is not defined keep the template form: + // A={{B}} + renderedTemplateWithResolverVars := c.executeTemplate(tpl, valuesDefined, true) + renderedTemplateWithResolverVars = RemoveTypeMarks(renderedTemplateWithResolverVars) + // ? there are unresolved vars??. This means that is not a cycle, just + // a missing value + unresolvedVars := c.GetUnresolvedVars(tpl, valuesDefined, true) + if len(unresolvedVars) > 0 { + return renderedTemplateWithResolverVars, fmt.Errorf("missing vars: %v. 
Err: %w", unresolvedVars, ErrMissingVars) + } + // If there are still vars on configfile it means there are cycles: + // Cycles are vars that depend on each other: + // A= {{B}} and B= {{A}} + // Also can be bigger cycles: + // A= {{B}} and B= {{C}} and C= {{A}} + // The way to detect that is, after resolving all vars if there are still vars to resolve, + // then there is a cycle + finalConfigData, err := c.ResolveCycle(renderedTemplateWithResolverVars) + if err != nil { + return fullConfigData, err + } + return finalConfigData, err +} + +// ResolveCycle resolve the cycle vars: +// It iterate to configData, each step must reduce the number of 'vars' +// if not means that there are cycle vars +func (c *ConfigRender) ResolveCycle(partialResolvedConfigData string) (string, error) { + tmpData := RemoveQuotesForVars(partialResolvedConfigData) + pendinVars := c.GetVars(tmpData) + if len(pendinVars) == 0 { + // Nothing to do resolve + return partialResolvedConfigData, nil + } + log.Debugf("ResolveCycle: pending vars: %v", pendinVars) + previousData := tmpData + for ok := true; ok; ok = len(pendinVars) > 0 { + previousVars := pendinVars + tpl, valuesDefined, err := c.ReadTemplateAdnDefinedValues(previousData) + if err != nil { + log.Errorf("resolveCycle: fails ReadTemplateAdnDefinedValues. Err: %v. Data:%s", err, previousData) + return "", fmt.Errorf("fails to read template ResolveCycle. Err: %w", err) + } + renderedTemplateWithResolverVars := c.executeTemplate(tpl, valuesDefined, true) + tmpData = RemoveQuotesForVars(renderedTemplateWithResolverVars) + tmpData = RemoveTypeMarks(tmpData) + + pendinVars = c.GetVars(tmpData) + if len(pendinVars) == len(previousVars) { + return partialResolvedConfigData, fmt.Errorf("not resolved cycle vars: %v. Err: %w", pendinVars, ErrCycleVars) + } + previousData = tmpData + } + return previousData, nil +} + +// The variables in data must be in format template: +// A={{B}} no A="{{B}}" +func (c *ConfigRender) ReadTemplateAdnDefinedValues(data string) (*fasttemplate.Template, + map[string]interface{}, error) { + tpl, err := fasttemplate.NewTemplate(data, startTag, endTag) + if err != nil { + return nil, nil, fmt.Errorf("fail to load template ReadTemplateAdnDefinedValues. Err:%w", err) + } + out := c.convertVarsToStrings(data) + k := koanf.New(".") + err = k.Load(rawbytes.Provider([]byte(out)), toml.Parser()) + if err != nil { + return nil, nil, fmt.Errorf("error ReadTemplateAdnDefinedValues parsing"+ + " data koanf.Load.Content: %s. 
Err: %w", out, err) + } + return tpl, k.All(), nil +} + +func (c *ConfigRender) convertVarsToStrings(data string) string { + re := regexp.MustCompile(`=\s*\{\{([^}:]+)\}\}`) + data = re.ReplaceAllString(data, `= "{{${1}:int}}"`) + return data +} + +func RemoveQuotesForVars(data string) string { + re := regexp.MustCompile(`=\s*\"\{\{([^}:]+:int)\}\}\"`) + data = re.ReplaceAllStringFunc(data, func(match string) string { + submatch := re.FindStringSubmatch(match) + if len(submatch) > 1 { + parts := strings.Split(submatch[1], ":") + return "= {{" + parts[0] + "}}" + } + return match + }) + return data +} + +func RemoveTypeMarks(data string) string { + re := regexp.MustCompile(`\{\{([^}:]+:int)\}\}`) + data = re.ReplaceAllStringFunc(data, func(match string) string { + submatch := re.FindStringSubmatch(match) + if len(submatch) > 1 { + parts := strings.Split(submatch[1], ":") + return "{{" + parts[0] + "}}" + } + return match + }) + return data +} + +func (c *ConfigRender) executeTemplate(tpl *fasttemplate.Template, + data map[string]interface{}, + useEnv bool) string { + return tpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { + if useEnv { + if v, ok := c.findTagInEnvironment(tag); ok { + tmp := fmt.Sprintf("%v", v) + return w.Write([]byte(tmp)) + } + } + if v, ok := data[tag]; ok { + tmp := fmt.Sprintf("%v", v) + return w.Write([]byte(tmp)) + } + + v := composeVarKeyForTemplate(tag) + return w.Write([]byte(v)) + }) +} + +// GetUnresolvedVars returns the vars in template that are no on data +// In this case we don't use environment variables +func (c *ConfigRender) GetUnresolvedVars(tpl *fasttemplate.Template, + data map[string]interface{}, useEnv bool) []string { + var unresolved []string + tpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { + if useEnv { + if v, ok := c.findTagInEnvironment(tag); ok { + return w.Write([]byte(v)) + } + } + if _, ok := data[tag]; !ok { + if !contains(unresolved, tag) { + unresolved = append(unresolved, tag) + } + } + return w.Write([]byte("")) + }) + return unresolved +} + +func contains(vars []string, search string) bool { + for _, v := range vars { + if v == search { + return true + } + } + return false +} + +// GetVars returns the vars in template +func (c *ConfigRender) GetVars(configData string) []string { + tpl, err := fasttemplate.NewTemplate(configData, startTag, endTag) + if err != nil { + return []string{} + } + vars := unresolvedVars(tpl, map[string]interface{}{}) + return vars +} + +func (c *ConfigRender) findTagInEnvironment(tag string) (string, bool) { + envTag := c.composeVarKeyForEnvirnonment(tag) + if v, ok := c.LookupEnvFunc(envTag); ok { + return v, true + } + return "", false +} + +func (c *ConfigRender) composeVarKeyForEnvirnonment(key string) string { + return c.EnvinormentPrefix + "_" + strings.ReplaceAll(key, ".", "_") +} + +func composeVarKeyForTemplate(key string) string { + return startTag + key + endTag +} + +func readFileToString(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return string(content), nil +} + +func unresolvedVars(tpl *fasttemplate.Template, data map[string]interface{}) []string { + var unresolved []string + tpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { + if _, ok := data[tag]; !ok { + unresolved = append(unresolved, tag) + } + return w.Write([]byte("")) + }) + return unresolved +} + +func convertFileToToml(fileData string, fileType string) (string, error) { + switch strings.ToLower(fileType) { + 
case "json": + k := koanf.New(".") + err := k.Load(rawbytes.Provider([]byte(fileData)), json.Parser()) + if err != nil { + return fileData, fmt.Errorf("error loading json file. Err: %w", err) + } + config := k.Raw() + tomlData, err := toml.Parser().Marshal(config) + if err != nil { + return fileData, fmt.Errorf("error converting json to toml. Err: %w", err) + } + return string(tomlData), nil + case "yml", "yaml", "ini": + return fileData, fmt.Errorf("cant convert from %s to TOML. Err: %w", fileType, ErrUnsupportedConfigFileType) + default: + log.Warnf("filetype %s unknown, assuming is a TOML file", fileType) + return fileData, nil + } +} diff --git a/config/config_render_test.go b/config/config_render_test.go new file mode 100644 index 00000000..0636ed75 --- /dev/null +++ b/config/config_render_test.go @@ -0,0 +1,283 @@ +package config + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +var ( + s = []string{"A= {{B}}\n", "B= {{C}}\nC={{A}}\n"} +) + +type testCaseData struct { + name string + contents []string + envVars map[string]string + expectedMerged string + expectedRenderConfig string + expectedError error +} + +func TestConfigRenderMerge(t *testing.T) { + var tests = []testCaseData{ + { + name: "Merge 2 elements", + contents: []string{"A=1\n", "B=2\n"}, + expectedRenderConfig: "A = 1\nB = 2\n", + }, + { + name: "Merge 3 elements", + contents: []string{"A=1\n", "B=2\n", "C=3\n"}, + expectedRenderConfig: "A = 1\nB = 2\nC = 3\n", + }, + { + name: "Merge 3 elements, overlapped", + contents: []string{"A=1\n", "A=2\nB=2\n", "A=3\nC=3\n"}, + expectedRenderConfig: "A = 3\nB = 2\nC = 3\n", + }, + { + name: "Merge 3 elements, overlapped final var", + contents: []string{"A=1\n", "A=2\nB=2\n", "A={{VAR}}\nC=3\n"}, + expectedRenderConfig: "A = {{VAR}}\nB = 2\nC = 3\n", + expectedError: ErrMissingVars, + }, + } + executeCases(t, tests) +} + +func TestConfigRenderDetectCycle(t *testing.T) { + var tests = []testCaseData{ + { + name: "Cycle 3 elements", + contents: []string{"A= {{B}}\n", "B= {{C}}\nC={{A}}\n"}, + expectedMerged: "A = {{B}}\nB = {{C}}\nC = {{A}}\n", + expectedRenderConfig: "A = {{B}}\nB = {{C}}\nC = {{A}}\n", + expectedError: ErrCycleVars, + }, + { + name: "Cycle 2 elements", + contents: []string{"A= {{B}}\n", "B= {{A}}\n"}, + expectedRenderConfig: "A = {{B}}\nB = {{A}}\n", + expectedError: ErrCycleVars, + }, + { + name: "Cycle 1 elements", + contents: []string{"A= {{A}}\n", ""}, + expectedRenderConfig: "A = {{A}}\n", + expectedError: ErrCycleVars, + }, + } + executeCases(t, tests) +} + +func TestConfigRenderTypes(t *testing.T) { + var tests = []testCaseData{ + { + name: "Cycle 3 elements B, break var", + contents: []string{"INT_VALUE={{MY_INT}}\n STR_VALUE= \"{{MY_STR}}\"\n MYBOOL={{MY_BOOL}}\n", + "MY_STR=\"a string\"\nMY_INT=4\nMY_BOOL=true\nNO_RESOLVED={{NOT_DEFINED_VAR}}\n"}, + envVars: map[string]string{"UTCR_B": "4"}, + expectedError: ErrMissingVars, + expectedRenderConfig: "INT_VALUE = 4\nMYBOOL = true\nMY_BOOL = true\nMY_INT = 4\nMY_STR = \"a string\"\nNO_RESOLVED = {{NOT_DEFINED_VAR}}\nSTR_VALUE = \"a string\"\n", + }, + } + executeCases(t, tests) +} + +func TestConfigRenderComposedValues(t *testing.T) { + var tests = []testCaseData{ + { + name: "Composed var", + contents: []string{"A=\"path\"\n", "B= \"{{A}}to\"\n"}, + expectedRenderConfig: "A = \"path\"\nB = \"pathto\"\n", + }, + } + executeCases(t, tests) +} + +func TestConfigRenderCycleBrokenByEnvVar(t *testing.T) { + var tests = []testCaseData{ + { + name: "Cycle 3 elements B, break var", + 
contents: []string{"A= {{B}}\n", "B= {{C}}\nC={{A}}\n"}, + envVars: map[string]string{"UTCR_B": "4"}, + expectedRenderConfig: "A = 4\nB = 4\nC = 4\n", + }, + { + name: "Cycle 3 elements A, break var", + contents: []string{"A= {{B}}\n", "B= {{C}}\nC={{A}}\n"}, + envVars: map[string]string{"UTCR_A": "4"}, + expectedRenderConfig: "A = 4\nB = 4\nC = 4\n", + }, + { + name: "Cycle 3 elements C, break var", + contents: []string{"A= {{B}}\n", "B= {{C}}\nC={{A}}\n"}, + envVars: map[string]string{"UTCR_C": "4"}, + expectedRenderConfig: "A = 4\nB = 4\nC = 4\n", + }, + } + executeCases(t, tests) +} + +func TestConfigRenderOverrideByEnvVars(t *testing.T) { + var tests = []testCaseData{ + { + name: "Variable is not set in config file but override as number", + contents: []string{"A={{C}}\n"}, + envVars: map[string]string{"UTCR_C": "4"}, + expectedRenderConfig: "A = 4\n", + }, + // Notice that the exported variable have the quotes + { + name: "Variable is not set in config file but override as string", + contents: []string{"A={{C}}\n"}, + envVars: map[string]string{"UTCR_C": "\"4\""}, + expectedRenderConfig: "A = \"4\"\n", + }, + } + executeCases(t, tests) +} + +func TestConfigRenderPropagateType(t *testing.T) { + var tests = []testCaseData{ + { + name: "propagateType: set directly", + contents: []string{"A=\"hello\"\n", "B= \"{{A}}\"\n"}, + expectedRenderConfig: "A = \"hello\"\nB = \"hello\"\n", + }, + { + name: "propagateType: set directly", + contents: []string{"A=\"hello\"\n", "B=\"{{A}}\"\n"}, + envVars: map[string]string{"UTCR_A": "you"}, + expectedRenderConfig: "A = \"hello\"\nB = \"you\"\n", + }, + } + executeCases(t, tests) +} + +func TestConfigRenderComplexStruct(t *testing.T) { + defaultValues := ` + [Etherman] + URL="http://generic_url" + ForkIDChunkSize=100 + [Etherman.EthermanConfig] + URL="http://localhost:8545" +` + confiFile := ` + [Etherman.EthermanConfig] + URL="{{Etherman.URL}}" + ` + var tests = []testCaseData{ + { + name: "Complex struct merge", + contents: []string{defaultValues, confiFile}, + expectedRenderConfig: "\n[Etherman]\n ForkIDChunkSize = 100\n URL = \"http://generic_url\"\n\n [Etherman.EthermanConfig]\n URL = \"http://generic_url\"\n", + }, + // This test Etherman.URL doesnt change because is not a var, it will change value on viper stage + { + name: "Complex struct merge override env-var, but we must propagate the string type", + contents: []string{defaultValues, confiFile}, + envVars: map[string]string{"UTCR_Etherman_URL": "env"}, + expectedRenderConfig: "\n[Etherman]\n ForkIDChunkSize = 100\n URL = \"http://generic_url\"\n\n [Etherman.EthermanConfig]\n URL = \"env\"\n", + }, + } + executeCases(t, tests) +} + +func TestConfigRenderConvertFileToToml(t *testing.T) { + jsonFile := `{ + "rollupCreationBlockNumber": 63, + "rollupManagerCreationBlockNumber": 57, + "genesisBlockNumber": 63, + "L1Config": { + "chainId": 271828, + "polygonZkEVMGlobalExitRootAddress": "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674", + "polygonRollupManagerAddress": "0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2", + "polTokenAddress": "0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E", + "polygonZkEVMAddress": "0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" + } +} +` + data, err := convertFileToToml(jsonFile, "json") + require.NoError(t, err) + require.Equal(t, "genesisBlockNumber = 63.0\nrollupCreationBlockNumber = 63.0\nrollupManagerCreationBlockNumber = 57.0\n\n[L1Config]\n chainId = 271828.0\n polTokenAddress = \"0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E\"\n polygonRollupManagerAddress = 
\"0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2\"\n polygonZkEVMAddress = \"0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91\"\n polygonZkEVMGlobalExitRootAddress = \"0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674\"\n", data) +} + +/* +TODO: This test generate this, is the same? +[PrivateKey] + Password = "testonly" + Path = "./test/sequencer.keystore" + +func TestConfigRenderValueIsAObject(t *testing.T) { + var tests = []testCaseData{ + { + name: "Complex struct object inside var", + contents: []string{"PrivateKey = {Path = \"./test/sequencer.keystore\", Password = \"testonly\"}"}, + expectedRenderConfig: "PrivateKey = {Path = \"./test/sequencer.keystore\", Password = \"testonly\"}", + }, + } + executeCases(t, tests) +} +*/ + +type configRenderTestData struct { + Sut *ConfigRender + EnvMock *osLookupEnvMock +} + +func newConfigRenderTestData(data []string) configRenderTestData { + envMock := &osLookupEnvMock{ + Env: map[string]string{}, + } + filesData := make([]FileData, len(data)) + for i, d := range data { + filesData[i] = FileData{Name: fmt.Sprintf("file%d", i), Content: d} + } + return configRenderTestData{ + EnvMock: envMock, + Sut: &ConfigRender{ + FilesData: filesData, + LookupEnvFunc: envMock.LookupEnv, + EnvinormentPrefix: "UTCR", + }, + } +} + +type osLookupEnvMock struct { + Env map[string]string +} + +func (m *osLookupEnvMock) LookupEnv(key string) (string, bool) { + val, exists := m.Env[key] + return val, exists +} + +func executeCases(t *testing.T, tests []testCaseData) { + t.Helper() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testData := newConfigRenderTestData(tt.contents) + if tt.envVars != nil { + testData.EnvMock.Env = tt.envVars + } + if tt.expectedMerged != "" { + merged, err := testData.Sut.Merge() + require.NoError(t, err) + require.Equal(t, tt.expectedMerged, merged) + } + res, err := testData.Sut.Render() + if tt.expectedError != nil { + require.Error(t, err) + require.ErrorIs(t, err, tt.expectedError) + } else { + require.NoError(t, err) + } + if len(tt.expectedRenderConfig) > 0 { + require.Equal(t, tt.expectedRenderConfig, res) + } + }) + } +} diff --git a/config/config_test.go b/config/config_test.go index a0844d96..a7da6481 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,37 +1,120 @@ package config import ( + "flag" + "fmt" "os" "testing" "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" ) -func TestLoadDeafaultConfig(t *testing.T) { +func TestLExploratorySetConfigFlag(t *testing.T) { + value := []string{"config.json", "another_config.json"} + ctx := newCliContextConfigFlag(t, value...) 
+ configFilePath := ctx.StringSlice(FlagCfg) + require.Equal(t, value, configFilePath) +} + +func TestLoadDefaultConfig(t *testing.T) { tmpFile, err := os.CreateTemp("", "ut_config") require.NoError(t, err) defer os.Remove(tmpFile.Name()) - _, err = tmpFile.Write([]byte(DefaultValues)) + _, err = tmpFile.Write([]byte(DefaultMandatoryVars)) require.NoError(t, err) - cfg, err := LoadFile(tmpFile.Name()) + ctx := newCliContextConfigFlag(t, tmpFile.Name()) + cfg, err := Load(ctx) require.NoError(t, err) require.NotNil(t, cfg) } -const configWithUnexpectedFields = ` -[UnknownField] -Field = "value" +const configWithDeprecatedFields = ` +[SequenceSender.EthTxManager] +nodepretatedfield = "value2" +persistencefilename = "value" ` -func TestLoadConfigWithUnexpectedFields(t *testing.T) { +func TestLoadConfigWithDeprecatedFields(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(DefaultVars + "\n" + configWithDeprecatedFields)) + require.NoError(t, err) + ctx := newCliContextConfigFlag(t, tmpFile.Name()) + cfg, err := Load(ctx) + require.Error(t, err) + require.Nil(t, cfg) +} + +func TestLoadConfigWithSaveConfigFile(t *testing.T) { tmpFile, err := os.CreateTemp("", "ut_config") require.NoError(t, err) defer os.Remove(tmpFile.Name()) - _, err = tmpFile.Write([]byte(configWithUnexpectedFields)) + _, err = tmpFile.Write([]byte(DefaultVars + "\n")) require.NoError(t, err) - cfg, err := LoadFile(tmpFile.Name()) + fmt.Printf("file: %s\n", tmpFile.Name()) + ctx := newCliContextConfigFlag(t, tmpFile.Name()) + dir, err := os.MkdirTemp("", "ut_test_save_config") + require.NoError(t, err) + defer os.RemoveAll(dir) + + err = ctx.Set(FlagSaveConfigPath, dir) + require.NoError(t, err) + cfg, err := Load(ctx) require.NoError(t, err) require.NotNil(t, cfg) + _, err = os.Stat(dir + "/" + SaveConfigFileName) + require.NoError(t, err) +} + +func TestTLoadFileFromStringDeprecatedField(t *testing.T) { + configFileData := configWithDeprecatedFields + _, err := LoadFileFromString(configFileData, "toml") + require.Error(t, err) +} +func TestTLoadDeprecatedField(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(DefaultVars + "\n" + configWithDeprecatedFields)) + require.NoError(t, err) + ctx := newCliContextConfigFlag(t, tmpFile.Name()) + _, err = Load(ctx) + require.Error(t, err) +} + +func TestTLoadDeprecatedFieldWithAllowFlag(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(DefaultVars + "\n" + configWithDeprecatedFields)) + require.NoError(t, err) + ctx := newCliContextConfigFlag(t, tmpFile.Name()) + err = ctx.Set(FlagAllowDeprecatedFields, "true") + require.NoError(t, err) + _, err = Load(ctx) + require.NoError(t, err) +} + +func TestCheckDeprecatedFields(t *testing.T) { + err := checkDeprecatedFields([]string{deprecatedFieldsOnConfig[0].FieldNamePattern}) + require.Error(t, err) + require.Contains(t, err.Error(), deprecatedFieldsOnConfig[0].FieldNamePattern) + require.Contains(t, err.Error(), deprecatedFieldsOnConfig[0].Reason) +} + +func TestCheckDeprecatedFieldsPattern(t *testing.T) { + err := checkDeprecatedFields([]string{"aggregator.synchronizer.db.name"}) + require.Error(t, err) + require.Contains(t, err.Error(), deprecatedFieldSyncDB) +} + +func TestLoadConfigWithInvalidFilename(t *testing.T) { + 
ctx := newCliContextConfigFlag(t, "invalid_file") + cfg, err := Load(ctx) + require.Error(t, err) + require.Nil(t, cfg) } func TestLoadConfigWithForbiddenFields(t *testing.T) { @@ -41,7 +124,7 @@ func TestLoadConfigWithForbiddenFields(t *testing.T) { }{ { name: "[Aggregator.Synchronizer] DB", - input: `[aggregator.synchronizer.db] + input: `[Aggregator.Synchronizer.DB] name = "value"`, }, { @@ -58,9 +141,24 @@ func TestLoadConfigWithForbiddenFields(t *testing.T) { defer os.Remove(tmpFile.Name()) _, err = tmpFile.Write([]byte(c.input)) require.NoError(t, err) - cfg, err := LoadFile(tmpFile.Name()) - require.NoError(t, err) - require.NotNil(t, cfg) + ctx := newCliContextConfigFlag(t, tmpFile.Name()) + cfg, err := Load(ctx) + require.Error(t, err) + require.Nil(t, cfg) }) } } + +func newCliContextConfigFlag(t *testing.T, values ...string) *cli.Context { + t.Helper() + flagSet := flag.NewFlagSet("test", flag.ContinueOnError) + var configFilePaths cli.StringSlice + flagSet.Var(&configFilePaths, FlagCfg, "") + flagSet.Bool(FlagAllowDeprecatedFields, false, "") + flagSet.String(FlagSaveConfigPath, "", "") + for _, value := range values { + err := flagSet.Parse([]string{"--" + FlagCfg, value}) + require.NoError(t, err) + } + return cli.NewContext(nil, flagSet, nil) +} diff --git a/config/default.go b/config/default.go index 74eec57d..442a44e0 100644 --- a/config/default.go +++ b/config/default.go @@ -1,17 +1,75 @@ package config +// This values doesnt have a default value because depend on the +// environment / deployment +const DefaultMandatoryVars = ` +L1URL = "http://localhost:8545" +L2URL = "localhost:8123" +L1AggOracleURL = "http://test-aggoracle-l1:8545" +L2AggOracleURL = "http://test-aggoracle-l2:8545" + + +ForkId = 9 +ContractVersions = "elderberry" +IsValidiumMode = false + +L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" +SequencerPrivateKeyPath = "/app/sequencer.keystore" +SequencerPrivateKeyPassword = "test" +WitnessURL = "localhost:8123" +AggLayerURL = "https://agglayer-dev.polygon.technology" +StreamServer = "localhost:6900" + +AggregatorPrivateKeyPath = "/app/keystore/aggregator.keystore" +AggregatorPrivateKeyPassword = "testonly" +# Who send Proof to L1? AggLayer addr, or aggregator addr? 
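+# For example, the address set below replaces every {{SenderProofToL1Addr}} placeholder in
+# DefaultValues (such as Aggregator.SenderAddress) when the configuration is rendered; like
+# the rest of this block, it is expected to be overridden per deployment.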
+SenderProofToL1Addr = "0x0000000000000000000000000000000000000000" + + + +# This values can be override directly from genesis.json +rollupCreationBlockNumber = 0 +rollupManagerCreationBlockNumber = 0 +genesisBlockNumber = 0 +[L1Config] + chainId = 0 + polygonZkEVMGlobalExitRootAddress = "0x0000000000000000000000000000000000000000" + polygonRollupManagerAddress = "0x0000000000000000000000000000000000000000" + polTokenAddress = "0x0000000000000000000000000000000000000000" + polygonZkEVMAddress = "0x0000000000000000000000000000000000000000" + polygonBridgeAddr = "0x0000000000000000000000000000000000000000" + +[L2Config] + GlobalExitRootAddr = "0x0000000000000000000000000000000000000000" + +` + +// This doesnt below to config, but are the vars used +// to avoid repetition in config-files +const DefaultVars = ` +PathRWData = "/tmp/cdk" +L1URLSyncChunkSize = 100 + +` + // DefaultValues is the default configuration const DefaultValues = ` ForkUpgradeBatchNumber = 0 ForkUpgradeNewForkId = 0 + +[Log] +Environment = "development" # "production" or "development" +Level = "info" +Outputs = ["stderr"] + [Etherman] - URL="http://localhost:8545" - ForkIDChunkSize=100 + URL="{{L1URL}}" + ForkIDChunkSize={{L1URLSyncChunkSize}} [Etherman.EthermanConfig] - URL="http://localhost:8545" + URL="{{L1URL}}" MultiGasProvider=false - L1ChainID=1337 + L1ChainID={{NetworkConfig.L1.L1ChainID}} HTTPHeaders=[] [Etherman.EthermanConfig.Etherscan] ApiKey="" @@ -19,28 +77,23 @@ ForkUpgradeNewForkId = 0 [Common] NetworkID = 1 -IsValidiumMode = false -ContractVersions = "banana" - -[Log] -Environment = "development" # "production" or "development" -Level = "info" -Outputs = ["stderr"] +IsValidiumMode = {{IsValidiumMode}} +ContractVersions = "{{ContractVersions}}" [SequenceSender] WaitPeriodSendSequence = "15s" LastBatchVirtualizationTimeMaxWaitPeriod = "10s" L1BlockTimestampMargin = "30s" MaxTxSizeForL1 = 131072 -L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" -PrivateKey = {Path = "./test/sequencer.keystore", Password = "testonly"} +L2Coinbase = "{{L2Coinbase}}" +PrivateKey = { Path = "{{SequencerPrivateKeyPath}}", Password = "{{SequencerPrivateKeyPassword}}"} SequencesTxFileName = "sequencesender.json" GasOffset = 80000 WaitPeriodPurgeTxFile = "15m" MaxPendingTx = 1 MaxBatchesForL1 = 300 BlockFinality = "FinalizedBlock" -RPCURL = "localhost:8123" +RPCURL = "{{L2URL}}" GetBatchWaitInterval = "10s" [SequenceSender.EthTxManager] FrequencyToMonitorTxs = "1s" @@ -48,42 +101,49 @@ GetBatchWaitInterval = "10s" GetReceiptMaxTime = "250ms" GetReceiptWaitInterval = "1s" PrivateKeys = [ - {Path = "./test/sequencer.keystore", Password = "testonly"}, + {Path = "{{SequencerPrivateKeyPath}}", Password = "{{SequencerPrivateKeyPassword}}"}, ] ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "ethtxmanager.db" + StoragePath = "ethtxmanager.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 [SequenceSender.EthTxManager.Etherman] - URL = "http://127.0.0.1:8545" + URL = "{{L1URL}}" MultiGasProvider = false - L1ChainID = 1337 + L1ChainID = {{NetworkConfig.L1.L1ChainID}} [Aggregator] +# GRPC server host Host = "0.0.0.0" +# GRPC server port Port = 50081 RetryTime = "5s" VerifyProofInterval = "10s" +ProofStatePollingInterval = "5s" TxProfitabilityCheckerType = "acceptall" TxProfitabilityMinReward = "1.1" -ProofStatePollingInterval = "5s" -SenderAddress = "" +IntervalAfterWhichBatchConsolidateAnyway="0s" +BatchProofSanityCheckEnabled = true +# ChainID is L2ChainID. 
Is populated on runtimme +ChainID = 0 +ForkId = {{ForkId}} +SenderAddress = "{{SenderProofToL1Addr}}" CleanupLockedProofsInterval = "2m" GeneratingProofCleanupThreshold = "10m" -BatchProofSanityCheckEnabled = true -ForkId = 9 GasOffset = 0 -WitnessURL = "localhost:8123" +WitnessURL = "{{WitnessURL}}" UseL1BatchData = true UseFullWitness = false SettlementBackend = "l1" AggLayerTxTimeout = "5m" -AggLayerURL = "" +AggLayerURL = "{{AggLayerURL}}" MaxWitnessRetrievalWorkers = 2 SyncModeOnlyEnabled = false -SequencerPrivateKey = {} + [Aggregator.SequencerPrivateKey] + Path = "{{SequencerPrivateKeyPath}}" + Password = "{{SequencerPrivateKeyPassword}}" [Aggregator.DB] Name = "aggregator_db" User = "aggregator_user" @@ -93,18 +153,18 @@ SequencerPrivateKey = {} EnableLog = false MaxConns = 200 [Aggregator.Log] - Environment = "development" # "production" or "development" - Level = "info" + Environment ="{{Log.Environment}}" # "production" or "development" + Level = "{{Log.Level}}" Outputs = ["stderr"] [Aggregator.StreamClient] - Server = "localhost:6900" + Server = "{{StreamServer}}" [Aggregator.EthTxManager] FrequencyToMonitorTxs = "1s" WaitTxToBeMined = "2m" GetReceiptMaxTime = "250ms" GetReceiptWaitInterval = "1s" PrivateKeys = [ - {Path = "/pk/aggregator.keystore", Password = "testonly"}, + {Path = "{{AggregatorPrivateKeyPath}}", Password = "{{AggregatorPrivateKeyPassword}}"}, ] ForcedGas = 0 GasPriceMarginFactor = 1 @@ -114,61 +174,68 @@ SequencerPrivateKey = {} SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 [Aggregator.EthTxManager.Etherman] - URL = "" - L1ChainID = 11155111 + URL = "{{L1URL}}" + L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] [Aggregator.Synchronizer] [Aggregator.Synchronizer.Log] - Environment = "development" # "production" or "development" - Level = "info" + Environment = "{{Log.Environment}}" # "production" or "development" + Level = "{{Log.Level}}" Outputs = ["stderr"] [Aggregator.Synchronizer.SQLDB] DriverName = "sqlite3" - DataSourceName = "file:/tmp/aggregator_sync_db.sqlite" + DataSource = "file:{{PathRWData}}/aggregator_sync_db.sqlite" [Aggregator.Synchronizer.Synchronizer] SyncInterval = "10s" SyncChunkSize = 1000 - GenesisBlockNumber = 5511080 + GenesisBlockNumber = {{genesisBlockNumber}} SyncUpToBlock = "finalized" BlockFinality = "finalized" OverrideStorageCheck = false [Aggregator.Synchronizer.Etherman] - L1URL = "http://localhost:8545" + L1URL = "{{L1URL}}" ForkIDChunkSize = 100 - L1ChainID = 0 + L1ChainID = {{NetworkConfig.L1.L1ChainID}} + PararellBlockRequest = false + [Aggregator.Synchronizer.Etherman.Contracts] + GlobalExitRootManagerAddr = "{{NetworkConfig.L1.GlobalExitRootManagerAddr}}" + RollupManagerAddr = "{{NetworkConfig.L1.RollupManagerAddr}}" + ZkEVMAddr = "{{NetworkConfig.L1.ZkEVMAddr}}" [Aggregator.Synchronizer.Etherman.Validium] - Enabled = false + Enabled = {{IsValidiumMode}} + # L2URL, empty ask to contract TrustedSequencerURL = "" RetryOnDACErrorInterval = "1m" DataSourcePriority = ["trusted", "external"] [Aggregator.Synchronizer.Etherman.Validium.Translator] FullMatchRules = [] [Aggregator.Synchronizer.Etherman.Validium.RateLimit] - NumRequests = 900 + NumRequests = 1000 Interval = "1s" [ReorgDetectorL1] -DBPath = "/tmp/reorgdetectorl1" +DBPath = "{{PathRWData}}/reorgdetectorl1" [ReorgDetectorL2] -DBPath = "/tmp/reorgdetectorl2" +DBPath = "{{PathRWData}}/reorgdetectorl2" [L1InfoTreeSync] -DBPath = "/tmp/L1InfoTreeSync.sqlite" -GlobalExitRootAddr="0x8464135c8F25Da09e49BC8782676a84730C318bC" +DBPath = 
"{{PathRWData}}/L1InfoTreeSync.sqlite" +GlobalExitRootAddr="{{NetworkConfig.L1.GlobalExitRootManagerAddr}}" +RollupManagerAddr = "{{NetworkConfig.L1.RollupManagerAddr}}" SyncBlockChunkSize=10 BlockFinality="LatestBlock" -URLRPCL1="http://test-aggoracle-l1:8545" +URLRPCL1="{{L1AggOracleURL}}" WaitForNewBlocksPeriod="100ms" InitialBlock=0 [AggOracle] TargetChainType="EVM" -URLRPCL1="http://test-aggoracle-l1:8545" +URLRPCL1="{{L1AggOracleURL}}" BlockFinality="FinalizedBlock" WaitPeriodNextGER="100ms" [AggOracle.EVMSender] - GlobalExitRootL2="0x8464135c8F25Da09e49BC8782676a84730C318bC" - URLRPCL2="http://test-aggoracle-l2:8545" + GlobalExitRootL2="{{L2Config.GlobalExitRootAddr}}" + URLRPCL2="{{L2AggOracleURL}}" ChainIDL2=1337 GasOffset=0 WaitPeriodMonitorTx="100ms" @@ -184,14 +251,14 @@ WaitPeriodNextGER="100ms" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "/tmp/ethtxmanager-sequencesender.db" + StoragePath = "/{{PathRWData}}/ethtxmanager-sequencesender.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 [AggOracle.EVMSender.EthTxManager.Etherman] - URL = "http://test-aggoracle-l2" + URL = "{{L2AggOracleURL}}" MultiGasProvider = false - L1ChainID = 1337 + L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] [RPC] @@ -202,7 +269,7 @@ WriteTimeout = "2s" MaxRequestsPerIPAndSecond = 10 [ClaimSponsor] -DBPath = "/tmp/claimsopnsor" +DBPath = "/{{PathRWData}}/claimsopnsor" Enabled = true SenderAddr = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" BridgeAddrL2 = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" @@ -223,51 +290,53 @@ GasOffset = 0 ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "/tmp/ethtxmanager-claimsponsor.db" + StoragePath = "/{{PathRWData}}/ethtxmanager-claimsponsor.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 [ClaimSponsor.EthTxManager.Etherman] - URL = "http://test-aggoracle-l2" + URL = "{{L2AggOracleURL}}" MultiGasProvider = false - L1ChainID = 1337 + L1ChainID = {{NetworkConfig.L1.L1ChainID}} HTTPHeaders = [] [BridgeL1Sync] -DBPath = "/tmp/bridgel1sync" +DBPath = "{{PathRWData}}/bridgel1sync" BlockFinality = "LatestBlock" InitialBlockNum = 0 -BridgeAddr = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" +BridgeAddr = "{{L1Config.polygonBridgeAddr}}" SyncBlockChunkSize = 100 RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "3s" [BridgeL2Sync] -DBPath = "/tmp/bridgel2sync" +DBPath = "{{PathRWData}}/bridgel2sync" BlockFinality = "LatestBlock" InitialBlockNum = 0 -BridgeAddr = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" +BridgeAddr = "{{L1Config.polygonBridgeAddr}}" SyncBlockChunkSize = 100 RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "3s" [LastGERSync] -DBPath = "/tmp/lastgersync" +# MDBX database path +DBPath = "{{PathRWData}}/lastgersync" BlockFinality = "LatestBlock" InitialBlockNum = 0 -GlobalExitRootL2Addr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" +GlobalExitRootL2Addr = "{{L2Config.GlobalExitRootAddr}}" RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "1s" DownloadBufferSize = 100 [NetworkConfig.L1] -L1ChainID = 0 -PolAddr = "0x0000000000000000000000000000000000000000" -ZkEVMAddr = "0x0000000000000000000000000000000000000000" -RollupManagerAddr = "0x0000000000000000000000000000000000000000" -GlobalExitRootManagerAddr = "0x0000000000000000000000000000000000000000" +L1ChainID = {{L1Config.chainId}} 
+PolAddr = "{{L1Config.polTokenAddress}}" +ZkEVMAddr = "{{L1Config.polygonZkEVMAddress}}" +RollupManagerAddr = "{{L1Config.polygonRollupManagerAddress}}" +GlobalExitRootManagerAddr = "{{L1Config.polygonZkEVMGlobalExitRootAddress}}" + ` diff --git a/config/network.go b/config/network.go index fc3f75ce..a613feb0 100644 --- a/config/network.go +++ b/config/network.go @@ -1,17 +1,8 @@ package config import ( - "encoding/json" - "errors" - "fmt" - "io" - "os" - ethermanconfig "github.com/0xPolygon/cdk/etherman/config" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/state" - "github.com/ethereum/go-ethereum/common" - "github.com/urfave/cli/v2" ) // NetworkConfig is the configuration struct for the different environments @@ -65,108 +56,3 @@ type genesisAccountFromJSON struct { // Name of the contract in L1 (e.g. "PolygonZkEVMDeployer", "PolygonZkEVMBridge",...) ContractName string `json:"contractName"` } - -func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { - cfgPath := ctx.String(FlagCustomNetwork) - - networkJSON, err := LoadGenesisFileAsString(cfgPath) - if err != nil { - panic(err.Error()) - } - - config, err := LoadGenesisFromJSONString(networkJSON) - if err != nil { - panic(fmt.Errorf("failed to load genesis configuration from file. Error: %w", err)) - } - cfg.NetworkConfig = config -} - -// LoadGenesisFileAsString loads the genesis file as a string -func LoadGenesisFileAsString(cfgPath string) (string, error) { - if cfgPath != "" { - f, err := os.Open(cfgPath) - if err != nil { - return "", err - } - - defer func() { - err := f.Close() - if err != nil { - log.Error(err) - } - }() - - b, err := io.ReadAll(f) - if err != nil { - return "", err - } - - return string(b), nil - } else { - return "", errors.New("custom netwrork file not provided. 
Please use the custom-network-file flag") - } -} - -// LoadGenesisFromJSONString loads the genesis file from JSON string -func LoadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { - var cfg NetworkConfig - - var cfgJSON GenesisFromJSON - if err := json.Unmarshal([]byte(jsonStr), &cfgJSON); err != nil { - return NetworkConfig{}, err - } - - if len(cfgJSON.Genesis) == 0 { - return cfg, nil - } - - cfg.L1Config = cfgJSON.L1Config - cfg.Genesis = state.Genesis{ - BlockNumber: cfgJSON.GenesisBlockNum, - Root: common.HexToHash(cfgJSON.Root), - Actions: []*state.GenesisAction{}, - } - - for _, account := range cfgJSON.Genesis { - if account.Balance != "" && account.Balance != "0" { - action := &state.GenesisAction{ - Address: account.Address, - Type: int(LeafTypeBalance), - Value: account.Balance, - } - cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) - } - - if account.Nonce != "" && account.Nonce != "0" { - action := &state.GenesisAction{ - Address: account.Address, - Type: int(LeafTypeNonce), - Value: account.Nonce, - } - cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) - } - - if account.Bytecode != "" { - action := &state.GenesisAction{ - Address: account.Address, - Type: int(LeafTypeCode), - Bytecode: account.Bytecode, - } - cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) - } - - if len(account.Storage) > 0 { - for storageKey, storageValue := range account.Storage { - action := &state.GenesisAction{ - Address: account.Address, - Type: int(LeafTypeStorage), - StoragePosition: storageKey, - Value: storageValue, - } - cfg.Genesis.Actions = append(cfg.Genesis.Actions, action) - } - } - } - - return cfg, nil -} diff --git a/go.mod b/go.mod index baa547b0..3c02cea6 100644 --- a/go.mod +++ b/go.mod @@ -16,14 +16,20 @@ require ( github.com/invopop/jsonschema v0.12.0 github.com/jackc/pgconn v1.14.3 github.com/jackc/pgx/v4 v4.18.3 + github.com/knadh/koanf/parsers/json v0.1.0 + github.com/knadh/koanf/parsers/toml v0.1.0 + github.com/knadh/koanf/providers/rawbytes v0.1.0 + github.com/knadh/koanf/v2 v2.1.1 github.com/ledgerwatch/erigon-lib v1.0.0 github.com/mattn/go-sqlite3 v1.14.23 github.com/mitchellh/mapstructure v1.5.0 + github.com/pelletier/go-toml/v2 v2.2.2 github.com/rubenv/sql-migrate v1.7.0 github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.27.4 + github.com/valyala/fasttemplate v1.2.2 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/zap v1.27.0 @@ -76,6 +82,7 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-stack/stack v1.8.1 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect @@ -99,6 +106,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jmoiron/sqlx v1.2.0 // indirect github.com/klauspost/compress v1.17.9 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/ledgerwatch/log/v3 v3.9.0 // indirect @@ -109,14 +117,16 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // 
indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/gomega v1.27.10 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.4 // indirect @@ -143,6 +153,7 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect diff --git a/go.sum b/go.sum index 9bbafe6a..c2f44da6 100644 --- a/go.sum +++ b/go.sum @@ -135,6 +135,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= @@ -256,6 +258,16 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/parsers/json v0.1.0 h1:dzSZl5pf5bBcW0Acnu20Djleto19T0CfHcvZ14NJ6fU= +github.com/knadh/koanf/parsers/json v0.1.0/go.mod h1:ll2/MlXcZ2BfXD6YJcjVFzhG9P0TdJ207aIBKQhV2hY= +github.com/knadh/koanf/parsers/toml v0.1.0 h1:S2hLqS4TgWZYj4/7mI5m1CQQcWurxUz6ODgOub/6LCI= +github.com/knadh/koanf/parsers/toml v0.1.0/go.mod h1:yUprhq6eo3GbyVXFFMdbfZSo928ksS+uo0FFqNMnO18= +github.com/knadh/koanf/providers/rawbytes v0.1.0 h1:dpzgu2KO6uf6oCb4aP05KDmKmAmI51k5pe8RYKQ0qME= +github.com/knadh/koanf/providers/rawbytes v0.1.0/go.mod h1:mMTB1/IcJ/yE++A2iEZbY1MLygX7vttU+C+S/YmPu9c= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -307,11 +319,15 @@ github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWt github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miguelmota/go-solidity-sha3 v0.1.1 h1:3Y08sKZDtudtE5kbTBPC9RYJznoSYyWI9VD6mghU0CA= github.com/miguelmota/go-solidity-sha3 v0.1.1/go.mod h1:sax1FvQF+f71j8W1uUHMZn8NxKyl5rYLks2nqj8RFEw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= @@ -339,6 +355,8 @@ github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -436,8 +454,12 @@ github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2n github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= github.com/wk8/go-ordered-map/v2 v2.1.8 
h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 15948b60..8fd9e82b 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -1,101 +1,57 @@ -ForkUpgradeBatchNumber = 0 -ForkUpgradeNewForkId = 0 +PathRWData = "/data/" +L1URL="{{.l1_rpc_url}}" +L2URL="http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" +L1AggOracleURL = "http://test-aggoracle-l1:8545" +L2AggOracleURL = "http://test-aggoracle-l2:8545" -[Common] +ForkId = {{.zkevm_rollup_fork_id}} IsValidiumMode = {{.is_cdk_validium}} - {{if eq .zkevm_rollup_fork_id "12"}} ContractVersions = "banana" {{else}} ContractVersions = "elderberry" {{end}} -[Etherman] -URL = "{{.l1_rpc_url}}" +L2Coinbase = "{{.zkevm_l2_sequencer_address}}" +SequencerPrivateKeyPath = "{{or .zkevm_l2_sequencer_keystore_file "/etc/cdk/sequencer.keystore"}}" +SequencerPrivateKeyPassword = "{{.zkevm_l2_keystore_password}}" + +AggregatorPrivateKeyPath = "{{or .zkevm_l2_aggregator_keystore_file "/etc/cdk/aggregator.keystore"}}" +AggregatorPrivateKeyPassword = "{{.zkevm_l2_keystore_password}}" +SenderProofToL1Addr = "{{.zkevm_l2_agglayer_address}}" + + + + +WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" +AggLayerURL = "http://agglayer:{{.agglayer_port}}" +StreamServer = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" + + + +# This values can be override directly from genesis.json +rollupCreationBlockNumber = "{{.zkevm_rollup_manager_block_number}}" +rollupManagerCreationBlockNumber = "{{.zkevm_rollup_manager_block_number}}" +genesisBlockNumber = "{{.zkevm_rollup_manager_block_number}}" +[L1Config] + chainId = "{{.l1_chain_id}}" + polygonZkEVMGlobalExitRootAddress = "{{.zkevm_global_exit_root_address}}" + polygonRollupManagerAddress = "{{.zkevm_rollup_manager_address}}" + polTokenAddress = "{{.pol_token_address}}" + polygonZkEVMAddress = "{{.zkevm_rollup_address}}" + polygonBridgeAddr = "0x0000000000000000000000000000000000000000" + +[L2Config] + GlobalExitRootAddr = "{{.zkevm_global_exit_root_address}}" [Log] Environment = "development" # "production" or "development" Level = "{{.global_log_level}}" Outputs = ["stderr"] -[SequenceSender] -WaitPeriodSendSequence = "15s" -LastBatchVirtualizationTimeMaxWaitPeriod = "10s" -MaxTxSizeForL1 = 131072 -L2Coinbase = "{{.zkevm_l2_sequencer_address}}" -PrivateKey = {Path = "{{or .zkevm_l2_sequencer_keystore_file "/etc/cdk/sequencer.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"} -SequencesTxFileName = "/data/sequencesender.json" -GasOffset = 80000 -WaitPeriodPurgeTxFile = "15m" -MaxPendingTx = 1 -{{if eq .zkevm_rollup_fork_id "12"}} -MaxBatchesForL1 = 300 -BlockFinality="FinalizedBlock" -RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" -GetBatchWaitInterval = "10s" -{{end}} - [SequenceSender.StreamClient] - Server = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" - [SequenceSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - ConsolidationL1ConfirmationBlocks = 5 - {{if eq .zkevm_rollup_fork_id "12"}} - FinalizedStatusL1NumberOfBlocks = 10 - WaitReceiptMaxTime = "250ms" - WaitReceiptCheckInterval = "8s" - {{else}} - FinalizationL1ConfirmationBlocks = 10 - WaitReceiptToBeGenerated = "8s" - {{end}} - PrivateKeys = [ - {Path = "{{or .zkevm_l2_sequencer_keystore_file 
"/etc/cdk/sequencer.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "/data/ethtxmanager.json" - [SequenceSender.EthTxManager.Etherman] - URL = "{{.l1_rpc_url}}" - L1ChainID = {{.l1_chain_id}} - HTTPHeaders = [] - [Aggregator] - FinalProofSanityCheckEnabled = false - Host = "0.0.0.0" Port = "{{.zkevm_aggregator_port}}" - RetryTime = "30s" - VerifyProofInterval = "30s" - ProofStatePollingInterval = "5s" - TxProfitabilityCheckerType = "acceptall" - TxProfitabilityMinReward = "1.1" - IntervalAfterWhichBatchConsolidateAnyway = "0s" - ChainID = "{{.zkevm_rollup_chain_id}}" - ForkId = {{.zkevm_rollup_fork_id}} - CleanupLockedProofsInterval = "2m0s" - GeneratingProofCleanupThreshold = "10m" - GasOffset = 150000 - UpgradeEtrogBatchNumber = "{{.zkevm_rollup_manager_block_number}}" - WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" - {{if .is_cdk_validium}} - SenderAddress = "{{.zkevm_l2_agglayer_address}}" - SettlementBackend = "agglayer" - AggLayerTxTimeout = "600s" - AggLayerURL = "http://agglayer:{{.agglayer_port}}" - {{else}} - SenderAddress = "{{.zkevm_l2_aggregator_address}}" - {{end}} - {{if eq .zkevm_rollup_fork_id "12"}} - UseL1BatchData = true - UseFullWitness = false - MaxWitnessRetrievalWorkers = 2 - SyncModeOnlyEnabled = false - {{end}} - - [Aggregator.SequencerPrivateKey] - Path = "/etc/cdk/sequencer.keystore" - Password = "{{.zkevm_l2_keystore_password}}" [Aggregator.DB] Name = "{{.aggregator_db.name}}" User = "{{.aggregator_db.user}}" @@ -103,72 +59,4 @@ GetBatchWaitInterval = "10s" Host = "{{.aggregator_db.hostname}}" Port = "{{.aggregator_db.port}}" EnableLog = false - MaxConns = 200 - [Aggregator.Log] - Environment = "development" # "production" or "development" - Level = "{{.global_log_level}}" - Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" - [Aggregator.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - - {{if eq .zkevm_rollup_fork_id "12"}} - WaitReceiptMaxTime = "250ms" - WaitReceiptCheckInterval = "1s" - {{else}} - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - {{end}} - - PrivateKeys = [ - {Path = "{{or .zkevm_l2_aggregator_keystore_file "/etc/cdk/aggregator.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [Aggregator.EthTxManager.Etherman] - URL = "{{.l1_rpc_url}}" - L1ChainID = {{.l1_chain_id}} - HTTPHeaders = [] - [Aggregator.Synchronizer] - [Aggregator.Synchronizer.SQLDB] - DriverName = "sqlite3" - DataSource = "file:/data/aggregator_sync_db.sqlite" - [Aggregator.Synchronizer.Synchronizer] - SyncInterval = "10s" - SyncChunkSize = 1000 - GenesisBlockNumber = "{{.zkevm_rollup_manager_block_number}}" - SyncUpToBlock = "latest" - BlockFinality = "latest" - OverrideStorageCheck = false - [Aggregator.Synchronizer.Etherman] - [Aggregator.Synchronizer.Etherman.Validium] - Enabled = {{.is_cdk_validium}} - - -[L1InfoTreeSync] -DBPath = "/tmp/L1InfoTreeSync" # TODO: put a more realisitic path here -GlobalExitRootAddr = "{{.zkevm_global_exit_root_address}}" -RollupManagerAddr = "{{.zkevm_rollup_manager_address}}" -SyncBlockChunkSize = 10 -BlockFinality = "LatestBlock" -URLRPCL1 = "{{.l1_rpc_url}}" 
-WaitForNewBlocksPeriod = "1s" -InitialBlock = "{{.zkevm_rollup_manager_block_number}}" - -[NetworkConfig.L1] -{{if eq .zkevm_rollup_fork_id "12"}} -L1ChainID = "{{.l1_chain_id}}" -{{else}} -ChainID = "{{.l1_chain_id}}" -{{end}} -PolAddr = "{{.pol_token_address}}" -ZkEVMAddr = "{{.zkevm_rollup_address}}" -RollupManagerAddr = "{{.zkevm_rollup_manager_address}}" -GlobalExitRootManagerAddr = "{{.zkevm_global_exit_root_address}}" + MaxConns = 200 \ No newline at end of file From 3a35c8ed45a9a83cd604e46516f893c5670d8027 Mon Sep 17 00:00:00 2001 From: Victor Castell Date: Thu, 24 Oct 2024 14:45:44 +0200 Subject: [PATCH 52/53] test: withdrawal from L2 to L1 (#126) - L2 to L1 withdrawal test - Refactor tests to reduce complexity - Bump kurtosis-cdk version to v0.2.15 - Improve CI by dumping enclave logs + files in kurtosis --- .github/workflows/test-e2e.yml | 21 +++++- test/bridge-e2e.bats | 85 ++++++++++++++++-------- test/combinations/fork11-rollup.yml | 2 - test/combinations/fork9-cdk-validium.yml | 2 +- test/helpers/common.bash | 24 ++++--- test/helpers/lxly-bridge-test.bash | 31 +++++---- 6 files changed, 111 insertions(+), 54 deletions(-) diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 787d2301..7fdb5a2b 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -70,7 +70,7 @@ jobs: with: repository: 0xPolygon/kurtosis-cdk path: "kurtosis-cdk" - ref: "v0.2.14" + ref: "v0.2.15" - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 @@ -81,3 +81,22 @@ jobs: env: KURTOSIS_FOLDER: ${{ github.workspace }}/kurtosis-cdk BATS_LIB_PATH: /usr/lib/ + + - name: Dump enclave logs + if: failure() + run: kurtosis dump ./dump + + - name: Generate archive name + if: failure() + run: | + archive_name="dump_run_with_args_${{matrix.e2e-group}}_${{ github.run_id }}" + echo "ARCHIVE_NAME=${archive_name}" >> "$GITHUB_ENV" + echo "Generated archive name: ${archive_name}" + kurtosis service exec cdk cdk-node-001 'cat /etc/cdk/cdk-node-config.toml' > ./dump/cdk-node-config.toml + + - name: Upload logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: ${{ env.ARCHIVE_NAME }} + path: ./dump diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index f5391d1c..d504c1c9 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -13,15 +13,26 @@ setup() { bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) BRIDGE_ADDRESS=$bridge_default_address fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" destination_net=${DESTINATION_NET:-"1"} destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} ether_value=${ETHER_VALUE:-"0.0200000054"} amount=$(cast to-wei $ether_value ether) - token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + readonly native_token_addr=${NATIVE_TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + if [[ -n "$GAS_TOKEN_ADDR" ]]; then + echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 + gas_token_addr="$GAS_TOKEN_ADDR" + else + echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 + readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json + run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" + assert_success + assert_output --regexp "0x[a-fA-F0-9]{40}" + gas_token_addr=$output + fi readonly is_forced=${IS_FORCED:-"true"} readonly bridge_addr=$BRIDGE_ADDRESS readonly meta_bytes=${META_BYTES:-"0x"} @@ -30,47 +41,39 @@ setup() { readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} readonly dry_run=${DRY_RUN:-"false"} - readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") + readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) } -@test "Run deposit" { +@test "Native gas token deposit to WETH" { + local initial_receiver_balance=$(cast call --rpc-url "$l2_rpc_url" "$weth_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') + echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 + echo "Running LxLy deposit" >&3 - run deposit + run bridgeAsset "$native_token_addr" "$l1_rpc_url" assert_success - assert_output --partial 'transactionHash' -} -@test "Run claim" { echo "Running LxLy claim" >&3 - timeout="120" claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" + run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" assert_success -} -@test "Custom native token transfer" { - # Use GAS_TOKEN_ADDR if provided, otherwise retrieve from file - if [[ -n "$GAS_TOKEN_ADDR" ]]; then - echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 - local gas_token_addr="$GAS_TOKEN_ADDR" - else - echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 - readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json - run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" - assert_success - assert_output --regexp "0x[a-fA-F0-9]{40}" - local gas_token_addr=$output + run verify_balance "$l2_rpc_url" "$weth_token_addr" "$destination_addr" "$initial_receiver_balance" "$ether_value" + if [ $status -eq 0 ]; then + break fi + assert_success +} +@test "Custom gas token deposit" { echo "Gas token addr $gas_token_addr, L1 RPC: $l1_rpc_url" >&3 # Set receiver address and query for its initial native token balance on the L2 receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} - local initial_receiver_balance=$(cast balance --ether "$receiver" --rpc-url "$l2_rpc_url") + local initial_receiver_balance=$(cast balance "$receiver" --rpc-url "$l2_rpc_url") echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 # Query for initial sender balance @@ -106,20 +109,46 @@ setup() { assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" # Deposit - token_addr=$gas_token_addr destination_addr=$receiver destination_net=$l2_rpc_network_id amount=$wei_amount - run deposit + run bridgeAsset "$gas_token_addr" "$l1_rpc_url" assert_success # Claim deposits (settle them on the L2) timeout="120" claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" + run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" assert_success # Validate that the native token of receiver on L2 has increased by the bridge tokens amount - run verify_native_token_balance "$l2_rpc_url" "$receiver" "$initial_receiver_balance" "$tokens_amount" + run verify_balance "$l2_rpc_url" "$native_token_addr" "$receiver" "$initial_receiver_balance" "$tokens_amount" + assert_success +} + +@test "Custom gas token withdrawal" { + echo "Running LxLy withdrawal" >&3 + echo "Gas token addr $gas_token_addr, L1 RPC: $l1_rpc_url" >&3 + + local initial_receiver_balance=$(cast call --rpc-url "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') + assert_success + echo "Receiver balance of gas token on L1 $initial_receiver_balance" >&3 + + destination_net=$l1_rpc_network_id + run bridgeAsset "$native_token_addr" "$l2_rpc_url" + assert_success + + # Claim withdrawals (settle them on the L1) + timeout="360" + claim_frequency="10" + destination_net=$l1_rpc_network_id + run wait_for_claim "$timeout" "$claim_frequency" "$l1_rpc_url" + assert_success + + # Validate that the token of receiver on L1 has increased by the bridge tokens amount + run verify_balance "$l1_rpc_url" "$gas_token_addr" "$destination_addr" "$initial_receiver_balance" "$ether_value" + if [ $status -eq 0 ]; then + break + fi assert_success } diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index 653adc9d..1afd8f79 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -5,7 +5,5 @@ args: zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 cdk_node_image: cdk zkevm_use_gas_token_contract: true - additional_services: - - pless_zkevm_node data_availability_mode: rollup sequencer_type: erigon diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index f60fec9c..c28b2c49 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -1,6 +1,6 @@ args: zkevm_contracts_image: 
leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 - zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 + zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.6 cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk diff --git a/test/helpers/common.bash b/test/helpers/common.bash index a5ed751c..1978a8aa 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -294,21 +294,25 @@ function check_balances() { fi } -function verify_native_token_balance() { - local rpc_url="$1" # RPC URL - local account="$2" # account address - local initial_balance="$3" # initial balance in Ether (decimal) - local ether_amount="$4" # amount to be added (in Ether, decimal) - - # Convert initial balance and amount to wei (no decimals) - local initial_balance_wei=$(cast --to-wei "$initial_balance") +function verify_balance() { + local rpc_url="$1" # RPC URL + local token_addr="$2" # gas token contract address + local account="$3" # account address + local initial_balance_wei="$4" # initial balance in Wei (decimal) + local ether_amount="$5" # amount to be added (in Ether, decimal) # Trim 'ether' from ether_amount if it exists ether_amount=$(echo "$ether_amount" | sed 's/ether//') local amount_wei=$(cast --to-wei "$ether_amount") # Get final balance in wei (after the operation) - local final_balance_wei=$(cast balance "$account" --rpc-url "$rpc_url" | awk '{print $1}') + local final_balance_wei + if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then + final_balance_wei=$(cast balance "$account" --rpc-url "$rpc_url" | awk '{print $1}') + else + final_balance_wei=$(cast call --rpc-url "$rpc_url" "$token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') + fi + echo "Final balance of $account in $rpc_url: $final_balance_wei wei" >&3 # Calculate expected final balance (initial_balance + amount) local expected_final_balance_wei=$(echo "$initial_balance_wei + $amount_wei" | bc) @@ -317,7 +321,7 @@ function verify_native_token_balance() { if [ "$(echo "$final_balance_wei == $expected_final_balance_wei" | bc)" -eq 1 ]; then echo "✅ Balance verification successful: final balance is correct." else - echo "❌ Balance verification failed: expected $expected_final_balance_wei but got $final_balance_wei." + echo "❌ Balance verification failed: expected $expected_final_balance_wei but got $final_balance_wei." 
>&3 exit 1 fi } diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index 700e7ad2..c753393a 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -1,37 +1,44 @@ #!/usr/bin/env bash # Error code reference https://hackmd.io/WwahVBZERJKdfK3BbKxzQQ -function deposit() { - readonly deposit_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' +function bridgeAsset() { + local token_addr="$1" + local rpc_url="$2" + readonly bridge_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then echo "The ETH balance for sender "$sender_addr":" >&3 - cast balance -e --rpc-url $l1_rpc_url $sender_addr >&3 + cast balance -e --rpc-url $rpc_url $sender_addr >&3 else echo "The "$token_addr" token balance for sender "$sender_addr":" >&3 - balance_wei=$(cast call --rpc-url "$l1_rpc_url" "$token_addr" "$balance_of_fn_sig" "$sender_addr") + echo "cast call --rpc-url $rpc_url $token_addr \"$balance_of_fn_sig\" $sender_addr" >&3 + balance_wei=$(cast call --rpc-url "$rpc_url" "$token_addr" "$balance_of_fn_sig" "$sender_addr" | awk '{print $1}') echo "$(cast --from-wei "$balance_wei")" >&3 fi - echo "Attempting to deposit $amount [wei] to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$l1_rpc_url)" >&3 + echo "Attempting to deposit $amount [wei] to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$rpc_url)" >&3 if [[ $dry_run == "true" ]]; then - cast calldata $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast calldata $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then - cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + echo "cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes" >&3 + cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else - cast send --legacy --private-key $sender_private_key --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + echo "cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr \"$bridge_sig\" $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes" + cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes fi fi } function claim() { + local destination_rpc_url="$1" readonly claim_sig="claimAsset(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)" readonly bridge_deposit_file=$(mktemp) readonly claimable_deposit_file=$(mktemp) echo "Getting full list of deposits" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' 
| tee $bridge_deposit_file + echo "Looking for claimable deposits" >&3 jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file readonly claimable_count=$(jq '. | length' $claimable_deposit_file) @@ -69,7 +76,6 @@ function claim() { if [[ $dry_run == "true" ]]; then cast calldata $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata - cast call --rpc-url $l2_rpc_url $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata else local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') if [[ $? -ne 0 ]]; then @@ -77,8 +83,8 @@ function claim() { exit 1 fi - echo "cast send --legacy --gas-price $comp_gas_price --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr \"$claim_sig\" \"$in_merkle_proof\" \"$in_rollup_merkle_proof\" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata" >&3 - cast send --legacy --gas-price $comp_gas_price --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr "$claim_sig" "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + echo "cast send --legacy --gas-price $comp_gas_price --rpc-url $destination_rpc_url --private-key $sender_private_key $bridge_addr \"$claim_sig\" \"$in_merkle_proof\" \"$in_rollup_merkle_proof\" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata" >&3 + cast send --legacy --gas-price $comp_gas_price --rpc-url $destination_rpc_url --private-key $sender_private_key $bridge_addr "$claim_sig" "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata fi done < <(seq 0 $((claimable_count - 1))) @@ -87,6 +93,7 @@ function claim() { function wait_for_claim() { local timeout="$1" # timeout (in seconds) local claim_frequency="$2" # claim frequency (in seconds) + local destination_rpc_url="$3" # destination rpc url local start_time=$(date +%s) local end_time=$((start_time + timeout)) @@ -97,7 +104,7 @@ function wait_for_claim() { exit 1 fi - run claim + run claim $destination_rpc_url if [ $status -eq 0 ]; then break fi From ad87a4e5916a41c7d4b856e25efe095e340b465b Mon Sep 17 00:00:00 2001 From: Arnau Bennassar Date: Thu, 24 Oct 2024 21:17:54 +0200 Subject: [PATCH 53/53] bump sync (#142) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 3c02cea6..631e54b7 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 diff --git 
a/go.sum b/go.sum index c2f44da6..4a0095c8 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,8 @@ github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7 h1:73sYxRQ9cOmtYBEyHePgEwr github.com/0xPolygonHermez/zkevm-data-streamer v0.2.7/go.mod h1:7nM7Ihk+fTG1TQPwdZoGOYd3wprqqyIyjtS514uHzWE= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 h1:+ZbyEpaBZu88jWtov/7iBWvwgBMu5cxlvAFDxsPrnGQ= github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5 h1:YmnhuCl349MoNASN0fMeGKU1o9HqJhiZkfMsA/1cTRA= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.5/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
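
The dependency bump in this last patch is the kind of change that is normally produced and verified locally with the standard Go module tooling rather than by editing go.mod and go.sum by hand. A minimal sketch, using the module path and version shown in the go.mod hunk above (the exact commands used by the author are not recorded in the patch):

    # pull the new version of the synchronizer module
    go get github.com/0xPolygonHermez/zkevm-synchronizer-l1@v1.0.5
    # regenerate go.sum entries (both the h1: module hash and the /go.mod hash seen in the hunk)
    go mod tidy
    # sanity-check that the tree still builds with the bumped dependency
    go build ./...

`go mod tidy` is what emits the pair of `h1:` checksum lines added to go.sum above; the older v1.0.4 entries can legitimately remain alongside them.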