diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000..ad93c14a0f
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,5 @@
+{
+	"image": "mcr.microsoft.com/devcontainers/universal:2",
+	"features": {
+	}
+}
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 0000000000..cc1008277f
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,7 @@
+{
+	"recommendations": [
+		"fangjin.gist",
+		"k9982874.github-gist-explorer",
+		"michaeljolley.vscx-gist"
+	]
+}
\ No newline at end of file
diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go
index cdce77d0d8..d2a214a7e5 100644
--- a/accounts/abi/bind/auth.go
+++ b/accounts/abi/bind/auth.go
@@ -33,10 +33,10 @@ import (
 	"io"
 	"math/big"
 
-	"github.com/ava-labs/coreth/accounts"
-	"github.com/ava-labs/coreth/accounts/external"
-	"github.com/ava-labs/coreth/accounts/keystore"
-	"github.com/ava-labs/coreth/core/types"
+	"github.com/tenderly/coreth/accounts"
+	"github.com/tenderly/coreth/accounts/external"
+	"github.com/tenderly/coreth/accounts/keystore"
+	"github.com/tenderly/coreth/core/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go
index 846ffbca05..24bc599910 100644
--- a/accounts/abi/bind/backend.go
+++ b/accounts/abi/bind/backend.go
@@ -31,8 +31,8 @@ import (
 	"errors"
 	"math/big"
 
-	"github.com/ava-labs/coreth/core/types"
-	"github.com/ava-labs/coreth/interfaces"
+	"github.com/tenderly/coreth/core/types"
+	"github.com/tenderly/coreth/interfaces"
 	"github.com/ethereum/go-ethereum/common"
 )
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 31aa31b0ce..dabedb9ec4 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -34,26 +34,26 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ava-labs/coreth/eth"
-	"github.com/ava-labs/coreth/vmerrs"
-
-	"github.com/ava-labs/coreth/accounts/abi"
-	"github.com/ava-labs/coreth/accounts/abi/bind"
-	"github.com/ava-labs/coreth/consensus/dummy"
-	"github.com/ava-labs/coreth/core"
-	"github.com/ava-labs/coreth/core/bloombits"
-	"github.com/ava-labs/coreth/core/rawdb"
-	"github.com/ava-labs/coreth/core/state"
-	"github.com/ava-labs/coreth/core/types"
-	"github.com/ava-labs/coreth/core/vm"
-	"github.com/ava-labs/coreth/eth/filters"
-	"github.com/ava-labs/coreth/interfaces"
-	"github.com/ava-labs/coreth/params"
-	"github.com/ava-labs/coreth/rpc"
+	"github.com/tenderly/coreth/eth"
+	"github.com/tenderly/coreth/vmerrs"
+
+	"github.com/tenderly/coreth/accounts/abi"
+	"github.com/tenderly/coreth/accounts/abi/bind"
+	"github.com/tenderly/coreth/consensus/dummy"
+	"github.com/tenderly/coreth/core"
+	"github.com/tenderly/coreth/core/bloombits"
+	"github.com/tenderly/coreth/core/rawdb"
+	"github.com/tenderly/coreth/core/state"
+	"github.com/tenderly/coreth/core/types"
+	"github.com/tenderly/coreth/core/vm"
+	"github.com/tenderly/coreth/eth/filters"
+	"github.com/tenderly/coreth/ethdb"
+	"github.com/tenderly/coreth/interfaces"
+	"github.com/tenderly/coreth/params"
+	"github.com/tenderly/coreth/rpc"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/math"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 )
@@ -62,6 +62,8 @@
 var (
 	_ bind.AcceptedContractCaller = (*SimulatedBackend)(nil)
 	_ bind.ContractBackend        = (*SimulatedBackend)(nil)
+	_ bind.ContractFilterer       = (*SimulatedBackend)(nil)
+	_ bind.ContractTransactor     = (*SimulatedBackend)(nil)
 	_ bind.DeployBackend          = (*SimulatedBackend)(nil)
 
 	_ interfaces.ChainReader = (*SimulatedBackend)(nil)
@@ -78,7 +80,6 @@ var (
 	errBlockNumberUnsupported  = errors.New("simulatedBackend cannot access blocks other than the latest block")
-	errBlockHashUnsupported    = errors.New("simulatedBackend cannot access blocks by hash other than the latest block")
 	errBlockDoesNotExist       = errors.New("block does not exist in blockchain")
 	errTransactionDoesNotExist = errors.New("transaction does not exist")
 )
@@ -96,8 +97,7 @@ type SimulatedBackend struct {
 	acceptedBlock *types.Block   // Currently accepted block that will be imported on request
 	acceptedState *state.StateDB // Currently accepted state that will be the active on request
 
-	events       *filters.EventSystem  // for filtering log events live
-	filterSystem *filters.FilterSystem // for filtering database logs
+	events *filters.EventSystem // Event system for filtering log events live
 
 	config *params.ChainConfig
 }
@@ -106,30 +106,20 @@ type SimulatedBackend struct {
 // and uses a simulated blockchain for testing purposes.
 // A simulated backend always uses chainID 1337.
 func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
-	copyConfig := *params.TestChainConfig
-	copyConfig.ChainID = big.NewInt(1337)
-	genesis := core.Genesis{
-		Config:   &copyConfig,
-		GasLimit: gasLimit,
-		Alloc:    alloc,
-	}
+	cpcfg := params.TestChainConfig
+	cpcfg.ChainID = big.NewInt(1337)
+	genesis := core.Genesis{Config: cpcfg, GasLimit: gasLimit, Alloc: alloc}
+	genesis.MustCommit(database)
 	cacheConfig := &core.CacheConfig{}
-	blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false)
+	blockchain, _ := core.NewBlockChain(database, cacheConfig, genesis.Config, dummy.NewFaker(), vm.Config{}, common.Hash{})
 
 	backend := &SimulatedBackend{
 		database:   database,
 		blockchain: blockchain,
 		config:     genesis.Config,
+		events:     filters.NewEventSystem(&filterBackend{database, blockchain}, false),
 	}
-
-	filterBackend := &filterBackend{database, blockchain, backend}
-	backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
-	backend.events = filters.NewEventSystem(backend.filterSystem)
-
-	header := backend.blockchain.CurrentBlock()
-	block := backend.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
-
-	backend.rollback(block)
+	backend.rollback(blockchain.CurrentBlock())
 	return backend
 }
@@ -146,9 +136,9 @@ func (b *SimulatedBackend) Close() error {
 	return nil
 }
 
-// Commit imports all the accepted transactions as a single block and starts a
+// Commit imports all the pending transactions as a single block and starts a
 // fresh new state.
-func (b *SimulatedBackend) Commit(accept bool) common.Hash {
+func (b *SimulatedBackend) Commit(accept bool) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -161,24 +151,17 @@ func (b *SimulatedBackend) Commit(accept bool) common.Hash {
 		}
 		b.blockchain.DrainAcceptorQueue()
 	}
-	blockHash := b.acceptedBlock.Hash()
-
 	// Using the last inserted block here makes it possible to build on a side
 	// chain after a fork.
 	b.rollback(b.acceptedBlock)
-
-	return blockHash
 }
 
-// Rollback aborts all accepted transactions, reverting to the last committed state.
+// Rollback aborts all pending transactions, reverting to the last committed state.
 func (b *SimulatedBackend) Rollback() {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	header := b.blockchain.CurrentBlock()
-	block := b.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
-
-	b.rollback(block)
+	b.rollback(b.blockchain.CurrentBlock())
 }
 
 func (b *SimulatedBackend) rollback(parent *types.Block) {
@@ -205,7 +188,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
 	defer b.mu.Unlock()
 
 	if len(b.acceptedBlock.Transactions()) != 0 {
-		return errors.New("accepted block dirty")
+		return errors.New("pending block dirty")
 	}
 	block, err := b.blockByHash(ctx, parent)
 	if err != nil {
@@ -217,7 +200,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
 
 // stateByBlockNumber retrieves a state by a given blocknumber.
 func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
-	if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number) == 0 {
+	if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
 		return b.blockchain.State()
 	}
 	block, err := b.blockByNumber(ctx, blockNumber)
@@ -236,23 +219,6 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
 	if err != nil {
 		return nil, err
 	}
-	return stateDB.GetCode(contract), nil
-}
-
-// CodeAtHash returns the code associated with a certain account in the blockchain.
-func (b *SimulatedBackend) CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	header, err := b.headerByHash(blockHash)
-	if err != nil {
-		return nil, err
-	}
-
-	stateDB, err := b.blockchain.StateAt(header.Root)
-	if err != nil {
-		return nil, err
-	}
 	return stateDB.GetCode(contract), nil
 }
@@ -266,6 +232,7 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
 	if err != nil {
 		return nil, err
 	}
+
 	return stateDB.GetBalance(contract), nil
 }
@@ -278,6 +245,7 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
 	if err != nil {
 		return 0, err
 	}
+
 	return stateDB.GetNonce(contract), nil
 }
@@ -290,6 +258,7 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
 	if err != nil {
 		return nil, err
 	}
+
 	val := stateDB.GetState(contract, key)
 	return val[:], nil
 }
@@ -306,10 +275,10 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
 	return receipt, nil
 }
 
-// TransactionByHash checks the pool of accepted transactions in addition to the
-// blockchain. The isAccepted return value indicates whether the transaction has been
+// TransactionByHash checks the pool of pending transactions in addition to the
+// blockchain. The isPending return value indicates whether the transaction has been
 // mined yet. Note that the transaction may not be part of the canonical chain even if
-// it's not accepted.
+// it's not pending.
 func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -360,7 +329,7 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (
 // (associated with its hash) if found without Lock.
 func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
 	if number == nil || number.Cmp(b.acceptedBlock.Number()) == 0 {
-		return b.blockByHash(ctx, b.blockchain.CurrentBlock().Hash())
+		return b.blockchain.CurrentBlock(), nil
 	}
 
 	block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
@@ -375,11 +344,7 @@ func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (
 func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
-	return b.headerByHash(hash)
-}
 
-// headerByHash retrieves a header from the database by hash without Lock.
-func (b *SimulatedBackend) headerByHash(hash common.Hash) (*types.Header, error) {
 	if hash == b.acceptedBlock.Hash() {
 		return b.acceptedBlock.Header(), nil
 	}
@@ -492,25 +457,9 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call interfaces.Cal
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number) != 0 {
+	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
 		return nil, errBlockNumberUnsupported
 	}
-	return b.callContractAtHead(ctx, call)
-}
-
-// CallContractAtHash executes a contract call on a specific block hash.
-func (b *SimulatedBackend) CallContractAtHash(ctx context.Context, call interfaces.CallMsg, blockHash common.Hash) ([]byte, error) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	if blockHash != b.blockchain.CurrentBlock().Hash() {
-		return nil, errBlockHashUnsupported
-	}
-	return b.callContractAtHead(ctx, call)
-}
-
-// callContractAtHead executes a contract call against the latest block state.
-func (b *SimulatedBackend) callContractAtHead(ctx context.Context, call interfaces.CallMsg) ([]byte, error) {
 	stateDB, err := b.blockchain.State()
 	if err != nil {
 		return nil, err
 	}
@@ -532,7 +481,7 @@ func (b *SimulatedBackend) AcceptedCallContract(ctx context.Context, call interf
 	defer b.mu.Unlock()
 	defer b.acceptedState.RevertToSnapshot(b.acceptedState.Snapshot())
 
-	res, err := b.callContract(ctx, call, b.acceptedBlock.Header(), b.acceptedState)
+	res, err := b.callContract(ctx, call, b.acceptedBlock, b.acceptedState)
 	if err != nil {
 		return nil, err
 	}
@@ -570,7 +519,7 @@ func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, erro
 	return big.NewInt(1), nil
 }
 
-// EstimateGas executes the requested code against the currently accepted block/state and
+// EstimateGas executes the requested code against the currently pending block/state and
 // returns the used amount of gas.
 func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.CallMsg) (uint64, error) {
 	b.mu.Lock()
@@ -604,7 +553,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call
 		available := new(big.Int).Set(balance)
 		if call.Value != nil {
 			if call.Value.Cmp(available) >= 0 {
-				return 0, core.ErrInsufficientFundsForTransfer
+				return 0, errors.New("insufficient funds for transfer")
 			}
 			available.Sub(available, call.Value)
 		}
@@ -614,7 +563,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call
 			if transfer == nil {
 				transfer = new(big.Int)
 			}
-			log.Info("Gas estimation capped by limited funds", "original", hi, "balance", balance,
+			log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
 				"sent", transfer, "feecap", feeCap, "fundable", allowance)
 			hi = allowance.Uint64()
 		}
@@ -626,7 +575,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call
 		call.Gas = gas
 
 		snapshot := b.acceptedState.Snapshot()
-		res, err := b.callContract(ctx, call, b.acceptedBlock.Header(), b.acceptedState)
+		res, err := b.callContract(ctx, call, b.acceptedBlock, b.acceptedState)
 		b.acceptedState.RevertToSnapshot(snapshot)
 
 		if err != nil {
@@ -661,7 +610,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call
 			return 0, err
 		}
 		if failed {
-			if result != nil && !errors.Is(result.Err, vmerrs.ErrOutOfGas) {
+			if result != nil && result.Err != vmerrs.ErrOutOfGas {
 				if len(result.Revert()) > 0 {
 					return 0, newRevertError(result)
 				}
@@ -674,14 +623,15 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call
 	return hi, nil
 }
 
-// callContract implements common code between normal and accepted contract calls.
+// callContract implements common code between normal and pending contract calls.
 // state is modified during execution, make sure to copy it if necessary.
-func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) {
+func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) {
 	// Gas prices post 1559 need to be initialized
 	if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
 		return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
 	}
-	if !b.blockchain.Config().IsApricotPhase3(header.Time) {
+	head := b.blockchain.CurrentHeader()
+	if !b.blockchain.Config().IsApricotPhase3(new(big.Int).SetUint64(head.Time)) {
 		// If there's no basefee, then it must be a non-1559 execution
 		if call.GasPrice == nil {
 			call.GasPrice = new(big.Int)
@@ -693,7 +643,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.Cal
 			// User specified the legacy gas field, convert to 1559 gas typing
 			call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
 		} else {
-			// User specified 1559 gas fields (or none), use those
+			// User specified 1559 gas feilds (or none), use those
 			if call.GasFeeCap == nil {
 				call.GasFeeCap = new(big.Int)
 			}
@@ -703,47 +653,34 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.Cal
 			// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
 			call.GasPrice = new(big.Int)
 			if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
-				call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
+				call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
 			}
 		}
 	}
 	// Ensure message is initialized properly.
 	if call.Gas == 0 {
-		call.Gas = 10 * header.GasLimit
+		call.Gas = 50000000
 	}
 	if call.Value == nil {
 		call.Value = new(big.Int)
 	}
-
 	// Set infinite balance to the fake caller account.
 	from := stateDB.GetOrNewStateObject(call.From)
 	from.SetBalance(math.MaxBig256)
-
-	// Execute the call.
-	msg := &core.Message{
-		From:              call.From,
-		To:                call.To,
-		Value:             call.Value,
-		GasLimit:          call.Gas,
-		GasPrice:          call.GasPrice,
-		GasFeeCap:         call.GasFeeCap,
-		GasTipCap:         call.GasTipCap,
-		Data:              call.Data,
-		AccessList:        call.AccessList,
-		SkipAccountChecks: true,
-	}
+	msg := callMsg{call}
 
+	txContext := core.NewEVMTxContext(msg)
+	evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil)
 	// Create a new environment which holds all relevant information
 	// about the transaction and calling mechanisms.
-	txContext := core.NewEVMTxContext(msg)
-	evmContext := core.NewEVMBlockContext(header, b.blockchain, nil)
 	vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
 	gasPool := new(core.GasPool).AddGas(math.MaxUint64)
 
-	return core.ApplyMessage(vmEnv, msg, gasPool)
+	return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb()
 }
 
-// SendTransaction updates the accepted block to include the given transaction.
+// SendTransaction updates the pending block to include the given transaction.
 func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -754,7 +691,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 		return errors.New("could not fetch parent")
 	}
 	// Check transaction validity
-	signer := types.MakeSigner(b.blockchain.Config(), block.Number(), block.Time())
+	signer := types.NewLondonSigner(b.blockchain.Config().ChainID)
 	sender, err := types.Sender(signer, tx)
 	if err != nil {
 		return fmt.Errorf("invalid transaction: %v", err)
@@ -773,10 +710,8 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 	if err != nil {
 		return err
 	}
-	stateDB, err := b.blockchain.State()
-	if err != nil {
-		return err
-	}
+	stateDB, _ := b.blockchain.State()
+
 	b.acceptedBlock = blocks[0]
 	b.acceptedState, _ = state.New(b.acceptedBlock.Root(), stateDB.Database(), nil)
 	return nil
@@ -790,7 +725,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query interfaces.Filt
 	var filter *filters.Filter
 	if query.BlockHash != nil {
 		// Block filter requested, construct a single-shot filter
-		filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics)
+		filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
 	} else {
 		// Initialize unset filter boundaries to run from genesis to chain head
 		from := int64(0)
@@ -802,7 +737,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query interfaces.Filt
 			to = query.ToBlock.Int64()
 		}
 		// Construct the range filter
-		filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics)
+		filter, _ = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
 	}
 	// Run the filter and return all the logs
 	logs, err := filter.Logs(ctx)
@@ -884,22 +819,17 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	defer b.mu.Unlock()
 
 	if len(b.acceptedBlock.Transactions()) != 0 {
-		return errors.New("could not adjust time on non-empty block")
-	}
-	block := b.blockchain.GetBlockByHash(b.acceptedBlock.ParentHash())
-	if block == nil {
-		return errors.New("could not find parent")
+		return errors.New("Could not adjust time on non-empty block")
 	}
-	blocks, _, _ := core.GenerateChain(b.config, block, dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) {
+	blocks, _, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) {
 		block.OffsetTime(int64(adjustment.Seconds()))
 	})
-	stateDB, err := b.blockchain.State()
-	if err != nil {
-		return err
-	}
+	stateDB, _ := b.blockchain.State()
+
 	b.acceptedBlock = blocks[0]
 	b.acceptedState, _ = state.New(b.acceptedBlock.Root(), stateDB.Database(), nil)
+
 	return nil
 }
@@ -908,12 +838,28 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain {
 	return b.blockchain
 }
 
+// callMsg implements core.Message to allow passing it as a transaction simulator.
+type callMsg struct {
+	interfaces.CallMsg
+}
+
+func (m callMsg) From() common.Address         { return m.CallMsg.From }
+func (m callMsg) Nonce() uint64                { return 0 }
+func (m callMsg) IsFake() bool                 { return true }
+func (m callMsg) To() *common.Address          { return m.CallMsg.To }
+func (m callMsg) GasPrice() *big.Int           { return m.CallMsg.GasPrice }
+func (m callMsg) GasFeeCap() *big.Int          { return m.CallMsg.GasFeeCap }
+func (m callMsg) GasTipCap() *big.Int          { return m.CallMsg.GasTipCap }
+func (m callMsg) Gas() uint64                  { return m.CallMsg.Gas }
+func (m callMsg) Value() *big.Int              { return m.CallMsg.Value }
+func (m callMsg) Data() []byte                 { return m.CallMsg.Data }
+func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
+
 // filterBackend implements filters.Backend to support filtering for logs without
 // taking bloom-bits acceleration structures into account.
 type filterBackend struct {
-	db      ethdb.Database
-	bc      *core.BlockChain
-	backend *SimulatedBackend
+	db ethdb.Database
+	bc *core.BlockChain
 }
 
 func (fb *filterBackend) SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription {
@@ -928,8 +874,8 @@ func (fb *filterBackend) SubscribeAcceptedTransactionEvent(ch chan<- core.NewTxs
 	return fb.bc.SubscribeAcceptedTransactionEvent(ch)
 }
 
-func (fb *filterBackend) IsAllowUnfinalizedQueries() bool {
-	return false
+func (fb *filterBackend) GetVMConfig() *vm.Config {
+	return fb.bc.GetVMConfig()
 }
 
 func (fb *filterBackend) LastAcceptedBlock() *types.Block {
@@ -940,49 +886,41 @@ func (fb *filterBackend) GetMaxBlocksPerRequest() int64 {
 	return eth.DefaultSettings.MaxBlocksPerRequest
 }
 
-func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
-
+func (fb *filterBackend) ChainDb() ethdb.Database  { return fb.db }
 func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
 
-func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
-	switch number {
-	case rpc.PendingBlockNumber, rpc.FinalizedBlockNumber:
-		if block := fb.backend.acceptedBlock; block != nil {
-			return block.Header(), nil
-		}
-		return nil, nil
-	case rpc.LatestBlockNumber:
+func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
+	if block == rpc.LatestBlockNumber {
 		return fb.bc.CurrentHeader(), nil
-	default:
-		return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil
 	}
+	return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
 }
 
 func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
 	return fb.bc.GetHeaderByHash(hash), nil
 }
 
-func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
-	if body := fb.bc.GetBody(hash); body != nil {
-		return body, nil
+func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
+	number := rawdb.ReadHeaderNumber(fb.db, hash)
+	if number == nil {
+		return nil, nil
 	}
-	return nil, errors.New("block body not found")
+	return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil
 }
 
-func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
+func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
 	number := rawdb.ReadHeaderNumber(fb.db, hash)
 	if number == nil {
 		return nil, nil
 	}
-	header := rawdb.ReadHeader(fb.db, hash, *number)
-	if header == nil {
+	receipts := rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config())
+	if receipts == nil {
 		return nil, nil
 	}
-	return rawdb.ReadReceipts(fb.db, hash, *number, header.Time, fb.bc.Config()), nil
-}
-
-func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
-	logs := rawdb.ReadLogs(fb.db, hash, number)
+	logs := make([][]*types.Log, len(receipts))
+	for i, receipt := range receipts {
+		logs[i] = receipt.Logs
+	}
 	return logs, nil
 }
@@ -1012,14 +950,6 @@ func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.Matche
 	panic("not supported")
 }
 
-func (fb *filterBackend) ChainConfig() *params.ChainConfig {
-	panic("not supported")
-}
-
-func (fb *filterBackend) CurrentHeader() *types.Header {
-	panic("not supported")
-}
-
 func nullSubscription() event.Subscription {
 	return event.NewSubscription(func(quit <-chan struct{}) error {
 		<-quit
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index b3eaf65048..3870f9a991 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -37,12 +37,12 @@ import (
 	"testing"
 	"time"
 
-	"github.com/ava-labs/coreth/accounts/abi"
-	"github.com/ava-labs/coreth/accounts/abi/bind"
-	"github.com/ava-labs/coreth/core"
-	"github.com/ava-labs/coreth/core/types"
-	"github.com/ava-labs/coreth/interfaces"
-	"github.com/ava-labs/coreth/params"
+	"github.com/tenderly/coreth/accounts/abi"
+	"github.com/tenderly/coreth/accounts/abi/bind"
+	"github.com/tenderly/coreth/core"
+	"github.com/tenderly/coreth/core/types"
+	"github.com/tenderly/coreth/interfaces"
+	"github.com/tenderly/coreth/params"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 )
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 958128ffa8..492829ddfe 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -34,10 +34,10 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/ava-labs/coreth/accounts/abi"
-	"github.com/ava-labs/coreth/core/types"
-	"github.com/ava-labs/coreth/core/vm"
-	"github.com/ava-labs/coreth/interfaces"
+	"github.com/tenderly/coreth/accounts/abi"
+	"github.com/tenderly/coreth/core/types"
+	"github.com/tenderly/coreth/core/vm"
+	"github.com/tenderly/coreth/interfaces"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/event"
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
index 141df4657e..91e01b61f2 100644
--- a/accounts/abi/bind/base_test.go
+++ b/accounts/abi/bind/base_test.go
@@ -35,16 +35,16 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/ava-labs/coreth/accounts/abi"
-	"github.com/ava-labs/coreth/accounts/abi/bind"
-	"github.com/ava-labs/coreth/core/types"
-	"github.com/ava-labs/coreth/core/vm"
-	"github.com/ava-labs/coreth/interfaces"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/stretchr/testify/assert"
+	"github.com/tenderly/coreth/accounts/abi"
+	"github.com/tenderly/coreth/accounts/abi/bind"
+	"github.com/tenderly/coreth/core/types"
+	"github.com/tenderly/coreth/core/vm"
+	"github.com/tenderly/coreth/interfaces"
+	"github.com/tenderly/coreth/rlp"
 )
 
 func mockSign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil }
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index 0a4223c5e5..1f3cbce1d1 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -39,7 +39,7 @@ import (
 	"text/template"
 	"unicode"
 
-	"github.com/ava-labs/coreth/accounts/abi"
+	"github.com/tenderly/coreth/accounts/abi"
 	"github.com/ethereum/go-ethereum/log"
 )
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 3da6421150..2fa95de410 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -28,6 +28,7 @@ package bind
 
 import (
 	"fmt"
+	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -297,9 +298,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -352,9 +353,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -398,9 +399,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -455,10 +456,10 @@ var bindTests = []struct {
			"math/big"
			"reflect"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/common"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -504,9 +505,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -570,9 +571,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -615,10 +616,10 @@ var bindTests = []struct {
		[]string{`6060604052609f8060106000396000f3606060405260e060020a6000350463f97a60058114601a575b005b600060605260c0604052600d60809081527f4920646f6e27742065786973740000000000000000000000000000000000000060a052602060c0908152600d60e081905281906101009060a09080838184600060046012f15050815172ffffffffffffffffffffffffffffffffffffff1916909152505060405161012081900392509050f3`},
		[]string{`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`},
		`
-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/common"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
		`,
		`
		// Create a simulator and wrap a non-deployed contract
@@ -654,10 +655,10 @@ var bindTests = []struct {
		[]string{`6080604052348015600f57600080fd5b5060888061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063d5f6622514602d575b600080fd5b6033604c565b6040805192835260208301919091528051918290030190f35b600a809156fea264697066735822beefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeef64736f6c6343decafe0033`},
		[]string{`[{"inputs":[],"name":"Struct","outputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"stateMutability":"pure","type":"function"}]`},
		`
-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/common"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
		`,
		`
		// Create a simulator and wrap a non-deployed contract
@@ -702,9 +703,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -751,10 +752,10 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/common"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -827,9 +828,9 @@ var bindTests = []struct {
			"fmt"
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -920,10 +921,10 @@ var bindTests = []struct {
			"math/big"
			"time"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/common"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -1111,9 +1112,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -1246,9 +1247,9 @@ var bindTests = []struct {
			"math/big"
			"reflect"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
@@ -1388,9 +1389,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -1454,15 +1455,17 @@ var bindTests = []struct {
			"math/big"
			"time"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
+			"github.com/tenderly/coreth/params"
		`,
		`
		// Initialize test accounts
		key, _ := crypto.GenerateKey()
		auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
+		auth.GasFeeCap = new(big.Int).SetInt64(params.ApricotPhase4MaxBaseFee)
		sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: new(big.Int).Mul(big.NewInt(10000000000000000), big.NewInt(1000))}}, 10000000)
		defer sim.Close()
@@ -1563,10 +1566,10 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/crypto"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
		`,
		`
		// Initialize test accounts
@@ -1626,10 +1629,10 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
			"github.com/ethereum/go-ethereum/crypto"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/core"
		`,
		`
		key, _ := crypto.GenerateKey()
@@ -1688,9 +1691,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -1708,7 +1711,7 @@ var bindTests = []struct {
		}
		sim.Commit(false)

-		// This test the existence of the free retriever call for view and pure functions
+		// This test the existence of the free retreiver call for view and pure functions
		if num, err := pav.PureFunc(nil); err != nil {
			t.Fatalf("Failed to call anonymous field retriever: %v", err)
		} else if num.Cmp(big.NewInt(42)) != 0 {
@@ -1749,16 +1752,15 @@ var bindTests = []struct {
			"bytes"
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
		key, _ := crypto.GenerateKey()
		addr := crypto.PubkeyToAddress(key.PublicKey)
-
		sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}}, 1000000)
		defer sim.Close()
@@ -1838,9 +1840,9 @@ var bindTests = []struct {
 		`
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		`
@@ -1880,7 +1882,7 @@ var bindTests = []struct {
			if count != 1 {
				t.Fatal("Unexpected contract event number")
			}
-		`,
+	`,
		nil,
		nil,
		nil,
@@ -1890,51 +1892,51 @@ var bindTests = []struct {
	{
		`NewErrors`,
		`
-		pragma solidity >0.8.4;
-
-		contract NewErrors {
-			error MyError(uint256);
-			error MyError1(uint256);
-			error MyError2(uint256, uint256);
-			error MyError3(uint256 a, uint256 b, uint256 c);
-			function Error() public pure {
-				revert MyError3(1,2,3);
+	pragma solidity >0.8.4;
+
+	contract NewErrors {
+		error MyError(uint256);
+		error MyError1(uint256);
+		error MyError2(uint256, uint256);
+		error MyError3(uint256 a, uint256 b, uint256 c);
+		function Error() public pure {
+			revert MyError3(1,2,3);
+		}
 		}
-		}
-		`,
+	`,
		[]string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"},
		[]string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`},
		`
-		"math/big"
-
-		"github.com/ava-labs/coreth/accounts/abi/bind"
-		"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-		"github.com/ava-labs/coreth/core"
-		"github.com/ethereum/go-ethereum/crypto"
-		`,
-		`
-		var (
-			key, _  = crypto.GenerateKey()
-			user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
-			sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, 10000000)
-		)
-		defer sim.Close()
-
-		_, tx, contract, err := DeployNewErrors(user, sim)
-		if err != nil {
-			t.Fatal(err)
-		}
-		sim.Commit(true)
-		_, err = bind.WaitDeployed(nil, sim, tx)
-		if err != nil {
-			t.Error(err)
-		}
-		if err := contract.Error(new(bind.CallOpts)); err == nil {
-			t.Fatalf("expected contract to throw error")
-		}
-		// TODO (MariusVanDerWijden unpack error using abigen
-		// once that is implemented
-		`,
+	"math/big"
+
+	"github.com/tenderly/coreth/accounts/abi/bind"
+	"github.com/tenderly/coreth/accounts/abi/bind/backends"
+	"github.com/tenderly/coreth/core"
+	"github.com/ethereum/go-ethereum/crypto"
+	`,
+	`
+	var (
+		key, _  = crypto.GenerateKey()
+		user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
+		sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, 10000000)
+	)
+	defer sim.Close()
+
+	_, tx, contract, err := DeployNewErrors(user, sim)
+	if err != nil {
+		t.Fatal(err)
+	}
+	sim.Commit(true)
+	_, err = bind.WaitDeployed(nil, sim, tx)
+	if err != nil {
+		t.Error(err)
+	}
+	if err := contract.Error(new(bind.CallOpts)); err == nil {
+		t.Fatalf("expected contract to throw error")
+	}
+	// TODO (MariusVanDerWijden unpack error using abigen
+	// once that is implemented
+	`,
		nil,
		nil,
		nil,
@@ -1958,9 +1960,9 @@ var bindTests = []struct {
		imports: `
			"math/big"

-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
+			"github.com/tenderly/coreth/accounts/abi/bind"
+			"github.com/tenderly/coreth/accounts/abi/bind/backends"
+			"github.com/tenderly/coreth/core"
			"github.com/ethereum/go-ethereum/crypto"
		`,
		tester: `
@@ -1983,114 +1985,6 @@ var bindTests = []struct {
			}
		`,
	},
-	{
-		name: `NameConflict`,
-		contract: `
-		// SPDX-License-Identifier: GPL-3.0
-		pragma solidity >=0.4.22 <0.9.0;
-		contract oracle {
-			struct request {
-				bytes data;
-				bytes _data;
-			}
-			event log (int msg, int _msg);
-			function addRequest(request memory req) public pure {}
-			function getRequest() pure public returns (request memory) {
-				return request("", "");
-			}
-		}
-		`,
-		bytecode: []string{"0x608060405234801561001057600080fd5b5061042b806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063c2bb515f1461003b578063cce7b04814610059575b600080fd5b610043610075565b60405161005091906101af565b60405180910390f35b610073600480360381019061006e91906103ac565b6100b5565b005b61007d6100b8565b604051806040016040528060405180602001604052806000815250815260200160405180602001604052806000815250815250905090565b50565b604051806040016040528060608152602001606081525090565b600081519050919050565b600082825260208201905092915050565b60005b8381101561010c5780820151818401526020810190506100f1565b8381111561011b576000848401525b50505050565b6000601f19601f8301169050919050565b600061013d826100d2565b61014781856100dd565b93506101578185602086016100ee565b61016081610121565b840191505092915050565b600060408301600083015184820360008601526101888282610132565b915050602083015184820360208601526101a28282610132565b9150508091505092915050565b600060208201905081810360008301526101c9818461016b565b905092915050565b6000604051905090565b600080fd5b600080fd5b600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61022282610121565b810181811067ffffffffffffffff82111715610241576102406101ea565b5b80604052505050565b60006102546101d1565b90506102608282610219565b919050565b600080fd5b600080fd5b600080fd5b600067ffffffffffffffff82111561028f5761028e6101ea565b5b61029882610121565b9050602081019050919050565b82818337600083830152505050565b60006102c76102c284610274565b61024a565b9050828152602081018484840111156102e3576102e261026f565b5b6102ee8482856102a5565b509392505050565b600082601f83011261030b5761030a61026a565b5b813561031b8482602086016102b4565b91505092915050565b60006040828403121561033a576103396101e5565b5b610344604061024a565b9050600082013567ffffffffffffffff81111561036457610363610265565b5b610370848285016102f6565b600083015250602082013567ffffffffffffffff81111561039457610393610265565b5b6103a0848285016102f6565b60208301525092915050565b6000602082840312156103c2576103c16101db565b5b600082013567ffffffffffffffff8111156103e0576103df6101e0565b5b6103ec84828501610324565b9150509291505056fea264697066735822122033bca1606af9b6aeba1673f98c52003cec19338539fb44b86690ce82c51483b564736f6c634300080e0033"},
-		abi: []string{`[ { "anonymous": false, "inputs": [ { "indexed": false, "internalType": "int256", "name": "msg", "type": "int256" }, { "indexed": false, "internalType": "int256", "name": "_msg", "type": "int256" } ], "name": "log", "type": "event" }, { "inputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "req", "type": "tuple" } ], "name": "addRequest", "outputs": [], "stateMutability": "pure", "type": "function" }, { "inputs": [], "name": "getRequest", "outputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "", "type": "tuple" } ], "stateMutability": "pure", "type": "function" } ]`},
-		imports: `
-			"math/big"
-
-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
-			"github.com/ethereum/go-ethereum/crypto"
-		`,
-		tester: `
-			var (
-				gasCeil = uint64(30000000) // Note: from geth's ethconfig.Defaults.Miner.GasCeil
-				key, _  = crypto.GenerateKey()
-				user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
-				sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, gasCeil)
-			)
-			defer sim.Close()
-
-			_, tx, _, err := DeployNameConflict(user, sim)
-			if err != nil {
-				t.Fatalf("DeployNameConflict() got err %v; want nil err", err)
-			}
-			sim.Commit(true)
-
-			if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
-				t.Logf("Deployment tx: %+v", tx)
-				t.Errorf("bind.WaitDeployed(nil, %T, <deployment tx>) got err %v; want nil err", sim, err)
-			}
-		`,
-	},
-	{
-		name: "RangeKeyword",
-		contract: `
-		// SPDX-License-Identifier: GPL-3.0
-		pragma solidity >=0.4.22 <0.9.0;
-		contract keywordcontract {
-			function functionWithKeywordParameter(range uint256) public pure {}
-		}
-		`,
-		bytecode: []string{"0x608060405234801561001057600080fd5b5060dc8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063527a119f14602d575b600080fd5b60436004803603810190603f9190605b565b6045565b005b50565b6000813590506055816092565b92915050565b600060208284031215606e57606d608d565b5b6000607a848285016048565b91505092915050565b6000819050919050565b600080fd5b6099816083565b811460a357600080fd5b5056fea2646970667358221220d4f4525e2615516394055d369fb17df41c359e5e962734f27fd683ea81fd9db164736f6c63430008070033"},
-		abi: []string{`[{"inputs":[{"internalType":"uint256","name":"range","type":"uint256"}],"name":"functionWithKeywordParameter","outputs":[],"stateMutability":"pure","type":"function"}]`},
-		imports: `
-			"math/big"
-
-			"github.com/ava-labs/coreth/accounts/abi/bind"
-			"github.com/ava-labs/coreth/accounts/abi/bind/backends"
-			"github.com/ava-labs/coreth/core"
-			"github.com/ethereum/go-ethereum/crypto"
-		`,
-		tester: `
-			var (
-				gasCeil = uint64(30000000) // Note: from geth's ethconfig.Defaults.Miner.GasCeil
-				key, _  = crypto.GenerateKey()
-				user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
-				sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, gasCeil)
-			)
-			_, tx, _, err := DeployRangeKeyword(user, sim)
-			if err != nil {
-				t.Fatalf("error deploying contract: %v", err)
-			}
-			sim.Commit(true)
-
-			if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
-				t.Errorf("error deploying the contract: %v", err)
-			}
-		`,
-	},
-	{
-		name: "NumericMethodName",
-		contract: `
-		// SPDX-License-Identifier: GPL-3.0
-		pragma solidity >=0.4.22 <0.9.0;
-
-		contract NumericMethodName {
-			event _1TestEvent(address _param);
-			function _1test() public pure {}
-			function __1test() public pure {}
-			function __2test() public pure {}
-		}
-		`,
-		bytecode: []string{"0x6080604052348015600f57600080fd5b5060958061001e6000396000f3fe6080604052348015600f57600080fd5b5060043610603c5760003560e01c80639d993132146041578063d02767c7146049578063ffa02795146051575b600080fd5b60476059565b005b604f605b565b005b6057605d565b005b565b565b56fea26469706673582212200382ca602dff96a7e2ba54657985e2b4ac423a56abe4a1f0667bc635c4d4371f64736f6c63430008110033"},
-		abi: []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"_param","type":"address"}],"name":"_1TestEvent","type":"event"},{"inputs":[],"name":"_1test","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"__1test","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"__2test","outputs":[],"stateMutability":"pure","type":"function"}]`},
-		imports: `
-			"github.com/ethereum/go-ethereum/common"
-		`,
-		tester: `
-			if b, err := NewNumericMethodName(common.Address{}, nil); b == nil || err != nil {
-				t.Fatalf("combined binding (%v) nil or error (%v) not nil", b, nil)
-			}
-`,
-	},
 }
 
 // The binding tests have been modified to run in two separate test
@@ -2106,17 +2000,20 @@ func TestGolangBindings(t *testing.T) {
 // Tests that packages generated by the binder can be successfully compiled and
 // the requested tester run against it.
 func golangBindings(t *testing.T, overload bool) {
-	t.Parallel()
 	// Skip the test if no Go command can be found
 	gocmd := runtime.GOROOT() + "/bin/go"
 	if !common.FileExist(gocmd) {
 		t.Skip("go sdk not found for testing")
 	}
 	// Create a temporary workspace for the test suite
-	ws := t.TempDir()
+	ws, err := ioutil.TempDir("", "binding-test")
+	if err != nil {
+		t.Fatalf("failed to create temporary workspace: %v", err)
+	}
+	//defer os.RemoveAll(ws)
 
 	pkg := filepath.Join(ws, "bindtest")
-	if err := os.MkdirAll(pkg, 0700); err != nil {
+	if err = os.MkdirAll(pkg, 0700); err != nil {
 		t.Fatalf("failed to create package: %v", err)
 	}
 	// Generate the test suite for all the contracts
@@ -2141,7 +2038,7 @@ func golangBindings(t *testing.T, overload bool) {
 		if err != nil {
 			t.Fatalf("test %d: failed to generate binding: %v", i, err)
 		}
-		if err = os.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
+		if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
 			t.Fatalf("test %d: failed to write binding: %v", i, err)
 		}
 		// Generate the test file with the injected test code
@@ -2157,7 +2054,7 @@ func golangBindings(t *testing.T, overload bool) {
 				%s
 			}
 		`, tt.imports, tt.name, tt.tester)
-		if err := os.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
+		if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
 			t.Fatalf("test %d: failed to write tests: %v", i, err)
 		}
 	})
@@ -2169,12 +2066,12 @@ func golangBindings(t *testing.T, overload bool) {
 		t.Fatalf("failed to convert binding test to modules: %v\n%s", err, out)
 	}
 	pwd, _ := os.Getwd()
-	replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/coreth@v0.0.0", "-replace", "github.com/ava-labs/coreth="+filepath.Join(pwd, "..", "..", "..")) // Repo root
+	replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/tenderly/coreth@v0.0.0", "-replace", "github.com/tenderly/coreth="+filepath.Join(pwd, "..", "..", "..")) // Repo root
 	replacer.Dir = pkg
 	if out, err := replacer.CombinedOutput(); err != nil {
 		t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)
 	}
-	tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.21")
+	tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.17")
 	tidier.Dir = pkg
 	if out, err := tidier.CombinedOutput(); err != nil {
 		t.Fatalf("failed to tidy Go module file: %v\n%s", err, out)
@@ -2186,3 +2083,408 @@ func golangBindings(t *testing.T, overload bool) {
 		t.Fatalf("failed to run binding test: %v\n%s", err, out)
 	}
 }
+
+// Tests that java binding generated by the binder is exactly matched.
+func TestJavaBindings(t *testing.T) {
+	var cases = []struct {
+		name     string
+		contract string
+		abi      string
+		bytecode string
+		expected string
+	}{
+		{
+			"test",
+			`
+			pragma experimental ABIEncoderV2;
+			pragma solidity ^0.5.2;
+
+			contract test {
+				function setAddress(address a) public returns(address){}
+				function setAddressList(address[] memory a_l) public returns(address[] memory){}
+				function setAddressArray(address[2] memory a_a) public returns(address[2] memory){}
+
+				function setUint8(uint8 u8) public returns(uint8){}
+				function setUint16(uint16 u16) public returns(uint16){}
+				function setUint32(uint32 u32) public returns(uint32){}
+				function setUint64(uint64 u64) public returns(uint64){}
+				function setUint256(uint256 u256) public returns(uint256){}
+				function setUint256List(uint256[] memory u256_l) public returns(uint256[] memory){}
+				function setUint256Array(uint256[2] memory u256_a) public returns(uint256[2] memory){}
+
+				function setInt8(int8 i8) public returns(int8){}
+				function setInt16(int16 i16) public returns(int16){}
+				function setInt32(int32 i32) public returns(int32){}
+				function setInt64(int64 i64) public returns(int64){}
+				function setInt256(int256 i256) public returns(int256){}
+				function setInt256List(int256[] memory i256_l) public returns(int256[] memory){}
+				function setInt256Array(int256[2] memory i256_a) public returns(int256[2] memory){}
+
+				function setBytes1(bytes1 b1) public returns(bytes1) {}
+				function setBytes32(bytes32 b32) public returns(bytes32) {}
+				function setBytes(bytes memory bs) public returns(bytes memory) {}
+				function setBytesList(bytes[] memory bs_l) public returns(bytes[] memory) {}
+				function setBytesArray(bytes[2] memory bs_a) public returns(bytes[2] memory) {}
+
+				function setString(string memory s) public returns(string memory) {}
+				function setStringList(string[] memory s_l) public returns(string[] memory) {}
+				function setStringArray(string[2] memory s_a) public returns(string[2] memory) {}
+
+				function setBool(bool b) public returns(bool) {}
+				function setBoolList(bool[] memory b_l) public returns(bool[] memory) {}
+				function setBoolArray(bool[2] memory b_a) public returns(bool[2] memory) {}
+			}`,
+			`[{"constant":false,"inputs":[{"name":"u16","type":"uint16"}],"name":"setUint16","outputs":[{"name":"","type":"uint16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_a","type":"bool[2]"}],"name":"setBoolArray","outputs":[{"name":"","type":"bool[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_a","type":"address[2]"}],"name":"setAddressArray","outputs":[{"name":"","type":"address[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_l","type":"bytes[]"}],"name":"setBytesList","outputs":[{"name":"","type":"bytes[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u8","type":"uint8"}],"name":"setUint8","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u32","type":"uint32"}],"name":"setUint32","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b","type":"bool"}],"name":"setBool","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_l","type":"int256[]"}],"name":"setInt256List","outputs":[{"name":"","type":"int256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_a","type":"uint256[2]"}],"name":"setUint256Array","outputs":[{"name":"","type":"uint256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_l","type":"bool[]"}],"name":"setBoolList","outputs":[{"name":"","type":"bool[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_a","type":"bytes[2]"}],"name":"setBytesArray","outputs":[{"name":"","type":"bytes[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_l","type":"address[]"}],"name":"setAddressList","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_a","type":"int256[2]"}],"name":"setInt256Array","outputs":[{"name":"","type":"int256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_a","type":"string[2]"}],"name":"setStringArray","outputs":[{"name":"","type":"string[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s","type":"string"}],"name":"setString","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u64","type":"uint64"}],"name":"setUint64","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i16","type":"int16"}],"name":"setInt16","outputs":[{"name":"","type":"int16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i8","type":"int8"}],"name":"setInt8","outputs":[{"name":"","type":"int8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_l","type":"uint256[]"}],"name":"setUint256List","outputs":[{"name":"","type":"uint256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256","type":"int256"}],"name":"setInt256","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i32","type":"int32"}],"name":"setInt32","outputs":[{"name":"","type":"int32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b32","type":"bytes32"}],"name":"setBytes32","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_l","type":"string[]"}],"name":"setStringList","outputs":[{"name":"","type":"string[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256","type":"uint256"}],"name":"setUint256","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs","type":"bytes"}],"name":"setBytes","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a","type":"address"}],"name":"setAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i64","type":"int64"}],"name":"setInt64","outputs":[{"name":"","type":"int64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b1","type":"bytes1"}],"name":"setBytes1","outputs":[{"name":"","type":"bytes1"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`,
+			`608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b61038060048036036103
7b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b50505
05092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403
121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a8
5612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b9291505056
5b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a72305
8206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037`, + ` +// This file is an automatically generated Java binding. Do not modify as any +// change will likely be lost upon the next re-generation! + +package bindtest; + +import org.ethereum.geth.*; +import java.util.*; + +public class Test { + // ABI is the input ABI used to generate the binding from. + public final static String ABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"u16\",\"type\":\"uint16\"}],\"name\":\"setUint16\",\"outputs\":[{\"name\":\"\",\"type\":\"uint16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_a\",\"type\":\"bool[2]\"}],\"name\":\"setBoolArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_a\",\"type\":\"address[2]\"}],\"name\":\"setAddressArray\",\"outputs\":[{\"name\":\"\",\"type\":\"address[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_l\",\"type\":\"bytes[]\"}],\"name\":\"setBytesList\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u8\",\"type\":\"uint8\"}],\"name\":\"setUint8\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u32\",\"type\":\"uint32\"}],\"name\":\"setUint32\",\"outputs\":[{\"name\":\"\",\"type\":\"uint32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b\",\"type\":\"bool\"}],\"name\":\"setBool\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_l\",\"type\":\"int256[]\"}],\"name\":\"setInt256List\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_a\",\"type\":\"uint256[2]\"}],\"name\":\"setUint256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_l\",\"type\":\"bool[]\"}],\"name\":\"setBoolList\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_a\",\"type\":\"bytes[2]\"}],\"name\":\"setBytesArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_l\",\"type\":\"address[]\"}],\"name\":\"setAddressList\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_a\",\"type\":\"int256[2]\"}],\"name\":\"setInt256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_a\",\"type\":\"string[2]\"}],\"name\":\"setStringArray\",\"outputs\":[{\"name\":\"\",\"type\":\"string[2]\"}],\"payable\
":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s\",\"type\":\"string\"}],\"name\":\"setString\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u64\",\"type\":\"uint64\"}],\"name\":\"setUint64\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i16\",\"type\":\"int16\"}],\"name\":\"setInt16\",\"outputs\":[{\"name\":\"\",\"type\":\"int16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i8\",\"type\":\"int8\"}],\"name\":\"setInt8\",\"outputs\":[{\"name\":\"\",\"type\":\"int8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_l\",\"type\":\"uint256[]\"}],\"name\":\"setUint256List\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256\",\"type\":\"int256\"}],\"name\":\"setInt256\",\"outputs\":[{\"name\":\"\",\"type\":\"int256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i32\",\"type\":\"int32\"}],\"name\":\"setInt32\",\"outputs\":[{\"name\":\"\",\"type\":\"int32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b32\",\"type\":\"bytes32\"}],\"name\":\"setBytes32\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_l\",\"type\":\"string[]\"}],\"name\":\"setStringList\",\"outputs\":[{\"name\":\"\",\"type\":\"string[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256\",\"type\":\"uint256\"}],\"name\":\"setUint256\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs\",\"type\":\"bytes\"}],\"name\":\"setBytes\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i64\",\"type\":\"int64\"}],\"name\":\"setInt64\",\"outputs\":[{\"name\":\"\",\"type\":\"int64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b1\",\"type\":\"bytes1\"}],\"name\":\"setBytes1\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes1\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"; + + // BYTECODE is the compiled bytecode used for deploying new contracts. 
+ public final static String BYTECODE = "0x608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e
565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f830112151
5610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60
006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c5
65b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b60008190
50919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037"; + + // deploy deploys a new Ethereum contract, binding an instance of Test to it. + public static Test deploy(TransactOpts auth, EthereumClient client) throws Exception { + Interfaces args = Geth.newInterfaces(0); + String bytecode = BYTECODE; + return new Test(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); + } + + // Internal constructor used by contract deployment. + private Test(BoundContract deployment) { + this.Address = deployment.getAddress(); + this.Deployer = deployment.getDeployer(); + this.Contract = deployment; + } + + // Ethereum address where this contract is located at. + public final Address Address; + + // Ethereum transaction in which this contract was deployed (if known!). + public final Transaction Deployer; + + // Contract instance bound to a blockchain address. + private final BoundContract Contract; + + // Creates a new instance of Test, bound to a specific deployed contract. + public Test(Address address, EthereumClient client) throws Exception { + this(Geth.bindContract(address, ABI, client)); + } + + // setAddress is a paid mutator transaction binding the contract method 0xe30081a0. + // + // Solidity: function setAddress(address a) returns(address) + public Transaction setAddress(TransactOpts opts, Address a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setAddress(a);args.set(0,arg0); + + return this.Contract.transact(opts, "setAddress" , args); + } + + // setAddressArray is a paid mutator transaction binding the contract method 0x151f5471. 
+ // + // Solidity: function setAddressArray(address[2] a_a) returns(address[2]) + public Transaction setAddressArray(TransactOpts opts, Addresses a_a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setAddresses(a_a);args.set(0,arg0); + + return this.Contract.transact(opts, "setAddressArray" , args); + } + + // setAddressList is a paid mutator transaction binding the contract method 0x5be6b37e. + // + // Solidity: function setAddressList(address[] a_l) returns(address[]) + public Transaction setAddressList(TransactOpts opts, Addresses a_l) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setAddresses(a_l);args.set(0,arg0); + + return this.Contract.transact(opts, "setAddressList" , args); + } + + // setBool is a paid mutator transaction binding the contract method 0x1e26fd33. + // + // Solidity: function setBool(bool b) returns(bool) + public Transaction setBool(TransactOpts opts, boolean b) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBool(b);args.set(0,arg0); + + return this.Contract.transact(opts, "setBool" , args); + } + + // setBoolArray is a paid mutator transaction binding the contract method 0x118a9718. + // + // Solidity: function setBoolArray(bool[2] b_a) returns(bool[2]) + public Transaction setBoolArray(TransactOpts opts, Bools b_a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBools(b_a);args.set(0,arg0); + + return this.Contract.transact(opts, "setBoolArray" , args); + } + + // setBoolList is a paid mutator transaction binding the contract method 0x4d5ee6da. + // + // Solidity: function setBoolList(bool[] b_l) returns(bool[]) + public Transaction setBoolList(TransactOpts opts, Bools b_l) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBools(b_l);args.set(0,arg0); + + return this.Contract.transact(opts, "setBoolList" , args); + } + + // setBytes is a paid mutator transaction binding the contract method 0xda359dc8. + // + // Solidity: function setBytes(bytes bs) returns(bytes) + public Transaction setBytes(TransactOpts opts, byte[] bs) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBinary(bs);args.set(0,arg0); + + return this.Contract.transact(opts, "setBytes" , args); + } + + // setBytes1 is a paid mutator transaction binding the contract method 0xfba1a1c3. + // + // Solidity: function setBytes1(bytes1 b1) returns(bytes1) + public Transaction setBytes1(TransactOpts opts, byte[] b1) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBinary(b1);args.set(0,arg0); + + return this.Contract.transact(opts, "setBytes1" , args); + } + + // setBytes32 is a paid mutator transaction binding the contract method 0xc2b12a73. + // + // Solidity: function setBytes32(bytes32 b32) returns(bytes32) + public Transaction setBytes32(TransactOpts opts, byte[] b32) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBinary(b32);args.set(0,arg0); + + return this.Contract.transact(opts, "setBytes32" , args); + } + + // setBytesArray is a paid mutator transaction binding the contract method 0x5119655d. 
+ // + // Solidity: function setBytesArray(bytes[2] bs_a) returns(bytes[2]) + public Transaction setBytesArray(TransactOpts opts, Binaries bs_a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBinaries(bs_a);args.set(0,arg0); + + return this.Contract.transact(opts, "setBytesArray" , args); + } + + // setBytesList is a paid mutator transaction binding the contract method 0x16c105e2. + // + // Solidity: function setBytesList(bytes[] bs_l) returns(bytes[]) + public Transaction setBytesList(TransactOpts opts, Binaries bs_l) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBinaries(bs_l);args.set(0,arg0); + + return this.Contract.transact(opts, "setBytesList" , args); + } + + // setInt16 is a paid mutator transaction binding the contract method 0x86114cea. + // + // Solidity: function setInt16(int16 i16) returns(int16) + public Transaction setInt16(TransactOpts opts, short i16) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setInt16(i16);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt16" , args); + } + + // setInt256 is a paid mutator transaction binding the contract method 0xa53b1c1e. + // + // Solidity: function setInt256(int256 i256) returns(int256) + public Transaction setInt256(TransactOpts opts, BigInt i256) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBigInt(i256);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt256" , args); + } + + // setInt256Array is a paid mutator transaction binding the contract method 0x6aa482fc. + // + // Solidity: function setInt256Array(int256[2] i256_a) returns(int256[2]) + public Transaction setInt256Array(TransactOpts opts, BigInts i256_a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBigInts(i256_a);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt256Array" , args); + } + + // setInt256List is a paid mutator transaction binding the contract method 0x22722302. + // + // Solidity: function setInt256List(int256[] i256_l) returns(int256[]) + public Transaction setInt256List(TransactOpts opts, BigInts i256_l) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBigInts(i256_l);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt256List" , args); + } + + // setInt32 is a paid mutator transaction binding the contract method 0xb7d5df31. + // + // Solidity: function setInt32(int32 i32) returns(int32) + public Transaction setInt32(TransactOpts opts, int i32) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setInt32(i32);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt32" , args); + } + + // setInt64 is a paid mutator transaction binding the contract method 0xe673eb32. + // + // Solidity: function setInt64(int64 i64) returns(int64) + public Transaction setInt64(TransactOpts opts, long i64) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setInt64(i64);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt64" , args); + } + + // setInt8 is a paid mutator transaction binding the contract method 0x9a19a953. 
+ // + // Solidity: function setInt8(int8 i8) returns(int8) + public Transaction setInt8(TransactOpts opts, byte i8) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setInt8(i8);args.set(0,arg0); + + return this.Contract.transact(opts, "setInt8" , args); + } + + // setString is a paid mutator transaction binding the contract method 0x7fcaf666. + // + // Solidity: function setString(string s) returns(string) + public Transaction setString(TransactOpts opts, String s) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setString(s);args.set(0,arg0); + + return this.Contract.transact(opts, "setString" , args); + } + + // setStringArray is a paid mutator transaction binding the contract method 0x7173b695. + // + // Solidity: function setStringArray(string[2] s_a) returns(string[2]) + public Transaction setStringArray(TransactOpts opts, Strings s_a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setStrings(s_a);args.set(0,arg0); + + return this.Contract.transact(opts, "setStringArray" , args); + } + + // setStringList is a paid mutator transaction binding the contract method 0xc5777961. + // + // Solidity: function setStringList(string[] s_l) returns(string[]) + public Transaction setStringList(TransactOpts opts, Strings s_l) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setStrings(s_l);args.set(0,arg0); + + return this.Contract.transact(opts, "setStringList" , args); + } + + // setUint16 is a paid mutator transaction binding the contract method 0x0477988a. + // + // Solidity: function setUint16(uint16 u16) returns(uint16) + public Transaction setUint16(TransactOpts opts, BigInt u16) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setUint16(u16);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint16" , args); + } + + // setUint256 is a paid mutator transaction binding the contract method 0xd2282dc5. + // + // Solidity: function setUint256(uint256 u256) returns(uint256) + public Transaction setUint256(TransactOpts opts, BigInt u256) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBigInt(u256);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint256" , args); + } + + // setUint256Array is a paid mutator transaction binding the contract method 0x2766a755. + // + // Solidity: function setUint256Array(uint256[2] u256_a) returns(uint256[2]) + public Transaction setUint256Array(TransactOpts opts, BigInts u256_a) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBigInts(u256_a);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint256Array" , args); + } + + // setUint256List is a paid mutator transaction binding the contract method 0xa0709e19. + // + // Solidity: function setUint256List(uint256[] u256_l) returns(uint256[]) + public Transaction setUint256List(TransactOpts opts, BigInts u256_l) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setBigInts(u256_l);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint256List" , args); + } + + // setUint32 is a paid mutator transaction binding the contract method 0x1c9352e2. 
+ // + // Solidity: function setUint32(uint32 u32) returns(uint32) + public Transaction setUint32(TransactOpts opts, BigInt u32) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setUint32(u32);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint32" , args); + } + + // setUint64 is a paid mutator transaction binding the contract method 0x822cba69. + // + // Solidity: function setUint64(uint64 u64) returns(uint64) + public Transaction setUint64(TransactOpts opts, BigInt u64) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setUint64(u64);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint64" , args); + } + + // setUint8 is a paid mutator transaction binding the contract method 0x1774e646. + // + // Solidity: function setUint8(uint8 u8) returns(uint8) + public Transaction setUint8(TransactOpts opts, BigInt u8) throws Exception { + Interfaces args = Geth.newInterfaces(1); + Interface arg0 = Geth.newInterface();arg0.setUint8(u8);args.set(0,arg0); + + return this.Contract.transact(opts, "setUint8" , args); + } +} +`, + }, + } + for i, c := range cases { + binding, err := Bind([]string{c.name}, []string{c.abi}, []string{c.bytecode}, nil, "bindtest", LangJava, nil, nil) + if err != nil { + t.Fatalf("test %d: failed to generate binding: %v", i, err) + } + // Remove empty lines + removeEmptys := func(input string) string { + lines := strings.Split(input, "\n") + var index int + for _, line := range lines { + if strings.TrimSpace(line) != "" { + lines[index] = line + index += 1 + } + } + lines = lines[:index] + return strings.Join(lines, "\n") + } + binding = removeEmptys(binding) + expect := removeEmptys(c.expected) + if binding != expect { + t.Fatalf("test %d: generated binding mismatch, has %s, want %s", i, binding, c.expected) + } + } +} diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index 22dca1e71d..4203c96e7e 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -26,7 +26,7 @@ package bind -import "github.com/ava-labs/coreth/accounts/abi" +import "github.com/tenderly/coreth/accounts/abi" // tmplData is the data structure required to fill the binding template. 
type tmplData struct { @@ -101,10 +101,10 @@ import ( "strings" "errors" - "github.com/ava-labs/coreth/accounts/abi" - "github.com/ava-labs/coreth/accounts/abi/bind" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" + "github.com/tenderly/coreth/accounts/abi" + "github.com/tenderly/coreth/accounts/abi/bind" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" ) diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go index 378f7ef877..c9c14c58da 100644 --- a/accounts/abi/bind/util.go +++ b/accounts/abi/bind/util.go @@ -31,8 +31,8 @@ import ( "errors" "time" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go index 2acf86fb0c..7e27354166 100644 --- a/accounts/abi/bind/util_test.go +++ b/accounts/abi/bind/util_test.go @@ -33,10 +33,10 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/accounts/abi/bind" - "github.com/ava-labs/coreth/accounts/abi/bind/backends" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/accounts/abi/bind" + "github.com/tenderly/coreth/accounts/abi/bind/backends" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) diff --git a/accounts/accounts.go b/accounts/accounts.go index acde6b436a..e29d85778d 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -31,8 +31,8 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" "golang.org/x/crypto/sha3" diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 31f8d6804e..406617d634 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -32,11 +32,11 @@ import ( "math/big" "sync" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" - "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/coreth/signer/core/apitypes" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" + "github.com/tenderly/coreth/rpc" + "github.com/tenderly/coreth/signer/core/apitypes" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 4284d29f47..d05a8a9d78 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -37,11 +37,10 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/accounts" - mapset "github.com/deckarep/golang-set/v2" + "github.com/tenderly/coreth/accounts" + mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "golang.org/x/exp/slices" ) // Minimum amount of time between cache reloads. 
This limit applies if the platform does @@ -49,10 +48,11 @@ import ( // exist yet, the code will attempt to create a watcher at most this often. const minReloadInterval = 2 * time.Second -// byURL defines the sorting order for accounts. -func byURL(a, b accounts.Account) int { - return a.URL.Cmp(b.URL) -} +type accountsByURL []accounts.Account + +func (s accountsByURL) Len() int { return len(s) } +func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 } +func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // AmbiguousAddrError is returned when attempting to unlock // an address for which more than one file exists. @@ -77,7 +77,7 @@ type accountCache struct { keydir string watcher *watcher mu sync.Mutex - all []accounts.Account + all accountsByURL byAddr map[common.Address][]accounts.Account throttle *time.Timer notify chan struct{} @@ -89,7 +89,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) { keydir: keydir, byAddr: make(map[common.Address][]accounts.Account), notify: make(chan struct{}, 1), - fileC: fileCache{all: mapset.NewThreadUnsafeSet[string]()}, + fileC: fileCache{all: mapset.NewThreadUnsafeSet()}, } ac.watcher = newWatcher(ac) return ac, ac.notify @@ -156,14 +156,6 @@ func (ac *accountCache) deleteByFile(path string) { } } -// watcherStarted returns true if the watcher loop started running (even if it -// has since also ended). -func (ac *accountCache) watcherStarted() bool { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.watcher.running || ac.watcher.runEnded -} - func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account { for i := range slice { if slice[i] == elem { @@ -204,7 +196,7 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) { default: err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))} copy(err.Matches, matches) - slices.SortFunc(err.Matches, byURL) + sort.Sort(accountsByURL(err.Matches)) return accounts.Account{}, err } } @@ -293,15 +285,16 @@ func (ac *accountCache) scanAccounts() error { // Process all the file diffs start := time.Now() - for _, path := range creates.ToSlice() { - if a := readAccount(path); a != nil { + for _, p := range creates.ToSlice() { + if a := readAccount(p.(string)); a != nil { ac.add(*a) } } - for _, path := range deletes.ToSlice() { - ac.deleteByFile(path) + for _, p := range deletes.ToSlice() { + ac.deleteByFile(p.(string)) } - for _, path := range updates.ToSlice() { + for _, p := range updates.ToSlice() { + path := p.(string) ac.deleteByFile(path) if a := readAccount(path); a != nil { ac.add(*a) diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index 32c2ae3330..debcaa2307 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -36,7 +36,7 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/accounts" + "github.com/tenderly/coreth/accounts" "github.com/cespare/cp" "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go index 23b39fa584..6a2d820412 100644 --- a/accounts/keystore/key.go +++ b/accounts/keystore/key.go @@ -38,7 +38,7 @@ import ( "strings" "time" - "github.com/ava-labs/coreth/accounts" + "github.com/tenderly/coreth/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/google/uuid" diff --git a/accounts/keystore/keystore.go 
b/accounts/keystore/keystore.go index fb72f0eb14..a923487dcd 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -42,8 +42,8 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go index ed51fb8e76..4626fd1eaf 100644 --- a/accounts/keystore/keystore_test.go +++ b/accounts/keystore/keystore_test.go @@ -36,7 +36,7 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/accounts" + "github.com/tenderly/coreth/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index acf944f717..bc85f30033 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -47,7 +47,7 @@ import ( "os" "path/filepath" - "github.com/ava-labs/coreth/accounts" + "github.com/tenderly/coreth/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go index 1dfbd9c2a9..b25c6fc246 100644 --- a/accounts/keystore/presale.go +++ b/accounts/keystore/presale.go @@ -35,7 +35,7 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/accounts" + "github.com/tenderly/coreth/accounts" "github.com/ethereum/go-ethereum/crypto" "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" diff --git a/accounts/keystore/wallet.go b/accounts/keystore/wallet.go index 7193526399..0ff33b5b94 100644 --- a/accounts/keystore/wallet.go +++ b/accounts/keystore/wallet.go @@ -29,9 +29,9 @@ package keystore import ( "math/big" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" "github.com/ethereum/go-ethereum/crypto" ) diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go index d2449bfd81..e9ce0d4fc2 100644 --- a/accounts/scwallet/hub.go +++ b/accounts/scwallet/hub.go @@ -51,7 +51,7 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/accounts" + "github.com/tenderly/coreth/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index 327339c222..4d6472c8f5 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -43,9 +43,9 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" diff --git a/chain/chain_test.go b/chain/chain_test.go new file mode 100644 index 0000000000..a6a2db25ae --- /dev/null +++ b/chain/chain_test.go @@ -0,0 +1,232 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
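+// Test harness sketch (summary of the code below, not part of the original
+// change): two testChain instances exchange RLP-encoded blocks over Go
+// channels, acknowledging every insertion so that block generation on the
+// two sides stays in lockstep.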
+ +package chain + +import ( + cryptorand "crypto/rand" + "math/big" + "math/rand" + "testing" + + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/eth" + "github.com/tenderly/coreth/eth/ethconfig" + "github.com/tenderly/coreth/node" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" +) + +type testChain struct { + t *testing.T + name string + hasBlock map[common.Hash]struct{} + blocks []common.Hash + blkCount uint32 + chain *ETHChain + outBlockCh chan<- []byte + inAckCh <-chan struct{} +} + +func (tc *testChain) insertBlock(block *types.Block) { + if _, ok := tc.hasBlock[block.Hash()]; !ok { + tc.hasBlock[block.Hash()] = struct{}{} + tc.blocks = append(tc.blocks, block.Hash()) + } +} + +func newTestChain(name string, config *eth.Config, + inBlockCh <-chan []byte, outBlockCh chan<- []byte, + inAckCh <-chan struct{}, outAckCh chan<- struct{}, + t *testing.T) *testChain { + chain, err := NewETHChain( + config, + &node.Config{}, + rawdb.NewMemoryDatabase(), + eth.DefaultSettings, + &dummy.ConsensusCallbacks{ + OnFinalizeAndAssemble: func(head *types.Header, _ *state.StateDB, _ []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + randData := make([]byte, 32) + _, err := rand.Read(randData) + if err != nil { + t.Fatal(err) + } + return randData, nil, nil, nil + }, + }, + common.Hash{}, + &mockable.Clock{}, + ) + if err != nil { + t.Fatal(err) + } + tc := &testChain{ + t: t, + name: name, + hasBlock: make(map[common.Hash]struct{}), + blocks: make([]common.Hash, 0), + blkCount: 0, + chain: chain, + outBlockCh: outBlockCh, + inAckCh: inAckCh, + } + tc.insertBlock(tc.chain.GetGenesisBlock()) + // Start a goroutine to deserialize and insert each block received from [inBlockCh] + // and send an acknowledgement via [outAckCh] + go func() { + for serialized := range inBlockCh { + block := new(types.Block) + err := rlp.DecodeBytes(serialized, block) + if err != nil { + panic(err) + } + if block.Hash() != tc.chain.GetGenesisBlock().Hash() { + if err = tc.chain.InsertBlock(block); err != nil { + tc.t.Fatalf("Failed to insert block for chain %q: %s", name, err) + } + } + tc.insertBlock(block) + outAckCh <- struct{}{} + } + }() + return tc +} + +func (tc *testChain) start() { + tc.chain.Start() +} + +func (tc *testChain) stop() { + tc.chain.Stop() +} + +func (tc *testChain) GenRandomTree(genBlocks int) { + for i := 0; i < genBlocks; i++ { + numBlocks := len(tc.blocks) + parentIndex := rand.Intn(numBlocks) + parentBlockHash := tc.blocks[parentIndex] + parentBlock := tc.chain.GetBlockByHash(parentBlockHash) + if parentBlock == nil { + tc.t.Fatalf("Failed to get parent block by hash %s, %d", parentBlockHash, numBlocks) + } + if err := tc.chain.SetPreference(parentBlock); err != nil { + tc.t.Fatal(err) + } + block, err := tc.chain.GenerateBlock() + if err != nil { + tc.t.Fatalf("chain %s failed to generate block: %s", tc.name, err) + } + if err := tc.chain.InsertBlock(block); err != nil { + tc.t.Fatal(err) + } + + tc.blkCount++ + if len(block.Uncles()) != 0 { + tc.t.Fatal("#uncles should be zero") + } + tc.insertBlock(block) + if tc.outBlockCh != nil { + 
serialized, err := rlp.EncodeToBytes(block) + if err != nil { + tc.t.Fatal(err) + } + tc.outBlockCh <- serialized + <-tc.inAckCh + } + } +} + +func run(config *eth.Config, size1, size2 int, t *testing.T) { + aliceBlk := make(chan []byte) + bobBlk := make(chan []byte) + aliceAck := make(chan struct{}) + bobAck := make(chan struct{}) + alice := newTestChain("alice", config, bobBlk, aliceBlk, bobAck, aliceAck, t) + bob := newTestChain("bob", config, aliceBlk, bobBlk, aliceAck, bobAck, t) + alice.start() + bob.start() + log.Info("alice genesis", "block", alice.chain.GetGenesisBlock().Hash().Hex()) + log.Info("bob genesis", "block", bob.chain.GetGenesisBlock().Hash().Hex()) + alice.GenRandomTree(size1) + log.Info("alice finished generating the tree") + + bob.outBlockCh = nil + bob.GenRandomTree(size2) + for i := range bob.blocks { + serialized, err := rlp.EncodeToBytes(bob.chain.GetBlockByHash(bob.blocks[i])) + if err != nil { + t.Fatal(err) + } + bobBlk <- serialized + <-aliceAck + } + log.Info("bob finished generating the tree") + + log.Info("comparing two trees") + if len(alice.blocks) != len(bob.blocks) { + t.Fatalf("mismatching tree size %d != %d", len(alice.blocks), len(bob.blocks)) + } + gn := big.NewInt(0) + for i := range alice.blocks { + ablk := alice.chain.GetBlockByHash(alice.blocks[i]) + bblk := bob.chain.GetBlockByHash(alice.blocks[i]) + for ablk.Number().Cmp(gn) > 0 && bblk.Number().Cmp(gn) > 0 { + result := ablk.Hash() == bblk.Hash() + if !result { + t.Fatal("mismatching path") + } + ablk = alice.chain.GetBlockByHash(ablk.ParentHash()) + bblk = bob.chain.GetBlockByHash(bblk.ParentHash()) + } + } + alice.stop() + bob.stop() +} + +// TestChain randomly generates a chain (tree of blocks) on each of two +// entities ("Alice" and "Bob"), lets them exchange each other's blocks via +// Go channels, and finally checks that they have identical chain structures. +func TestChain(t *testing.T) { + // configure the chain + config := ethconfig.DefaultConfig + chainConfig := &params.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + } + + // configure the genesis block + genBalance := big.NewInt(100000000000000000) + genKey, _ := keystore.NewKey(cryptorand.Reader) + + config.Genesis = &core.Genesis{ + Config: chainConfig, + Nonce: 0, + Number: 0, + ExtraData: hexutil.MustDecode("0x00"), + GasLimit: 100000000, + Difficulty: big.NewInt(0), + Alloc: core.GenesisAlloc{genKey.Address: {Balance: genBalance}}, + } + + run(&config, 20, 20, t) +} diff --git a/chain/coreth.go b/chain/coreth.go new file mode 100644 index 0000000000..5eec8181bf --- /dev/null +++ b/chain/coreth.go @@ -0,0 +1,218 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
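+// Illustrative usage sketch, not taken verbatim from this change: a minimal
+// ETHChain lifecycle as exercised by the tests in this package (configuration
+// values and error handling are elided; see chain_test.go for a full harness).
+//
+//	chain, err := NewETHChain(&config, &node.Config{}, rawdb.NewMemoryDatabase(),
+//		eth.DefaultSettings, new(dummy.ConsensusCallbacks), common.Hash{}, &mockable.Clock{})
+//	chain.Start()
+//	defer chain.Stop()
+//	block, err := chain.GenerateBlock()
+//	err = chain.InsertBlock(block)
+//	err = chain.Accept(block)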
+ +package chain + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/eth" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/node" + "github.com/tenderly/coreth/rpc" + "github.com/ethereum/go-ethereum/common" +) + +var ( + BlackholeAddr = common.Address{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } +) + +type Tx = types.Transaction +type Block = types.Block +type Hash = common.Hash + +type ETHChain struct { + backend *eth.Ethereum +} + +// NewETHChain creates an Ethereum blockchain with the given configs. +func NewETHChain(config *eth.Config, nodecfg *node.Config, chainDB ethdb.Database, settings eth.Settings, consensusCallbacks *dummy.ConsensusCallbacks, lastAcceptedHash common.Hash, clock *mockable.Clock) (*ETHChain, error) { + node, err := node.New(nodecfg) + if err != nil { + return nil, err + } + backend, err := eth.New(node, config, consensusCallbacks, chainDB, settings, lastAcceptedHash, clock) + if err != nil { + return nil, fmt.Errorf("failed to create backend: %w", err) + } + chain := &ETHChain{backend: backend} + backend.SetEtherbase(BlackholeAddr) + return chain, nil +} + +func (self *ETHChain) Start() { + self.backend.Start() +} + +func (self *ETHChain) Stop() { + self.backend.Stop() +} + +func (self *ETHChain) GenerateBlock() (*types.Block, error) { + return self.backend.Miner().GenerateBlock() +} + +func (self *ETHChain) BlockChain() *core.BlockChain { + return self.backend.BlockChain() +} + +func (self *ETHChain) APIBackend() *eth.EthAPIBackend { + return self.backend.APIBackend +} + +func (self *ETHChain) PendingSize() int { + pending := self.backend.TxPool().Pending(true) + count := 0 + for _, txs := range pending { + count += len(txs) + } + return count +} + +func (self *ETHChain) AddRemoteTxs(txs []*types.Transaction) []error { + return self.backend.TxPool().AddRemotes(txs) +} + +func (self *ETHChain) AddRemoteTxsSync(txs []*types.Transaction) []error { + return self.backend.TxPool().AddRemotesSync(txs) +} + +func (self *ETHChain) AddLocalTxs(txs []*types.Transaction) []error { + return self.backend.TxPool().AddLocals(txs) +} + +func (self *ETHChain) CurrentBlock() *types.Block { + return self.backend.BlockChain().CurrentBlock() +} + +// Returns a new mutable state based on the current HEAD block. +func (self *ETHChain) CurrentState() (*state.StateDB, error) { + return self.backend.BlockChain().State() +} + +// Returns a new mutable state based on the given block. +func (self *ETHChain) BlockState(block *types.Block) (*state.StateDB, error) { + return self.backend.BlockChain().StateAt(block.Root()) +} + +// Retrieves a block from the database by hash. +func (self *ETHChain) GetBlockByHash(hash common.Hash) *types.Block { + return self.backend.BlockChain().GetBlockByHash(hash) +} + +// Retrieves a block from the database by number. +func (self *ETHChain) GetBlockByNumber(num uint64) *types.Block { + return self.backend.BlockChain().GetBlockByNumber(num) +} + +// Validate the canonical chain from current block to the genesis. +// This should only be called as a convenience method in tests, not +// in production as it traverses the entire chain.
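+// A minimal test-only call site (sketch):
+//
+//	if err := chain.ValidateCanonicalChain(); err != nil {
+//		t.Fatal(err)
+//	}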
+func (self *ETHChain) ValidateCanonicalChain() error { + return self.backend.BlockChain().ValidateCanonicalChain() +} + +// SetPreference sets the current head block to the one provided as an argument +// regardless of what the chain contents were prior. +func (self *ETHChain) SetPreference(block *types.Block) error { + return self.BlockChain().SetPreference(block) +} + +// Accept sets a minimum height at which no reorg can pass. Additionally, +// this function may trigger a reorg if the block being accepted is not in the +// canonical chain. +func (self *ETHChain) Accept(block *types.Block) error { + return self.BlockChain().Accept(block) +} + +// Reject tells the chain that [block] has been rejected. +func (self *ETHChain) Reject(block *types.Block) error { + return self.BlockChain().Reject(block) +} + +// LastConsensusAcceptedBlock returns the last block to be marked as accepted. +// It may or may not be processed. +func (self *ETHChain) LastConsensusAcceptedBlock() *types.Block { + return self.BlockChain().LastConsensusAcceptedBlock() +} + +// LastAcceptedBlock returns the last block to be marked as accepted and +// processed. +func (self *ETHChain) LastAcceptedBlock() *types.Block { + return self.BlockChain().LastAcceptedBlock() +} + +// RemoveRejectedBlocks removes the rejected blocks between heights +// [start] and [end]. +func (self *ETHChain) RemoveRejectedBlocks(start, end uint64) error { + return self.BlockChain().RemoveRejectedBlocks(start, end) +} + +func (self *ETHChain) GetReceiptsByHash(hash common.Hash) types.Receipts { + return self.backend.BlockChain().GetReceiptsByHash(hash) +} + +func (self *ETHChain) GetGenesisBlock() *types.Block { + return self.backend.BlockChain().Genesis() +} + +func (self *ETHChain) InsertBlock(block *types.Block) error { + return self.backend.BlockChain().InsertBlock(block) +} + +func (self *ETHChain) NewRPCHandler(maximumDuration time.Duration) *rpc.Server { + return rpc.NewServer(maximumDuration) +} + +// AttachEthService registers the backend RPC services provided by Ethereum +// to the provided handler under their assigned namespaces. 
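+// Illustrative sketch of attaching services to a handler; the "eth"
+// namespace is assumed here to be among those returned by backend.APIs():
+//
+//	handler := chain.NewRPCHandler(10 * time.Second)
+//	if err := chain.AttachEthService(handler, []string{"eth"}); err != nil {
+//		// a requested service was not found, or registration failed
+//	}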
+func (self *ETHChain) AttachEthService(handler *rpc.Server, names []string) error { + enabledServicesSet := make(map[string]struct{}) + for _, ns := range names { + enabledServicesSet[ns] = struct{}{} + } + + apiSet := make(map[string]rpc.API) + for _, api := range self.backend.APIs() { + if existingAPI, exists := apiSet[api.Name]; exists { + return fmt.Errorf("duplicated API name: %s, namespaces %s and %s", api.Name, api.Namespace, existingAPI.Namespace) + } + apiSet[api.Name] = api + } + + for name := range enabledServicesSet { + api, exists := apiSet[name] + if !exists { + return fmt.Errorf("API service %s not found", name) + } + if err := handler.RegisterName(api.Namespace, api.Service); err != nil { + return err + } + } + + return nil +} + +func (self *ETHChain) GetTxSubmitCh() <-chan core.NewTxsEvent { + newTxsChan := make(chan core.NewTxsEvent) + self.backend.TxPool().SubscribeNewTxsEvent(newTxsChan) + return newTxsChan +} + +func (self *ETHChain) GetTxAcceptedSubmitCh() <-chan core.NewTxsEvent { + newTxsChan := make(chan core.NewTxsEvent) + self.backend.BlockChain().SubscribeAcceptedTransactionEvent(newTxsChan) + return newTxsChan +} + +func (self *ETHChain) GetTxPool() *core.TxPool { return self.backend.TxPool() } +func (self *ETHChain) BloomIndexer() *core.ChainIndexer { return self.backend.BloomIndexer() } diff --git a/chain/counter_test.go b/chain/counter_test.go new file mode 100644 index 0000000000..10e9cbc19b --- /dev/null +++ b/chain/counter_test.go @@ -0,0 +1,117 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. + +// NOTE from Ted: to compile from solidity source using the code, make sure +// your solc<0.8.0, as geth 1.9.21 does not support the JSON output from +// solc>=0.8.0: +// See: +// - https://github.com/ethereum/go-ethereum/issues/22041 +// - https://github.com/ethereum/go-ethereum/pull/22092 + +package chain + +import ( + "fmt" + "math/big" + + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/types" + + "github.com/ethereum/go-ethereum/log" +) + +func TestCounter(t *testing.T) { + chain, newTxPoolHeadChan, txSubmitCh := NewDefaultChain(t) + + // Mark the genesis block as accepted and start the chain + chain.Start() + defer chain.Stop() + + // NOTE: use precompiled `counter.sol` for portability, do not remove the + // following code (for debug purpose) + //counterSrc, err := filepath.Abs(gopath + "/src/github.com/tenderly/coreth/examples/counter/counter.sol") + // if err != nil { + // t.Fatal(err) + // } + //contracts, err := compiler.CompileSolidity("", counterSrc) + // if err != nil { + // t.Fatal(err) + // } + //contract, _ := contracts[fmt.Sprintf("%s:%s", counterSrc, "Counter")] + //code := common.Hex2Bytes(contract.Code[2:]) + contract := "6080604052348015600f57600080fd5b50602a60008190555060b9806100266000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80631003e2d214602d575b600080fd5b605660048036036020811015604157600080fd5b8101908080359060200190929190505050606c565b6040518082815260200191505060405180910390f35b60008160005401600081905550600054905091905056fea264697066735822122066dad7255aac3ea41858c2a0fe986696876ac85b2bb4e929d2062504c244054964736f6c63430007060033" + code := common.Hex2Bytes(contract) + + nonce := uint64(0) + tx := types.NewContractCreation(nonce, big.NewInt(0), uint64(gasLimit), gasPrice, code) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), fundedKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + for _, err := range 
chain.AddRemoteTxs([]*types.Transaction{signedTx}) { + if err != nil { + t.Fatal(err) + } + } + <-txSubmitCh + nonce++ + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + + <-newTxPoolHeadChan + log.Info("Generated block with new counter contract creation", "blkNumber", block.NumberU64()) + + receipts := chain.GetReceiptsByHash(block.Hash()) + if len(receipts) != 1 { + t.Fatalf("Expected length of receipts to be 1, but found %d", len(receipts)) + } + contractAddr := receipts[0].ContractAddress + + call := common.Hex2Bytes("1003e2d20000000000000000000000000000000000000000000000000000000000000001") + txs := make([]*types.Transaction, 0) + for i := 0; i < 10; i++ { + tx := types.NewTransaction(nonce, contractAddr, big.NewInt(0), uint64(gasLimit), gasPrice, call) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), fundedKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + txs = append(txs, signedTx) + nonce++ + } + for _, err := range chain.AddRemoteTxs(txs) { + if err != nil { + t.Fatal(err) + } + } + <-txSubmitCh + // Generate block + block, err = chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + + <-newTxPoolHeadChan + log.Info("Generated block with counter contract interactions", "blkNumber", block.NumberU64()) + receipts = chain.GetReceiptsByHash(block.Hash()) + if len(receipts) != 10 { + t.Fatalf("Expected 10 receipts, but found %d", len(receipts)) + } + + state, err := chain.CurrentState() + if err != nil { + t.Fatal(err) + } + xState := state.GetState(contractAddr, common.BigToHash(big.NewInt(0))) + + log.Info(fmt.Sprintf("genesis balance = %s", state.GetBalance(fundedKey.Address))) + log.Info(fmt.Sprintf("contract balance = %s", state.GetBalance(contractAddr))) + log.Info(fmt.Sprintf("x = %s", xState.String())) + if xState.Big().Cmp(big.NewInt(52)) != 0 { + t.Fatal("incorrect state value") + } +} diff --git a/chain/multicoin_test.go b/chain/multicoin_test.go new file mode 100644 index 0000000000..0a1fdae26f --- /dev/null +++ b/chain/multicoin_test.go @@ -0,0 +1,290 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
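+// Overview (summary of the test below): deploy a contract that forwards
+// multicoin transfer/getBalance calls to the builtin contract, move coin0
+// balances between the genesis key, bob, and the contract, then assert the
+// final GetBalanceMultiCoin values.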
+ +// NOTE from Ted: to compile from solidity source using the code, make sure +// your solc<0.8.0, as geth 1.9.21 does not support the JSON output from +// solc>=0.8.0: +// See: +// - https://github.com/ethereum/go-ethereum/issues/22041 +// - https://github.com/ethereum/go-ethereum/pull/22092 + +package chain + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth" + "github.com/tenderly/coreth/eth/ethconfig" + "github.com/tenderly/coreth/node" +) + +// TestMulticoin tests multicoin low-level state management and regular +// transaction/smart contract transfer. +func TestMulticoin(t *testing.T) { + // configure the chain + config := ethconfig.NewDefaultConfig() + + // configure the genesis block + genesisJSON := `{"config":{"chainId":1,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"751a0b96e1042bee789452ecb20253fba40dbe85":{"balance":"0x1000000000000000", "mcbalance": {"0x0000000000000000000000000000000000000000000000000000000000000000": 1000000000000000000}}, "0100000000000000000000000000000000000000": {"code": "0x73000000000000000000000000000000000000000030146080604052600436106100405760003560e01c80631e01043914610045578063b6510bb314610087575b600080fd5b6100716004803603602081101561005b57600080fd5b81019080803590602001909291905050506100f6565b6040518082815260200191505060405180910390f35b81801561009357600080fd5b506100f4600480360360808110156100aa57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050610119565b005b60003073ffffffffffffffffffffffffffffffffffffffff168290cd9050919050565b8373ffffffffffffffffffffffffffffffffffffffff1681836108fc8690811502906040516000604051808303818888878c8acf95505050505050158015610165573d6000803e3d6000fd5b505050505056fea26469706673582212204ca02a58b31e59814fcb487b2bdc205149e01e9f695f02f5e73ae40c4f027c1e64736f6c634300060a0033", "balance": "0x0", "mcbalance": {}}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` + mcAbiJSON := `[{"inputs":[{"internalType":"uint256","name":"coinid","type":"uint256"}],"name":"getBalance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address 
payable","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint256","name":"coinid","type":"uint256"},{"internalType":"uint256","name":"amount2","type":"uint256"}],"name":"transfer","outputs":[],"stateMutability":"nonpayable","type":"function"}]` + genesisKey := "0xabd71b35d559563fea757f0f5edbde286fb8c043105b15abb7cd57189306d7d1" + + bobKey, _ := keystore.NewKey(rand.Reader) + genesisBlock := new(core.Genesis) + if err := json.Unmarshal([]byte(genesisJSON), genesisBlock); err != nil { + t.Fatal(err) + } + hk, _ := crypto.HexToECDSA(genesisKey[2:]) + genKey := keystore.NewKeyFromECDSA(hk) + + config.Genesis = genesisBlock + + // NOTE: use precompiled `mc_test.sol` for portability, do not remove the + // following code (for debug purpose) + // + //// compile the smart contract + //gopath := os.Getenv("GOPATH") + //if gopath == "" { + // gopath = build.Default.GOPATH + //} + //counterSrc, err := filepath.Abs(gopath + "/src/github.com/tenderly/coreth/examples/multicoin/mc_test.sol") + //if err != nil { + // t.Fatal(err) + // } + //contracts, err := compiler.CompileSolidity("", counterSrc) + //if err != nil { + // t.Fatal(err) + // } + //contract, _ := contracts[fmt.Sprintf("%s:%s", counterSrc, "MCTest")] + // abiStr, err := json.Marshal(contract.Info.AbiDefinition) + // contractAbi, err := abi.JSON(strings.NewReader(string(abiStr))) + // if err != nil { + // t.Fatal(err) + // } + // code := common.Hex2Bytes(contract.Code[2:]) + + // see `mc_test.sol` + contract := "608060405234801561001057600080fd5b50610426806100206000396000f3fe60806040526004361061002d5760003560e01c8063a41fe49f14610039578063ba7b37d41461008857610034565b3661003457005b600080fd5b34801561004557600080fd5b506100866004803603606081101561005c57600080fd5b810190808035906020019092919080359060200190929190803590602001909291905050506100c3565b005b34801561009457600080fd5b506100c1600480360360208110156100ab57600080fd5b810190808035906020019092919050505061025a565b005b600073010000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1633858585604051602401808573ffffffffffffffffffffffffffffffffffffffff1681526020018481526020018381526020018281526020019450505050506040516020818303038152906040527fb6510bb3000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040518082805190602001908083835b602083106101df57805182526020820191506020810190506020830392506101bc565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d8060008114610241576040519150601f19603f3d011682016040523d82523d6000602084013e610246565b606091505b505090508061025457600080fd5b50505050565b60008073010000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1683604051602401808281526020019150506040516020818303038152906040527f1e010439000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040518082805190602001908083835b602083106103495780518252602082019150602081019050602083039250610326565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d80600081146103ab576040519150601f19603f3d011682016040523d82523d6000602084013e6103b0565b606091505b5091509150816103bf57600080fd5b808060200190516020811
0156103d457600080fd5b810190808051906020019092919050505060008190555050505056fea26469706673582212207931f8bf71bbaeaffac554cafb419604155328b1466fae52488964ccba082f5464736f6c63430007060033" + contractAbi, err := abi.JSON(strings.NewReader(`[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"uint256","name":"coinid","type":"uint256"}],"name":"updateBalance","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint256","name":"coinid","type":"uint256"},{"internalType":"uint256","name":"amount2","type":"uint256"}],"name":"withdraw","outputs":[],"stateMutability":"nonpayable","type":"function"},{"stateMutability":"payable","type":"receive"}]`)) + if err != nil { + t.Fatal(err) + } + code := common.Hex2Bytes(contract) + + var ( + chain *ETHChain + ) + chain, err = NewETHChain( + &config, + &node.Config{}, + rawdb.NewMemoryDatabase(), + eth.DefaultSettings, + new(dummy.ConsensusCallbacks), + common.Hash{}, + &mockable.Clock{}, + ) + if err != nil { + t.Fatal(err) + } + + newTxPoolHeadChan := make(chan core.NewTxPoolHeadEvent, 1) + log.Info(chain.GetGenesisBlock().Hash().Hex()) + + mcAbi, err := abi.JSON(strings.NewReader(mcAbiJSON)) + if err != nil { + t.Fatal(err) + } + + // start the chain + chain.GetTxPool().SubscribeNewHeadEvent(newTxPoolHeadChan) + txSubmitCh := chain.GetTxSubmitCh() + chain.Start() + + nonce := uint64(0) + tx := types.NewContractCreation(nonce, big.NewInt(0), uint64(gasLimit), gasPrice, code) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), genKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + for _, err := range chain.AddRemoteTxs([]*types.Transaction{signedTx}) { + if err != nil { + t.Fatal(err) + } + } + <-txSubmitCh + nonce++ + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + + <-newTxPoolHeadChan + log.Info("Generated block with new counter contract creation", "blkNumber", block.NumberU64()) + + if txs := block.Transactions(); len(txs) != 1 { + t.Fatalf("Expected new block to contain 1 transaction, but found %d", len(txs)) + } + receipts := chain.GetReceiptsByHash(block.Hash()) + if len(receipts) != 1 { + t.Fatalf("Expected length of receipts to be 1, but found %d", len(receipts)) + } + contractAddr := receipts[0].ContractAddress + + // give Bob some initial balance + tx = types.NewTransaction(nonce, bobKey.Address, big.NewInt(300000000000000000), uint64(gasLimit), gasPrice, nil) + signedTx, err = types.SignTx(tx, types.NewEIP155Signer(chainID), genKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + chain.AddRemoteTxs([]*types.Transaction{signedTx}) + nonce++ + <-txSubmitCh + block, err = chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + + // Await block generation + <-newTxPoolHeadChan + if txs := block.Transactions(); len(txs) != 1 { + t.Fatalf("Expected new block to contain 1 transaction, but found %d", len(txs)) + } + + bobTransferInput, err := mcAbi.Pack("transfer", bobKey.Address, big.NewInt(0), big.NewInt(0), big.NewInt(100000000000000000)) + if err != nil { + t.Fatal(err) + } + contractTransferInput, err := mcAbi.Pack("transfer", contractAddr, big.NewInt(0), big.NewInt(0), big.NewInt(100000000000000000)) + if err != nil { + t.Fatal(err) + } + + // send 5 * 100000000000000000 to Bob + // send 5 * 100000000000000000 to the contract + for i := 0; i < 5; i++ { + // transfer some coin0 balance to Bob 
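+ // (Each transfer below is an ordinary transaction to the builtin contract
+ // at vm.BuiltinAddr whose payload was packed above as
+ // mcAbi.Pack("transfer", recipient, amount, coinid, amount2): 0 native wei
+ // and 100000000000000000 units of coin0 per call.)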
+ tx1 := types.NewTransaction(nonce, vm.BuiltinAddr, big.NewInt(0), uint64(gasLimit), gasPrice, bobTransferInput) + signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(chainID), genKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + nonce++ + + // transfer some coin0 balance to the contract + tx2 := types.NewTransaction(nonce, vm.BuiltinAddr, big.NewInt(0), uint64(gasLimit), gasPrice, contractTransferInput) + signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(chainID), genKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + nonce++ + + for _, err := range chain.AddRemoteTxs([]*types.Transaction{signedTx1, signedTx2}) { + if err != nil { + t.Fatal(err) + } + } + + <-txSubmitCh + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + + <-newTxPoolHeadChan + if txs := block.Transactions(); len(txs) != 2 { + t.Fatalf("Expected block to contain 2 transactions, but found %d", len(txs)) + } + } + + // test contract methods + // withdraw 10000000000000000 from contract to Bob + input, err := contractAbi.Pack("withdraw", big.NewInt(0), big.NewInt(0), big.NewInt(10000000000000000)) + if err != nil { + t.Fatal(err) + } + withdrawTx := types.NewTransaction(nonce, contractAddr, big.NewInt(0), uint64(gasLimit), gasPrice, input) + signedWithdrawTx, err := types.SignTx(withdrawTx, types.NewEIP155Signer(chainID), genKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + nonce++ + + input, err = contractAbi.Pack("updateBalance", big.NewInt(0)) + if err != nil { + t.Fatal(err) + } + updateBalanceTx := types.NewTransaction(nonce, contractAddr, big.NewInt(0), uint64(gasLimit), gasPrice, input) + signedUpdateBalanceTx, err := types.SignTx(updateBalanceTx, types.NewEIP155Signer(chainID), genKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + chain.AddRemoteTxs([]*types.Transaction{signedWithdrawTx, signedUpdateBalanceTx}) + + <-txSubmitCh + block, err = chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + + <-newTxPoolHeadChan + + if txs := block.Transactions(); len(txs) != 2 { + t.Fatalf("Expected new block to contain 2 transactions, but found %d", len(txs)) + } + + coin0 := common.HexToHash("0x0") + state, err := chain.CurrentState() + if err != nil { + t.Fatal(err) + } + + genMCBalance := state.GetBalanceMultiCoin(genKey.Address, coin0) + bobMCBalance := state.GetBalanceMultiCoin(bobKey.Address, coin0) + contractMCBalance := state.GetBalanceMultiCoin(contractAddr, coin0) + + log.Info(fmt.Sprintf("genesis balance = %s", state.GetBalance(genKey.Address))) + log.Info(fmt.Sprintf("genesis mcbalance(0) = %s", genMCBalance)) + log.Info(fmt.Sprintf("bob's balance = %s", state.GetBalance(bobKey.Address))) + log.Info(fmt.Sprintf("bob's mcbalance(0) = %s", bobMCBalance)) + log.Info(fmt.Sprintf("contract mcbalance(0) = %s", contractMCBalance)) + + if genMCBalance.Cmp(big.NewInt(10000000000000000)) != 0 { + t.Fatal("incorrect genesis MC balance") + } + if bobMCBalance.Cmp(big.NewInt(500000000000000000)) != 0 { + t.Fatal("incorrect bob's MC balance") + } + if contractMCBalance.Cmp(big.NewInt(490000000000000000)) != 0 { + t.Fatal("incorrect contract's MC balance") + } +} diff --git a/chain/payment_test.go b/chain/payment_test.go new file mode 100644 index 0000000000..ebe2295636 --- /dev/null +++ b/chain/payment_test.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
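+// Balance accounting asserted by the test below (sketch; names are the
+// test's own locals):
+//
+//	bobBalance              = numTxs * value
+//	totalFees               = numTxs * basicTxGasLimit * gasPrice
+//	bobBalance + genBalance = initialBalance - totalFees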
+ +package chain + +import ( + "math/big" + "testing" + + "github.com/tenderly/coreth/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// TestPayment tests basic payment (balance, not multi-coin) +func TestPayment(t *testing.T) { + chain, newTxPoolHeadChan, txSubmitCh := NewDefaultChain(t) + + // Mark the genesis block as accepted and start the chain + chain.Start() + defer chain.Stop() + + nonce := uint64(0) + numTxs := 10 + txs := make([]*types.Transaction, 0) + for i := 0; i < numTxs; i++ { + tx := types.NewTransaction(nonce, bob.Address, value, uint64(basicTxGasLimit), gasPrice, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), fundedKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + txs = append(txs, signedTx) + nonce++ + } + for _, err := range chain.AddRemoteTxs(txs) { + if err != nil { + t.Fatalf("Failed to add remote transactions due to %s", err) + } + } + <-txSubmitCh + + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndAccept(t, chain, block) + <-newTxPoolHeadChan + + if txs := block.Transactions(); len(txs) != numTxs { + t.Fatalf("Expected block to contain %d transactions, but found %d transactions", numTxs, len(txs)) + } + log.Info("Generated block", "BlockHash", block.Hash(), "BlockNumber", block.NumberU64()) + currentBlock := chain.BlockChain().CurrentBlock() + if currentBlock.Hash() != block.Hash() { + t.Fatalf("Found unexpected current block (%s, %d), expected (%s, %d)", currentBlock.Hash(), currentBlock.NumberU64(), block.Hash(), block.NumberU64()) + } + + // state, err := chain.BlockState(currentBlock) + state, err := chain.CurrentState() + if err != nil { + t.Fatal(err) + } + genBalance := state.GetBalance(fundedKey.Address) + bobBalance := state.GetBalance(bob.Address) + expectedBalance := new(big.Int).Mul(value, big.NewInt(int64(numTxs))) + if bobBalance.Cmp(expectedBalance) != 0 { + t.Fatalf("Found incorrect balance %d for bob's address, expected %d", bobBalance, expectedBalance) + } + + totalBalance := bobBalance.Add(bobBalance, genBalance) + totalFees := new(big.Int).Mul(big.NewInt(int64(numTxs*basicTxGasLimit)), gasPrice) + expectedTotalBalance := new(big.Int).Sub(initialBalance, totalFees) + if totalBalance.Cmp(expectedTotalBalance) != 0 { + t.Fatalf("Found incorrect total balance %d, expected total balance of %d", totalBalance, expectedTotalBalance) + } +} diff --git a/chain/subscribe_accepted_heads_test.go b/chain/subscribe_accepted_heads_test.go new file mode 100644 index 0000000000..7950c9e41d --- /dev/null +++ b/chain/subscribe_accepted_heads_test.go @@ -0,0 +1,110 @@ +package chain + +import ( + "math/big" + "testing" + + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +func TestAcceptedHeadSubscriptions(t *testing.T) { + chain, newTxPoolHeadChan, txSubmitCh := NewDefaultChain(t) + + chain.Start() + defer chain.Stop() + + ethBackend := chain.APIBackend() + + acceptedChainCh := make(chan core.ChainEvent, 1000) + chainCh := make(chan core.ChainEvent, 1000) + ethBackend.SubscribeChainAcceptedEvent(acceptedChainCh) + ethBackend.SubscribeChainEvent(chainCh) + + // *NOTE* this was pre-compiled for the test.. 
+ // src := `pragma solidity >=0.6.0; + // + // contract Counter { + // uint256 x; + // + // constructor() public { + // x = 42; + // } + // + // function add(uint256 y) public returns (uint256) { + // x = x + y; + // return x; + // } + // }` + // contracts, err := compiler.CompileSolidityString("", src) + // checkError(err) + // contract, _ := contracts[fmt.Sprintf("%s:%s", ".", "Counter")] + // _ = contract + + // solc-linux-amd64-v0.6.12+commit.27d51765 --bin -o counter.bin counter.sol + + code := common.Hex2Bytes( + "6080604052348015600f57600080fd5b50602a60008190555060b9806100266000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80631003e2d214602d575b600080fd5b605660048036036020811015604157600080fd5b8101908080359060200190929190505050606c565b6040518082815260200191505060405180910390f35b60008160005401600081905550600054905091905056fea26469706673582212200dc7c76677426e8c621c6839348a7c8d60787c546a9b9c7fc91efa57f71d46a364736f6c634300060c0033", + // contract.Code[2:], + ) + tx := types.NewContractCreation(uint64(0), big.NewInt(0), uint64(gasLimit), gasPrice, code) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), fundedKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + for _, err := range chain.AddRemoteTxs([]*types.Transaction{signedTx}) { + if err != nil { + t.Fatal(err) + } + } + <-txSubmitCh + + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndSetPreference(t, chain, block) + <-newTxPoolHeadChan + log.Info("Generated block with new counter contract creation", "blkNumber", block.NumberU64()) + + if block.NumberU64() != uint64(1) { + t.Fatalf("Expected to create a new block with height 1, but found %d", block.NumberU64()) + } + + select { + case fb := <-chainCh: + if fb.Block.NumberU64() != 1 { + t.Fatalf("received block with unexpected block number %d via chain channel", fb.Block.NumberU64()) + } + if fb.Block.Hash() != block.Hash() { + t.Fatalf("Received block with unexpected hash %s via chain channel", fb.Block.Hash().String()) + } + default: + t.Fatal("failed to receive block via chain channel") + } + + select { + case <-acceptedChainCh: + t.Fatalf("Received unexpected chain accept event before accepting block") + default: + } + + if err := chain.Accept(block); err != nil { + t.Fatal(err) + } + chain.BlockChain().DrainAcceptorQueue() + + select { + case fb := <-acceptedChainCh: + if fb.Block.NumberU64() != 1 { + t.Fatalf("received block with unexpected block number %d on accepted block channel", fb.Block.NumberU64()) + } + if fb.Block.Hash() != block.Hash() { + t.Fatalf("Received block with unexpected hash %s via accepted block channel", fb.Block.Hash().String()) + } + default: + t.Fatal("failed to receive block via accepted block channel") + } +} diff --git a/chain/subscribe_block_logs_test.go b/chain/subscribe_block_logs_test.go new file mode 100644 index 0000000000..2f47d44292 --- /dev/null +++ b/chain/subscribe_block_logs_test.go @@ -0,0 +1,294 @@ +package chain + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/tenderly/coreth/eth/filters" + + "github.com/tenderly/coreth/core/types" + "github.com/ethereum/go-ethereum/common" +) + +func TestBlockLogsAllowUnfinalized(t *testing.T) { + chain, newTxPoolHeadChan, txSubmitCh := NewDefaultChain(t) + + chain.Start() + defer chain.Stop() + + acceptedLogsCh := make(chan []*types.Log, 1000) + ethBackend := chain.APIBackend() + ethBackend.SubscribeAcceptedLogsEvent(acceptedLogsCh) + + api := filters.NewPublicFilterAPI(ethBackend, true, 5*time.Minute) + + // *NOTE* this was pre-compiled for the test..
+ /* + pragma solidity >=0.6.0; + + contract Counter { + uint256 x; + + event CounterEmit(uint256 indexed oldval, uint256 indexed newval); + + constructor() public { + emit CounterEmit(0, 42); + x = 42; + } + + function add(uint256 y) public returns (uint256) { + x = x + y; + emit CounterEmit(y, x); + return x; + } + } + */ + // contracts, err := compiler.CompileSolidityString("", src) + // checkError(err) + // contract, _ := contracts[fmt.Sprintf("%s:%s", ".", "Counter")] + // _ = contract + + // solc-linux-amd64-v0.6.12+commit.27d51765 --bin -o counter.bin counter.sol + + code := common.Hex2Bytes( + "608060405234801561001057600080fd5b50602a60007f53564ba0be98bdbd40460eb78d2387edab91de6a842e1449053dae1f07439a3160405160405180910390a3602a60008190555060e9806100576000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80631003e2d214602d575b600080fd5b605660048036036020811015604157600080fd5b8101908080359060200190929190505050606c565b6040518082815260200191505060405180910390f35b60008160005401600081905550600054827f53564ba0be98bdbd40460eb78d2387edab91de6a842e1449053dae1f07439a3160405160405180910390a3600054905091905056fea2646970667358221220dd9c84516cd903bf6a151cbdaef2f2514c28f2f422782a388a2774412b81f08864736f6c634300060c0033", + // contract.Code[2:], + ) + + tx := types.NewContractCreation(uint64(0), big.NewInt(0), uint64(gasLimit), gasPrice, code) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), fundedKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + for _, err := range chain.AddRemoteTxs([]*types.Transaction{signedTx}) { + if err != nil { + t.Fatal(err) + } + } + <-txSubmitCh + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndSetPreference(t, chain, block) + + <-newTxPoolHeadChan + + if block.NumberU64() != uint64(1) { + t.Fatalf("Expected to create a new block with height 1, but found %d", block.NumberU64()) + } + + ctx := context.Background() + fc := filters.FilterCriteria{ + FromBlock: big.NewInt(1), + ToBlock: big.NewInt(1), + } + + fid, err := api.NewFilter(fc) + if err != nil { + t.Fatalf("Failed to create NewFilter due to %s", err) + } + + chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = true + logs, err := api.GetLogs(ctx, fc) + if err != nil { + t.Fatalf("GetLogs failed due to %s", err) + } + if len(logs) != 1 { + t.Fatalf("Expected GetLogs to return 1 log, but found %d", len(logs)) + } + if logs[0].BlockNumber != 1 { + t.Fatalf("Expected GetLogs to return 1 log with block number 1, but found block number %d", logs[0].BlockNumber) + } + + logs, err = api.GetFilterLogs(ctx, fid) + if err != nil { + t.Fatalf("GetFilter failed due to %s", err) + } + if len(logs) != 1 { + t.Fatalf("Expected GetFilterLogs to return 1 log, but found %d", len(logs)) + } + if logs[0].BlockNumber != 1 { + t.Fatalf("Expected GetFilterLogs to return 1 log with BlockNumber 1, but found BlockNumber %d", logs[0].BlockNumber) + } + + // Fetching blocks from an unfinalized height without specifying a to height + // will not yield any logs because the to block is populated using the last + // accepted block. 
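+	// (At this point the last accepted block is still the genesis at height 0,
+	// so a FromBlock of 1 yields "begin block 1 is greater than end block 0".)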
+ fc2 := filters.FilterCriteria{ + FromBlock: big.NewInt(1), + } + + fid2, err := api.NewFilter(fc2) + if err != nil { + t.Fatalf("Failed to create NewFilter due to %s", err) + } + + logs, err = api.GetLogs(ctx, fc2) + if err == nil || err.Error() != "begin block 1 is greater than end block 0" { + t.Fatalf("Expected GetLogs to error about invalid range, but found error %s", err) + } + if len(logs) != 0 { + t.Fatalf("Expected GetLogs to return 0 log, but found %d", len(logs)) + } + + logs, err = api.GetFilterLogs(ctx, fid2) + if err == nil || err.Error() != "begin block 1 is greater than end block 0" { + t.Fatalf("Expected GetLogs to error about invalid range, but found error %s", err) + } + if len(logs) != 0 { + t.Fatalf("Expected GetFilterLogs to return 0 log, but found %d", len(logs)) + } + + chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = false + logs, err = api.GetLogs(ctx, fc) + if logs != nil { + t.Fatalf("Expected logs to be empty, but found %d logs", len(logs)) + } + if err == nil || err.Error() != "requested from block 1 after last accepted block 0" { + t.Fatalf("Expected GetLogs to error due to requesting above last accepted block, but found error %s", err) + } + + logs, err = api.GetFilterLogs(ctx, fid) + if logs != nil { + t.Fatalf("Expected GetFilterLogs to return empty logs, but found %d logs", len(logs)) + } + if err == nil || err.Error() != "requested from block 1 after last accepted block 0" { + t.Fatalf("Expected GetLogs to fail due to requesting block above last accepted block, but found error %s", err) + } + + logs, err = api.GetLogs(ctx, fc2) + if logs != nil { + t.Fatalf("Expected logs to be empty, but found %d logs", len(logs)) + } + if err == nil || err.Error() != "requested from block 1 after last accepted block 0" { + t.Fatalf("Expected GetLogs to error due to requesting above last accepted block, but found error %s", err) + } + + logs, err = api.GetFilterLogs(ctx, fid2) + if logs != nil { + t.Fatalf("Expected GetFilterLogs to return empty logs, but found %d logs", len(logs)) + } + if err == nil || err.Error() != "requested from block 1 after last accepted block 0" { + t.Fatalf("Expected GetLogs to fail due to requesting block above last accepted block, but found error %s", err) + } + + fc3 := filters.FilterCriteria{ + FromBlock: big.NewInt(0), + ToBlock: big.NewInt(1), + } + logs, err = api.GetLogs(ctx, fc3) + if logs != nil { + t.Fatalf("Expected GetLogs to return empty, but found %d logs", len(logs)) + } + if err == nil || err.Error() != "requested to block 1 after last accepted block 0" { + t.Fatalf("Expected GetLogs to error due to requesting block above last accepted block, but found error %s", err) + } + + fid3, err := api.NewFilter(fc3) + if err != nil { + t.Fatalf("NewFilter failed due to %s", err) + } + logs, err = api.GetFilterLogs(ctx, fid3) + if logs != nil { + t.Fatalf("Expected GetFilterLogs to return empty logs but found %d logs", len(logs)) + } + if err == nil || err.Error() != "requested to block 1 after last accepted block 0" { + t.Fatalf("Expected GetFilterLogs to fail due to requesting block above last accepted block, but found error %s", err) + } + + // Unless otherwise specified, getting the latest will still return the last + // accepted logs even when AllowUnfinalizedQueries = true. 
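+	// (An empty FilterCriteria leaves both bounds at the last accepted height,
+	// still 0 here, so no logs are returned until the block is accepted below.)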
+ fc4 := filters.FilterCriteria{} + logs, err = api.GetLogs(ctx, fc4) + if err != nil { + t.Fatalf("Failed to GetLogs for FilterCriteria with empty from and to block due to %s", err) + } + if len(logs) != 0 { + t.Fatalf("Expected GetLogs to return 0 logs, but found %d", len(logs)) + } + fid4, err := api.NewFilter(fc4) + if err != nil { + t.Fatalf("NewFilter failed due to %s", err) + } + logs, err = api.GetFilterLogs(ctx, fid4) + if err != nil { + t.Fatalf("GetFilterLogs failed due to %s", err) + } + if len(logs) != 0 { + t.Fatalf("Expected GetFilterLogs to return 0 logs, but found %d", len(logs)) + } + + select { + case <-acceptedLogsCh: + t.Fatal("Received accepted logs event before Accepting block") + default: + } + + if err := chain.Accept(block); err != nil { + t.Fatal(err) + } + chain.BlockChain().DrainAcceptorQueue() + + chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = false + logs, err = api.GetLogs(ctx, fc) + if err != nil { + t.Fatalf("GetLogs failed due to %s", err) + } + if len(logs) != 1 { + t.Fatalf("Expected GetLogs to return 1 log, but found %d", len(logs)) + } + if logs[0].BlockNumber != 1 { + t.Fatalf("Expected single log to have block number 1, but found %d", logs[0].BlockNumber) + } + + logs, err = api.GetFilterLogs(ctx, fid) + if err != nil { + t.Fatalf("GetFilterLogs failed due to %s", err) + } + if len(logs) != 1 { + t.Fatalf("Expected GetFilterLogs to return 1 log, but found %d", len(logs)) + } + if logs[0].BlockNumber != 1 { + t.Fatalf("Expected GetFilterLogs to return 1 log with BlockNumber 1, but found BlockNumber %d", logs[0].BlockNumber) + } + + logs, err = api.GetLogs(ctx, fc4) + if err != nil { + t.Fatalf("Failed to GetLogs for FilterCriteria with empty from and to block due to %s", err) + } + if len(logs) != 1 { + t.Fatalf("Expected GetLogs to return 1 log, but found %d", len(logs)) + } + if logs[0].BlockNumber != 1 { + t.Fatalf("Expected single log to have block number 1, but found %d", logs[0].BlockNumber) + } + fid4, err = api.NewFilter(fc4) + if err != nil { + t.Fatalf("NewFilter failed due to %s", err) + } + logs, err = api.GetFilterLogs(ctx, fid4) + if err != nil { + t.Fatalf("GetFilterLogs failed due to %s", err) + } + if len(logs) != 1 { + t.Fatalf("Expected GetFilterLogs to return 1 log, but found %d", len(logs)) + } + if logs[0].BlockNumber != 1 { + t.Fatalf("Expected GetFilterLogs to return 1 log with BlockNumber 1, but found BlockNumber %d", logs[0].BlockNumber) + } + + select { + case acceptedLogs := <-acceptedLogsCh: + if len(acceptedLogs) != 1 { + t.Fatalf("Expected accepted logs channel to return 1 log, but found %d", len(acceptedLogs)) + } + if acceptedLogs[0].BlockNumber != 1 { + t.Fatalf("Expected accepted logs channel to return 1 log with BlockNumber 1, but found BlockNumber %d", acceptedLogs[0].BlockNumber) + } + default: + t.Fatal("Failed to receive logs via accepted logs channel") + } +} diff --git a/chain/subscribe_transactions_test.go b/chain/subscribe_transactions_test.go new file mode 100644 index 0000000000..a9108487f8 --- /dev/null +++ b/chain/subscribe_transactions_test.go @@ -0,0 +1,129 @@ +package chain + +import ( + "math/big" + "testing" + + "github.com/tenderly/coreth/eth/filters" + + "github.com/tenderly/coreth/core/types" + "github.com/ethereum/go-ethereum/common" +) + +func TestSubscribeTransactions(t *testing.T) { + chain, newTxPoolHeadChan, txSubmitCh := NewDefaultChain(t) + + ethBackend := chain.APIBackend() + eventSystem := filters.NewEventSystem(ethBackend, true) + + acceptedTxsEventsChannel := make(chan
[]common.Hash) + acceptedTxsEvents := eventSystem.SubscribeAcceptedTxs(acceptedTxsEventsChannel) + + pendingTxsEventsChannel := make(chan []common.Hash) + pendingTxsEvents := eventSystem.SubscribePendingTxs(pendingTxsEventsChannel) + + chain.Start() + defer chain.Stop() + + // *NOTE* this was pre-compiled for the test. + /* + pragma solidity >=0.6.0; + + contract Counter { + uint256 x; + + event CounterEmit(uint256 indexed oldval, uint256 indexed newval); + + constructor() public { + emit CounterEmit(0, 42); + x = 42; + } + + function add(uint256 y) public returns (uint256) { + x = x + y; + emit CounterEmit(y, x); + return x; + } + } + */ + // contracts, err := compiler.CompileSolidityString("", src) + // checkError(err) + // contract, _ := contracts[fmt.Sprintf("%s:%s", ".", "Counter")] + // _ = contract + + // solc-linux-amd64-v0.6.12+commit.27d51765 --bin -o counter.bin counter.sol + + code := common.Hex2Bytes( + "608060405234801561001057600080fd5b50602a60007f53564ba0be98bdbd40460eb78d2387edab91de6a842e1449053dae1f07439a3160405160405180910390a3602a60008190555060e9806100576000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80631003e2d214602d575b600080fd5b605660048036036020811015604157600080fd5b8101908080359060200190929190505050606c565b6040518082815260200191505060405180910390f35b60008160005401600081905550600054827f53564ba0be98bdbd40460eb78d2387edab91de6a842e1449053dae1f07439a3160405160405180910390a3600054905091905056fea2646970667358221220dd9c84516cd903bf6a151cbdaef2f2514c28f2f422782a388a2774412b81f08864736f6c634300060c0033", + // contract.Code[2:], + ) + + tx := types.NewContractCreation(uint64(0), big.NewInt(0), uint64(gasLimit), gasPrice, code) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), fundedKey.PrivateKey) + if err != nil { + t.Fatal(err) + } + for _, err := range chain.AddRemoteTxs([]*types.Transaction{signedTx}) { + if err != nil { + t.Fatal(err) + } + } + txs := <-txSubmitCh + block, err := chain.GenerateBlock() + if err != nil { + t.Fatal(err) + } + insertAndSetPreference(t, chain, block) + + <-newTxPoolHeadChan + + select { + case <-acceptedTxsEventsChannel: + t.Fatal("Unexpected accepted tx head") + default: + } + + pendingTx := <-pendingTxsEventsChannel + if len(pendingTx) != 1 { + t.Fatal("Expected a new pending tx") + } + if pendingTx[0] != signedTx.Hash() { + t.Fatalf("Expected a new pending tx for signed hash %s", signedTx.Hash().String()) + } + + if err := chain.Accept(block); err != nil { + t.Fatal(err) + } + + pendingTxsEventHash := <-pendingTxsEventsChannel + + acceptedTxsEventHash := <-acceptedTxsEventsChannel + + pendingTxsEvents.Unsubscribe() + acceptedTxsEvents.Unsubscribe() + + if block.NumberU64() != uint64(1) { + t.Fatalf("Expected to create a new block with height 1, but found %d", block.NumberU64()) + } + + if len(pendingTxsEventHash) != 1 { + t.Fatal("Expected a new pending tx") + } + if pendingTxsEventHash[0] != signedTx.Hash() { + t.Fatalf("Expected a new pending tx for signed hash %s", signedTx.Hash().String()) + } + + if len(acceptedTxsEventHash) != 1 { + t.Fatal("Expected a new accepted tx") + } + if acceptedTxsEventHash[0] != signedTx.Hash() { + t.Fatalf("Expected a new accepted tx for signed hash %s", signedTx.Hash().String()) + } + + if len(txs.Txs) != 1 { + t.Fatal("Expected to create a new tx") + } + if txs.Txs[0].Hash() != signedTx.Hash() { + t.Fatalf("Expected subscription for signed hash %s", signedTx.Hash().String()) + } +} diff --git a/chain/test_chain.go b/chain/test_chain.go new file mode 100644
index 0000000000..85da35a176 --- /dev/null +++ b/chain/test_chain.go @@ -0,0 +1,126 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package chain + +import ( + "crypto/rand" + "math/big" + "testing" + + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/eth" + "github.com/tenderly/coreth/eth/ethconfig" + "github.com/tenderly/coreth/node" + "github.com/tenderly/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var ( + basicTxGasLimit = 21000 + fundedKey, bob, alice *keystore.Key + initialBalance = big.NewInt(1000000000000000000) + chainID = big.NewInt(1) + value = big.NewInt(1000000000000) + gasLimit = 1000000 + gasPrice = big.NewInt(params.LaunchMinGasPrice) +) + +func init() { + genKey, err := keystore.NewKey(rand.Reader) + if err != nil { + panic(err) + } + fundedKey = genKey + genKey, err = keystore.NewKey(rand.Reader) + if err != nil { + panic(err) + } + bob = genKey + genKey, err = keystore.NewKey(rand.Reader) + if err != nil { + panic(err) + } + alice = genKey +} + +func NewDefaultChain(t *testing.T) (*ETHChain, chan core.NewTxPoolHeadEvent, <-chan core.NewTxsEvent) { + // configure the chain + config := ethconfig.NewDefaultConfig() + chainConfig := &params.ChainConfig{ + ChainID: chainID, + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + } + + config.Genesis = &core.Genesis{ + Config: chainConfig, + Nonce: 0, + Number: 0, + ExtraData: hexutil.MustDecode("0x00"), + GasLimit: 100000000, + Difficulty: big.NewInt(0), + Alloc: core.GenesisAlloc{fundedKey.Address: {Balance: initialBalance}}, + } + + var ( + chain *ETHChain + err error + ) + chain, err = NewETHChain( + &config, + &node.Config{}, + rawdb.NewMemoryDatabase(), + eth.DefaultSettings, + new(dummy.ConsensusCallbacks), + common.Hash{}, + &mockable.Clock{}, + ) + if err != nil { + t.Fatal(err) + } + + newTxPoolHeadChan := make(chan core.NewTxPoolHeadEvent, 1) + chain.GetTxPool().SubscribeNewHeadEvent(newTxPoolHeadChan) + + txSubmitCh := chain.GetTxSubmitCh() + return chain, newTxPoolHeadChan, txSubmitCh +} + +// insertAndAccept inserts [block] into [chain], sets the chain's preference to it +// and then Accepts it.
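+// It fails the test immediately if any of the three steps returns an error.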
+func insertAndAccept(t *testing.T, chain *ETHChain, block *types.Block) { + if err := chain.InsertBlock(block); err != nil { + t.Fatal(err) + } + if err := chain.SetPreference(block); err != nil { + t.Fatal(err) + } + if err := chain.Accept(block); err != nil { + t.Fatal(err) + } +} + +func insertAndSetPreference(t *testing.T, chain *ETHChain, block *types.Block) { + if err := chain.InsertBlock(block); err != nil { + t.Fatal(err) + } + if err := chain.SetPreference(block); err != nil { + t.Fatal(err) + } +} diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index a6f670ea4d..daa2f0080e 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -29,91 +29,124 @@ package main import ( "encoding/json" "fmt" - "io" + "io/ioutil" "os" + "path/filepath" "regexp" "strings" - "github.com/ava-labs/coreth/accounts/abi/bind" - "github.com/ava-labs/coreth/cmd/utils" - "github.com/ava-labs/coreth/internal/flags" + "github.com/tenderly/coreth/accounts/abi" + "github.com/tenderly/coreth/accounts/abi/bind" + "github.com/tenderly/coreth/internal/flags" + "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/compiler" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/urfave/cli/v2" + "gopkg.in/urfave/cli.v1" ) var ( + // Git SHA1 commit hash of the release (set via linker flags) + gitCommit = "" + gitDate = "" + + app *cli.App + // Flags needed by abigen - abiFlag = &cli.StringFlag{ + abiFlag = cli.StringFlag{ Name: "abi", Usage: "Path to the Ethereum contract ABI json to bind, - for STDIN", } - binFlag = &cli.StringFlag{ + binFlag = cli.StringFlag{ Name: "bin", Usage: "Path to the Ethereum contract bytecode (generate deploy method)", } - typeFlag = &cli.StringFlag{ + typeFlag = cli.StringFlag{ Name: "type", Usage: "Struct name for the binding (default = package name)", } - jsonFlag = &cli.StringFlag{ + jsonFlag = cli.StringFlag{ Name: "combined-json", - Usage: "Path to the combined-json file generated by compiler, - for STDIN", + Usage: "Path to the combined-json file generated by compiler", + } + solFlag = cli.StringFlag{ + Name: "sol", + Usage: "Path to the Ethereum contract Solidity source to build and bind", + } + solcFlag = cli.StringFlag{ + Name: "solc", + Usage: "Solidity compiler to use if source builds are requested", + Value: "solc", } - excFlag = &cli.StringFlag{ + vyFlag = cli.StringFlag{ + Name: "vy", + Usage: "Path to the Ethereum contract Vyper source to build and bind", + } + vyperFlag = cli.StringFlag{ + Name: "vyper", + Usage: "Vyper compiler to use if source builds are requested", + Value: "vyper", + } + excFlag = cli.StringFlag{ Name: "exc", Usage: "Comma separated types to exclude from binding", } - pkgFlag = &cli.StringFlag{ + pkgFlag = cli.StringFlag{ Name: "pkg", Usage: "Package name to generate the binding into", } - outFlag = &cli.StringFlag{ + outFlag = cli.StringFlag{ Name: "out", Usage: "Output file for the generated binding (default = stdout)", } - langFlag = &cli.StringFlag{ + langFlag = cli.StringFlag{ Name: "lang", - Usage: "Destination language for the bindings (go)", + Usage: "Destination language for the bindings (go, java, objc)", Value: "go", } - aliasFlag = &cli.StringFlag{ + aliasFlag = cli.StringFlag{ Name: "alias", Usage: "Comma separated aliases for function and event renaming, e.g. 
original1=alias1, original2=alias2", } ) -var app = flags.NewApp("Ethereum ABI wrapper code generator") - func init() { - app.Name = "abigen" + app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app.Flags = []cli.Flag{ abiFlag, binFlag, typeFlag, jsonFlag, + solFlag, + solcFlag, + vyFlag, + vyperFlag, excFlag, pkgFlag, outFlag, langFlag, aliasFlag, } - app.Action = abigen + app.Action = utils.MigrateFlags(abigen) + cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate } func abigen(c *cli.Context) error { - utils.CheckExclusive(c, abiFlag, jsonFlag) // Only one source can be selected. - - if c.String(pkgFlag.Name) == "" { + utils.CheckExclusive(c, abiFlag, jsonFlag, solFlag, vyFlag) // Only one source can be selected. + if c.GlobalString(pkgFlag.Name) == "" { utils.Fatalf("No destination package specified (--pkg)") } var lang bind.Lang - switch c.String(langFlag.Name) { + switch c.GlobalString(langFlag.Name) { case "go": lang = bind.LangGo + case "java": + lang = bind.LangJava + case "objc": + lang = bind.LangObjC + utils.Fatalf("Objc binding generation is uncompleted") default: - utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name)) + utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.GlobalString(langFlag.Name)) } // If the entire solidity code was specified, build and bind based on that var ( @@ -124,17 +157,17 @@ func abigen(c *cli.Context) error { libs = make(map[string]string) aliases = make(map[string]string) ) - if c.String(abiFlag.Name) != "" { + if c.GlobalString(abiFlag.Name) != "" { // Load up the ABI, optional bytecode and type name from the parameters var ( abi []byte err error ) - input := c.String(abiFlag.Name) + input := c.GlobalString(abiFlag.Name) if input == "-" { - abi, err = io.ReadAll(os.Stdin) + abi, err = ioutil.ReadAll(os.Stdin) } else { - abi, err = os.ReadFile(input) + abi, err = ioutil.ReadFile(input) } if err != nil { utils.Fatalf("Failed to read input ABI: %v", err) @@ -142,8 +175,8 @@ func abigen(c *cli.Context) error { abis = append(abis, string(abi)) var bin []byte - if binFile := c.String(binFlag.Name); binFile != "" { - if bin, err = os.ReadFile(binFile); err != nil { + if binFile := c.GlobalString(binFlag.Name); binFile != "" { + if bin, err = ioutil.ReadFile(binFile); err != nil { utils.Fatalf("Failed to read input bytecode: %v", err) } if strings.Contains(string(bin), "//") { @@ -152,35 +185,47 @@ func abigen(c *cli.Context) error { } bins = append(bins, string(bin)) - kind := c.String(typeFlag.Name) + kind := c.GlobalString(typeFlag.Name) if kind == "" { - kind = c.String(pkgFlag.Name) + kind = c.GlobalString(pkgFlag.Name) } types = append(types, kind) } else { // Generate the list of types to exclude from binding - var exclude *nameFilter - if c.IsSet(excFlag.Name) { - var err error - if exclude, err = newNameFilter(strings.Split(c.String(excFlag.Name), ",")...); err != nil { - utils.Fatalf("Failed to parse excludes: %v", err) - } + exclude := make(map[string]bool) + for _, kind := range strings.Split(c.GlobalString(excFlag.Name), ",") { + exclude[strings.ToLower(kind)] = true } + var err error var contracts map[string]*compiler.Contract - if c.IsSet(jsonFlag.Name) { - var ( - input = c.String(jsonFlag.Name) - jsonOutput []byte - err error - ) - if input == "-" { - jsonOutput, err = io.ReadAll(os.Stdin) - } else { - jsonOutput, err = os.ReadFile(input) + switch { + case c.GlobalIsSet(solFlag.Name): + contracts, err = compiler.CompileSolidity(c.GlobalString(solcFlag.Name), 
c.GlobalString(solFlag.Name)) + if err != nil { + utils.Fatalf("Failed to build Solidity contract: %v", err) + } + case c.GlobalIsSet(vyFlag.Name): + output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name)) + if err != nil { + utils.Fatalf("Failed to build Vyper contract: %v", err) } + contracts = make(map[string]*compiler.Contract) + for n, contract := range output { + name := n + // Sanitize the combined json names to match the + // format expected by solidity. + if !strings.Contains(n, ":") { + // Remove extra path components + name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy")) + } + contracts[name] = contract + } + + case c.GlobalIsSet(jsonFlag.Name): + jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name)) if err != nil { - utils.Fatalf("Failed to read combined-json: %v", err) + utils.Fatalf("Failed to read combined-json from compiler: %v", err) } contracts, err = compiler.ParseCombinedJSON(jsonOutput, "", "", "", "") if err != nil { @@ -189,11 +234,7 @@ func abigen(c *cli.Context) error { } // Gather all non-excluded contract for binding for name, contract := range contracts { - // fully qualified name is of the form : - nameParts := strings.Split(name, ":") - typeName := nameParts[len(nameParts)-1] - if exclude != nil && exclude.Matches(name) { - fmt.Fprintf(os.Stderr, "excluding: %v\n", name) + if exclude[strings.ToLower(name)] { continue } abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse @@ -203,46 +244,43 @@ func abigen(c *cli.Context) error { abis = append(abis, string(abi)) bins = append(bins, contract.Code) sigs = append(sigs, contract.Hashes) - types = append(types, typeName) + nameParts := strings.Split(name, ":") + types = append(types, nameParts[len(nameParts)-1]) - // Derive the library placeholder which is a 34 character prefix of the - // hex encoding of the keccak256 hash of the fully qualified library name. - // Note that the fully qualified library name is the path of its source - // file and the library name separated by ":". - libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x - libs[libPattern] = typeName + libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] + libs[libPattern] = nameParts[len(nameParts)-1] } } // Extract all aliases from the flags - if c.IsSet(aliasFlag.Name) { + if c.GlobalIsSet(aliasFlag.Name) { // We support multi-versions for aliasing // e.g. 
// foo=bar,foo2=bar2 // foo:bar,foo2:bar2 re := regexp.MustCompile(`(?:(\w+)[:=](\w+))`) - submatches := re.FindAllStringSubmatch(c.String(aliasFlag.Name), -1) + submatches := re.FindAllStringSubmatch(c.GlobalString(aliasFlag.Name), -1) for _, match := range submatches { aliases[match[1]] = match[2] } } // Generate the contract binding - code, err := bind.Bind(types, abis, bins, sigs, c.String(pkgFlag.Name), lang, libs, aliases) + code, err := bind.Bind(types, abis, bins, sigs, c.GlobalString(pkgFlag.Name), lang, libs, aliases) if err != nil { utils.Fatalf("Failed to generate ABI binding: %v", err) } // Either flush it out to a file or display on the standard output - if !c.IsSet(outFlag.Name) { + if !c.GlobalIsSet(outFlag.Name) { fmt.Printf("%s\n", code) return nil } - if err := os.WriteFile(c.String(outFlag.Name), []byte(code), 0600); err != nil { + if err := ioutil.WriteFile(c.GlobalString(outFlag.Name), []byte(code), 0600); err != nil { utils.Fatalf("Failed to write ABI binding: %v", err) } return nil } func main() { - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) if err := app.Run(os.Args); err != nil { fmt.Fprintln(os.Stderr, err) diff --git a/consensus/consensus.go b/consensus/consensus.go index d4e247ceaf..2160b58bf9 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -30,9 +30,9 @@ package consensus import ( "math/big" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common" ) diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 797966156b..8bb91a5aa4 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -10,13 +10,12 @@ import ( "math/big" "time" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" + "github.com/tenderly/coreth/trie" "github.com/ethereum/go-ethereum/common" ) @@ -32,14 +31,16 @@ var ( errExtDataGasUsedTooLarge = errors.New("extDataGasUsed is not uint64") ) -type Mode struct { - ModeSkipHeader bool - ModeSkipBlockFee bool - ModeSkipCoinbase bool -} +type Mode uint + +const ( + ModeSkipHeader Mode = 1 // Skip over header verification + ModeSkipBlockFee Mode = 2 // Skip block fee verification +) type ( OnFinalizeAndAssembleCallbackType = func(header *types.Header, state *state.StateDB, txs []*types.Transaction) (extraData []byte, blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) + OnAPIsCallbackType = func(consensus.ChainHeaderReader) []rpc.API OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB) (blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) ConsensusCallbacks struct { @@ -48,62 +49,45 @@ type ( } DummyEngine struct { - cb ConsensusCallbacks - clock *mockable.Clock + cb *ConsensusCallbacks consensusMode Mode } 
) -func NewETHFaker() *DummyEngine { - return &DummyEngine{ - clock: &mockable.Clock{}, - consensusMode: Mode{ModeSkipBlockFee: true}, - } -} - -func NewFaker() *DummyEngine { - return &DummyEngine{ - clock: &mockable.Clock{}, - } -} - -func NewFakerWithClock(cb ConsensusCallbacks, clock *mockable.Clock) *DummyEngine { +func NewDummyEngine(cb *ConsensusCallbacks) *DummyEngine { return &DummyEngine{ - cb: cb, - clock: clock, + cb: cb, } } -func NewFakerWithCallbacks(cb ConsensusCallbacks) *DummyEngine { +func NewETHFaker() *DummyEngine { return &DummyEngine{ - cb: cb, - clock: &mockable.Clock{}, + cb: new(ConsensusCallbacks), + consensusMode: ModeSkipBlockFee, } } -func NewFakerWithMode(cb ConsensusCallbacks, mode Mode) *DummyEngine { +func NewComplexETHFaker(cb *ConsensusCallbacks) *DummyEngine { return &DummyEngine{ cb: cb, - clock: &mockable.Clock{}, - consensusMode: mode, + consensusMode: ModeSkipBlockFee, } } -func NewCoinbaseFaker() *DummyEngine { - return &DummyEngine{ - clock: &mockable.Clock{}, - consensusMode: Mode{ModeSkipCoinbase: true}, - } +func NewFaker() *DummyEngine { + return NewDummyEngine(new(ConsensusCallbacks)) } func NewFullFaker() *DummyEngine { return &DummyEngine{ - clock: &mockable.Clock{}, - consensusMode: Mode{ModeSkipHeader: true}, + cb: new(ConsensusCallbacks), + consensusMode: ModeSkipHeader, } } func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, header *types.Header, parent *types.Header) error { + timestamp := new(big.Int).SetUint64(header.Time) + // Verify that the gas limit is <= 2^63-1 if header.GasLimit > params.MaxGasLimit { return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit) @@ -112,13 +96,9 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade if header.GasUsed > header.GasLimit { return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) } - if config.IsCortina(header.Time) { - if header.GasLimit != params.CortinaGasLimit { - return fmt.Errorf("expected gas limit to be %d in Cortina, but found %d", params.CortinaGasLimit, header.GasLimit) - } - } else if config.IsApricotPhase1(header.Time) { + if config.IsApricotPhase1(timestamp) { if header.GasLimit != params.ApricotPhase1GasLimit { - return fmt.Errorf("expected gas limit to be %d in ApricotPhase1, but found %d", params.ApricotPhase1GasLimit, header.GasLimit) + return fmt.Errorf("expected gas limit to be %d, but found %d", params.ApricotPhase1GasLimit, header.GasLimit) } } else { // Verify that the gas limit remains within allowed bounds @@ -133,7 +113,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade } } - if !config.IsApricotPhase3(header.Time) { + if !config.IsApricotPhase3(timestamp) { // Verify BaseFee is not present before AP3 if header.BaseFee != nil { return fmt.Errorf("invalid baseFee before fork: have %d, want <nil>", header.BaseFee) } @@ -145,7 +125,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade if err != nil { return fmt.Errorf("failed to calculate base fee: %w", err) } - if len(header.Extra) < len(expectedRollupWindowBytes) || !bytes.Equal(expectedRollupWindowBytes, header.Extra[:len(expectedRollupWindowBytes)]) { + if !bytes.Equal(expectedRollupWindowBytes, header.Extra) { return fmt.Errorf("expected rollup window bytes: %x, found %x", expectedRollupWindowBytes, header.Extra) } if header.BaseFee == nil { @@ -160,7 +140,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config
*params.ChainConfig, heade } // Verify BlockGasCost, ExtDataGasUsed not present before AP4 - if !config.IsApricotPhase4(header.Time) { + if !config.IsApricotPhase4(timestamp) { if header.BlockGasCost != nil { return fmt.Errorf("invalid blockGasCost before fork: have %d, want <nil>", header.BlockGasCost) } @@ -172,7 +152,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade // Enforce BlockGasCost constraints blockGasCostStep := ApricotPhase4BlockGasCostStep - if config.IsApricotPhase5(header.Time) { + if config.IsApricotPhase5(timestamp) { blockGasCostStep = ApricotPhase5BlockGasCostStep } expectedBlockGasCost := calcBlockGasCost( @@ -205,36 +185,33 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade // modified from consensus.go func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header, uncle bool) error { - config := chain.Config() + var ( + config = chain.Config() + timestamp = new(big.Int).SetUint64(header.Time) + ) // Ensure that we do not verify an uncle if uncle { return errUnclesUnsupported } - switch { - case config.IsDurango(header.Time): - if len(header.Extra) < params.DynamicFeeExtraDataSize { - return fmt.Errorf("expected extra-data field length >= %d, found %d", params.DynamicFeeExtraDataSize, len(header.Extra)) - } - case config.IsApricotPhase3(header.Time): - if len(header.Extra) != params.DynamicFeeExtraDataSize { - return fmt.Errorf("expected extra-data field to be: %d, but found %d", params.DynamicFeeExtraDataSize, len(header.Extra)) - } - default: + // Ensure that the header's extra-data section is of a reasonable size + if !config.IsApricotPhase3(timestamp) { if uint64(len(header.Extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) } + } else { + if uint64(len(header.Extra)) != params.ApricotPhase3ExtraDataSize { + return fmt.Errorf("expected extra-data field to be: %d, but found %d", params.ApricotPhase3ExtraDataSize, len(header.Extra)) + } } // Ensure gas-related header fields are correct if err := self.verifyHeaderGasFields(config, header, parent); err != nil { return err } - // Verify the header's timestamp - if header.Time > uint64(self.clock.Time().Add(allowedFutureBlockTime).Unix()) { + if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) { return consensus.ErrFutureBlock } - // Verify the header's timestamp is not earlier than parent's - // it does include equality(==), so multiple blocks per second is ok + //if header.Time <= parent.Time { if header.Time < parent.Time { return errInvalidBlockTime } @@ -242,28 +219,6 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 { return consensus.ErrInvalidNumber } - // Verify the existence / non-existence of excessBlobGas - cancun := chain.Config().IsCancun(header.Number, header.Time) - if !cancun { - switch { - case header.ExcessBlobGas != nil: - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *header.ExcessBlobGas) - case header.BlobGasUsed != nil: - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *header.BlobGasUsed) - case header.ParentBeaconRoot != nil: - return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", *header.ParentBeaconRoot) - } - } else { - if header.ParentBeaconRoot == nil { - return errors.New("header is missing
beaconRoot") - } - if *header.ParentBeaconRoot != (common.Hash{}) { - return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected empty", *header.ParentBeaconRoot) - } - if err := eip4844.VerifyEIP4844Header(parent, header); err != nil { - return err - } - } return nil } @@ -273,7 +228,7 @@ func (self *DummyEngine) Author(header *types.Header) (common.Address, error) { func (self *DummyEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { // If we're running a full engine faking, accept any input as valid - if self.consensusMode.ModeSkipHeader { + if self.consensusMode == ModeSkipHeader { return nil } // Short circuit if the header is known, or it's parent not @@ -308,7 +263,7 @@ func (self *DummyEngine) verifyBlockFee( receipts []*types.Receipt, extraStateChangeContribution *big.Int, ) error { - if self.consensusMode.ModeSkipBlockFee { + if self.consensusMode == ModeSkipBlockFee { return nil } if baseFee == nil || baseFee.Sign() <= 0 { @@ -332,7 +287,7 @@ func (self *DummyEngine) verifyBlockFee( totalBlockFee.Add(totalBlockFee, extraStateChangeContribution) } - // Calculate the total excess (denominated in AVAX) over the base fee that was paid towards the block fee + // Calculate the total excess over the base fee that was paid towards the block fee for i, receipt := range receipts { // Each transaction contributes the excess over the baseFee towards the totalBlockFee // This should be equivalent to the sum of the "priority fees" within EIP-1559. @@ -340,20 +295,11 @@ func (self *DummyEngine) verifyBlockFee( if err != nil { return err } - // Multiply the [txFeePremium] by the gasUsed in the transaction since this gives the total AVAX that was paid - // above the amount required if the transaction had simply paid the minimum base fee for the block. - // - // Ex. LegacyTx paying a gas price of 100 gwei for 1M gas in a block with a base fee of 10 gwei. - // Total Fee = 100 gwei * 1M gas - // Minimum Fee = 10 gwei * 1M gas (minimum fee that would have been accepted for this transaction) - // Fee Premium = 90 gwei - // Total Overpaid = 90 gwei * 1M gas - blockFeeContribution.Mul(txFeePremium, gasUsed.SetUint64(receipt.GasUsed)) totalBlockFee.Add(totalBlockFee, blockFeeContribution) } // Calculate how much gas the [totalBlockFee] would purchase at the price level - // set by the base fee of this block. + // set by this block. blockGas := new(big.Int).Div(totalBlockFee, baseFee) // Require that the amount of gas purchased by the effective tips within the block, [blockGas], @@ -382,7 +328,7 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type return err } } - if chain.Config().IsApricotPhase4(block.Time()) { + if chain.Config().IsApricotPhase4(new(big.Int).SetUint64(block.Time())) { // Validate extDataGasUsed and BlockGasCost match expectations // // NOTE: This is a duplicate check of what is already performed in @@ -394,11 +340,9 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", blockExtDataGasUsed, extDataGasUsed) } blockGasCostStep := ApricotPhase4BlockGasCostStep - if chain.Config().IsApricotPhase5(block.Time()) { + if chain.Config().IsApricotPhase5(new(big.Int).SetUint64(block.Time())) { blockGasCostStep = ApricotPhase5BlockGasCostStep } - // Calculate the expected blockGasCost for this block. - // Note: this is a deterministic transtion that defines an exact block fee for this block. 
blockGasCost := calcBlockGasCost( ApricotPhase4TargetBlockRate, ApricotPhase4MinBlockGasCost, @@ -407,11 +351,9 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type parent.BlockGasCost, parent.Time, block.Time(), ) - // Verify the BlockGasCost set in the header matches the calculated value. if blockBlockGasCost := block.BlockGasCost(); blockBlockGasCost == nil || !blockBlockGasCost.IsUint64() || blockBlockGasCost.Cmp(blockGasCost) != 0 { return fmt.Errorf("invalid blockGasCost: have %d, want %d", blockBlockGasCost, blockGasCost) } - // Verify the block fee was paid. if err := self.verifyBlockFee( block.BaseFee(), block.BlockGasCost(), @@ -427,8 +369,7 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type } func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header, state *state.StateDB, txs []*types.Transaction, - uncles []*types.Header, receipts []*types.Receipt, -) (*types.Block, error) { + uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { var ( contribution, extDataGasUsed *big.Int extraData []byte @@ -440,16 +381,15 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, return nil, err } } - if chain.Config().IsApricotPhase4(header.Time) { + if chain.Config().IsApricotPhase4(new(big.Int).SetUint64(header.Time)) { header.ExtDataGasUsed = extDataGasUsed if header.ExtDataGasUsed == nil { header.ExtDataGasUsed = new(big.Int).Set(common.Big0) } blockGasCostStep := ApricotPhase4BlockGasCostStep - if chain.Config().IsApricotPhase5(header.Time) { + if chain.Config().IsApricotPhase5(new(big.Int).SetUint64(header.Time)) { blockGasCostStep = ApricotPhase5BlockGasCostStep } - // Calculate the required block gas cost for this block. header.BlockGasCost = calcBlockGasCost( ApricotPhase4TargetBlockRate, ApricotPhase4MinBlockGasCost, @@ -458,7 +398,6 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, parent.BlockGasCost, parent.Time, header.Time, ) - // Verify that this block covers the block fee. 
+ if err := self.verifyBlockFee( header.BaseFee, header.BlockGasCost, @@ -473,9 +412,9 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlockWithExtData( - header, txs, uncles, receipts, trie.NewStackTrie(nil), - extraData, chain.Config().IsApricotPhase1(header.Time), + return types.NewBlock( + header, txs, uncles, receipts, new(trie.Trie), extraData, + chain.Config().IsApricotPhase1(new(big.Int).SetUint64(header.Time)), ), nil } diff --git a/consensus/dummy/consensus_test.go b/consensus/dummy/consensus_test.go index 64a8439817..702d4efd1b 100644 --- a/consensus/dummy/consensus_test.go +++ b/consensus/dummy/consensus_test.go @@ -8,7 +8,7 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" ) diff --git a/consensus/dummy/dynamic_fees.go b/consensus/dummy/dynamic_fees.go index 1f7d375deb..8f4a38c863 100644 --- a/consensus/dummy/dynamic_fees.go +++ b/consensus/dummy/dynamic_fees.go @@ -9,8 +9,8 @@ import ( "math/big" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" ) diff --git a/consensus/dummy/dynamic_fees_test.go b/consensus/dummy/dynamic_fees_test.go index c8218140c0..4fd373654a 100644 --- a/consensus/dummy/dynamic_fees_test.go +++ b/consensus/dummy/dynamic_fees_test.go @@ -8,8 +8,8 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/assert" diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go new file mode 100644 index 0000000000..a4e582a9f8 --- /dev/null +++ b/consensus/misc/dao.go @@ -0,0 +1,95 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package misc + +import ( + "bytes" + "errors" + "math/big" + + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" +) + +var ( + // ErrBadProDAOExtra is returned if a header doesn't support the DAO fork on a + // pro-fork client. + ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data") + + // ErrBadNoDAOExtra is returned if a header does support the DAO fork on a no- + // fork client. + ErrBadNoDAOExtra = errors.New("bad DAO no-fork extra-data") +) + +// VerifyDAOHeaderExtraData validates the extra-data field of a block header to +// ensure it conforms to DAO hard-fork rules. +// +// DAO hard-fork extension to the header validity: +// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range +// with the fork specific extra-data set +// b) if the node is pro-fork, require blocks in the specific range to have the +// unique extra-data set. +func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error { + // Short circuit validation if the node doesn't care about the DAO fork + if config.DAOForkBlock == nil { + return nil + } + // Make sure the block is within the fork's modified extra-data range + limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange) + if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 { + return nil + } + // Depending on whether we support or oppose the fork, validate the extra-data contents + if config.DAOForkSupport { + if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) { + return ErrBadProDAOExtra + } + } else { + if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { + return ErrBadNoDAOExtra + } + } + // All ok, header has the same extra-data we expect + return nil +} + +// ApplyDAOHardFork modifies the state database according to the DAO hard-fork +// rules, transferring all balances of a set of DAO accounts to a single refund +// contract. 
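+// Zeroing each drained balance makes the operation idempotent: applying the +// fork a second time moves nothing.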
+func ApplyDAOHardFork(statedb *state.StateDB) { + // Retrieve the contract to refund balances into + if !statedb.Exist(params.DAORefundContract) { + statedb.CreateAccount(params.DAORefundContract) + } + + // Move every DAO account and extra-balance account funds into the refund contract + for _, addr := range params.DAODrainList() { + statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr)) + statedb.SetBalance(addr, new(big.Int)) + } +} diff --git a/constants/constants.go b/constants/constants.go index 2514f58119..ed52ed8c3c 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -6,8 +6,8 @@ package constants import "github.com/ethereum/go-ethereum/common" var ( - BlackholeAddr = common.Address{ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - } + BlackholeAddr = common.Address{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } ) diff --git a/core/bench_test.go b/core/bench_test.go index 34d46e09f7..4706e199dc 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -31,15 +31,15 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) func BenchmarkInsertChain_empty_memdb(b *testing.B) { @@ -88,20 +88,8 @@ func genValueTx(nbytes int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) - gas, _ := IntrinsicGas(data, nil, false, params.Rules{}) // Disable Istanbul and EIP-2028 for this test - signer := gen.Signer() - gasPrice := big.NewInt(0) - if gen.header.BaseFee != nil { - gasPrice = gen.header.BaseFee - } - tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{ - Nonce: gen.TxNonce(benchRootAddr), - To: &toaddr, - Value: big.NewInt(1), - Gas: gas, - Data: data, - GasPrice: gasPrice, - }) + gas, _ := IntrinsicGas(data, nil, false, false, false) + tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, big.NewInt(225000000000), data), types.HomesteadSigner{}, benchRootKey) gen.AddTx(tx) } } @@ -130,26 +118,21 @@ func genTxRing(naccounts int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { block := gen.PrevBlock(i - 1) gas := block.GasLimit() - signer := gen.Signer() for { gas -= params.TxGas if gas < params.TxGas { break } to := (from + 1) % naccounts - burn := new(big.Int).SetUint64(params.TxGas) - burn.Mul(burn, gen.header.BaseFee) - tx, err := types.SignNewTx(ringKeys[from], signer, - &types.LegacyTx{ - Nonce: gen.TxNonce(ringAddrs[from]), - To: &ringAddrs[to], - Value: amount.Sub(amount, fee), - Gas: params.TxGas, - GasPrice: big.NewInt(225000000000), - }) - if err != nil { - panic(err) - } + tx := types.NewTransaction( + gen.TxNonce(ringAddrs[from]), + ringAddrs[to], + amount.Sub(amount, fee), + params.TxGas, + big.NewInt(225000000000), + nil, + ) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from]) gen.AddTx(tx) from = to } @@ -173,15 +156,16 @@ func benchInsertChain(b *testing.B, disk 
bool, gen func(int, *BlockGen)) { // Generate a chain of b.N blocks using the supplied block // generator function. - gspec := &Genesis{ + gspec := Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}}, } - _, chain, _, _ := GenerateChainWithGenesis(gspec, dummy.NewCoinbaseFaker(), b.N, 10, gen) + genesis := gspec.MustCommit(db) + chain, _, _ := GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, b.N, 10, gen) // Time the insertion of the new chain. // State and blocks are stored in the same DB. - chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) + chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) defer chainman.Stop() b.ReportAllocs() b.ResetTimer() @@ -238,19 +222,14 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { ParentHash: hash, Difficulty: big.NewInt(1), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, } hash = header.Hash() rawdb.WriteHeader(db, header) rawdb.WriteCanonicalHash(db, hash, n) - if n == 0 { - rawdb.WriteChainConfig(db, hash, params.TestChainConfig) - } - rawdb.WriteHeadHeaderHash(db, hash) - if full || n == 0 { block := types.NewBlockWithHeader(header) rawdb.WriteBody(db, hash, n, block.Body()) @@ -289,7 +268,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } - chain, err := NewBlockChain(db, DefaultCacheConfig, nil, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + chain, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig, dummy.NewFaker(), vm.Config{}, common.Hash{}) if err != nil { b.Fatalf("error creating chain: %v", err) } @@ -299,7 +278,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if full { hash := header.Hash() rawdb.ReadBody(db, hash, n) - rawdb.ReadReceipts(db, hash, n, header.Time, chain.Config()) + rawdb.ReadReceipts(db, hash, n, chain.Config()) } } chain.Stop() diff --git a/core/block_validator.go b/core/block_validator.go index a75eeb01a1..283cee8abf 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -30,11 +30,11 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/trie" ) // BlockValidator is responsible for validating block headers, uncles and diff --git a/core/blockchain.go b/core/blockchain.go index cac2bb82b9..89bd53815e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -34,75 +34,36 @@ import ( "io" "math/big" "runtime" - "strings" "sync" "sync/atomic" "time" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/internal/version" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/params" - 
"github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/hashdb" - "github.com/ava-labs/coreth/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/lru" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + lru "github.com/hashicorp/golang-lru" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/trie" ) var ( - accountReadTimer = metrics.NewRegisteredCounter("chain/account/reads", nil) - accountHashTimer = metrics.NewRegisteredCounter("chain/account/hashes", nil) - accountUpdateTimer = metrics.NewRegisteredCounter("chain/account/updates", nil) - accountCommitTimer = metrics.NewRegisteredCounter("chain/account/commits", nil) - storageReadTimer = metrics.NewRegisteredCounter("chain/storage/reads", nil) - storageHashTimer = metrics.NewRegisteredCounter("chain/storage/hashes", nil) - storageUpdateTimer = metrics.NewRegisteredCounter("chain/storage/updates", nil) - storageCommitTimer = metrics.NewRegisteredCounter("chain/storage/commits", nil) - snapshotAccountReadTimer = metrics.NewRegisteredCounter("chain/snapshot/account/reads", nil) - snapshotStorageReadTimer = metrics.NewRegisteredCounter("chain/snapshot/storage/reads", nil) - snapshotCommitTimer = metrics.NewRegisteredCounter("chain/snapshot/commits", nil) - - triedbCommitTimer = metrics.NewRegisteredCounter("chain/triedb/commits", nil) - - blockInsertTimer = metrics.NewRegisteredCounter("chain/block/inserts", nil) - blockInsertCount = metrics.NewRegisteredCounter("chain/block/inserts/count", nil) - blockContentValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/content", nil) - blockStateInitTimer = metrics.NewRegisteredCounter("chain/block/inits/state", nil) - blockExecutionTimer = metrics.NewRegisteredCounter("chain/block/executions", nil) - blockTrieOpsTimer = metrics.NewRegisteredCounter("chain/block/trie", nil) - blockValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/state", nil) - blockWriteTimer = metrics.NewRegisteredCounter("chain/block/writes", nil) - - acceptorQueueGauge = metrics.NewRegisteredGauge("chain/acceptor/queue/size", nil) - acceptorWorkTimer = metrics.NewRegisteredCounter("chain/acceptor/work", nil) - acceptorWorkCount = metrics.NewRegisteredCounter("chain/acceptor/work/count", nil) - processedBlockGasUsedCounter = metrics.NewRegisteredCounter("chain/block/gas/used/processed", nil) - acceptedBlockGasUsedCounter = metrics.NewRegisteredCounter("chain/block/gas/used/accepted", nil) - badBlockCounter = metrics.NewRegisteredCounter("chain/block/bad/count", nil) - - txUnindexTimer = metrics.NewRegisteredCounter("chain/txs/unindex", nil) - acceptedTxsCounter = metrics.NewRegisteredCounter("chain/txs/accepted", nil) - processedTxsCounter = metrics.NewRegisteredCounter("chain/txs/processed", nil) - - acceptedLogsCounter = metrics.NewRegisteredCounter("chain/logs/accepted", nil) - processedLogsCounter = metrics.NewRegisteredCounter("chain/logs/processed", nil) + acceptorQueueGauge = metrics.NewRegisteredGauge("blockchain/acceptor/queue/size", nil) + processedBlockGasUsedCounter = 
metrics.NewRegisteredCounter("blockchain/blocks/gas/used/processed", nil) + acceptedBlockGasUsedCounter = metrics.NewRegisteredCounter("blockchain/blocks/gas/used/accepted", nil) + badBlockCounter = metrics.NewRegisteredCounter("blockchain/blocks/bad/count", nil) ErrRefuseToCorruptArchiver = errors.New("node has operated with pruning disabled, shutting down to prevent missing tries") errFutureBlockUnsupported = errors.New("future block insertion not supported") errCacheConfigNotSpecified = errors.New("must specify cache config") - errInvalidOldChain = errors.New("invalid old chain") - errInvalidNewChain = errors.New("invalid new chain") ) const ( @@ -111,6 +72,7 @@ const ( receiptsCacheLimit = 32 txLookupCacheLimit = 1024 badBlockLimit = 10 + TriesInMemory = 128 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. // @@ -140,79 +102,36 @@ const ( // statsReportLimit is the time limit during import and export after which we // always print out progress. This avoids the user wondering what's going on. statsReportLimit = 8 * time.Second - - // trieCleanCacheStatsNamespace is the namespace to surface stats from the trie - // clean cache's underlying fastcache. - trieCleanCacheStatsNamespace = "hashdb/memcache/clean/fastcache" ) -// CacheConfig contains the configuration values for the trie database -// and state snapshot these are resident in a blockchain. +// CacheConfig contains the configuration values for the trie caching/pruning +// that's resident in a blockchain. type CacheConfig struct { TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit - TriePrefetcherParallelism int // Max concurrent disk reads trie prefetcher should perform at once CommitInterval uint64 // Commit the trie every [CommitInterval] blocks. Pruning bool // Whether to disable trie write caching and GC altogether (archive node) AcceptorQueueLimit int // Blocks to queue before blocking during acceptance PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries. - PopulateMissingTriesParallelism int // Number of readers to use when trying to populate missing tries. + PopulateMissingTriesParallelism int // Is the number of readers to use when trying to populate missing tries. AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled - SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call (= StateSyncEnabled) + SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory + SnapshotAsync bool // Generate snapshot tree async SnapshotVerify bool // Verify generated snapshots + SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) Preimages bool // Whether to store preimage of trie key to the disk - AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip - TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices - SkipTxIndexing bool // Whether to skip transaction indexing - StateHistory uint64 // Number of blocks from head whose state histories are reserved. 
- StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top - - SnapshotNoBuild bool // Whether the background generation is allowed - SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it -} - -// triedbConfig derives the configures for trie database. -func (c *CacheConfig) triedbConfig() *trie.Config { - config := &trie.Config{Preimages: c.Preimages} - if c.StateScheme == rawdb.HashScheme { - config.HashDB = &hashdb.Config{ - CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, - StatsPrefix: trieCleanCacheStatsNamespace, - } - } - if c.StateScheme == rawdb.PathScheme { - config.PathDB = &pathdb.Config{ - StateHistory: c.StateHistory, - CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, - DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, - } - } - return config } -// DefaultCacheConfig are the default caching values if none are specified by the -// user (also used during testing). var DefaultCacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB) - TriePrefetcherParallelism: 16, - Pruning: true, - CommitInterval: 4096, - AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay - SnapshotLimit: 256, - AcceptedCacheSize: 32, - StateScheme: rawdb.HashScheme, -} - -// DefaultCacheConfigWithScheme returns a deep copied default cache config with -// a provided trie node scheme. -func DefaultCacheConfigWithScheme(scheme string) *CacheConfig { - config := *DefaultCacheConfig - config.StateScheme = scheme - return &config + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB) + Pruning: true, + CommitInterval: 4096, + AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay + SnapshotLimit: 256, } // BlockChain represents the canonical chain given a database with a genesis @@ -233,11 +152,9 @@ type BlockChain struct { chainConfig *params.ChainConfig // Chain & network configuration cacheConfig *CacheConfig // Cache configuration for pruning - db ethdb.Database // Low level persistent database to store final content in - snaps *snapshot.Tree // Snapshot tree for fast trie leaf access - triedb *trie.Database // The database handler for maintaining trie nodes. - stateCache state.Database // State database to reuse between imports (contains state cache) - stateManager TrieWriter + db ethdb.Database // Low level persistent database to store final content in + + snaps *snapshot.Tree // Snapshot tree for fast trie leaf access hc *HeaderChain rmLogsFeed event.Feed @@ -256,20 +173,24 @@ type BlockChain struct { // Readers don't need to take it, they can just read the database. chainmu sync.RWMutex - currentBlock atomic.Pointer[types.Header] // Current head of the block chain + currentBlock atomic.Value // Current head of the block chain - bodyCache *lru.Cache[common.Hash, *types.Body] // Cache for the most recent block bodies - receiptsCache *lru.Cache[common.Hash, []*types.Receipt] // Cache for the most recent receipts per block - blockCache *lru.Cache[common.Hash, *types.Block] // Cache for the most recent entire blocks - txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry] // Cache for the most recent transaction lookup data. 
- badBlocks *lru.Cache[common.Hash, *badBlock] // Cache for bad blocks + stateCache state.Database // State database to reuse between imports (contains state cache) + stateManager TrieWriter + bodyCache *lru.Cache // Cache for the most recent block bodies + receiptsCache *lru.Cache // Cache for the most recent receipts per block + blockCache *lru.Cache // Cache for the most recent entire blocks + txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. - stopping atomic.Bool // false if chain is running, true when stopped + running int32 // 0 if chain is running, 1 when stopped - engine consensus.Engine - validator Validator // Block and state validator interface - processor Processor // Block transaction processor interface - vmConfig vm.Config + engine consensus.Engine + validator Validator // Block and state validator interface + prefetcher Prefetcher // Block state prefetcher interface + processor Processor // Block transaction processor interface + vmConfig vm.Config + + badBlocks *lru.Cache // Bad block cache lastAccepted *types.Block // Prevents reorgs past this height @@ -294,84 +215,53 @@ type BlockChain struct { // during shutdown and in tests. acceptorWg sync.WaitGroup - // [wg] is used to wait for the async blockchain processes to finish on shutdown. - wg sync.WaitGroup - - // quit channel is used to listen for when the blockchain is shut down to close - // async processes. - // WaitGroups are used to ensure that async processes have finished during shutdown. - quit chan struct{} - // [acceptorTip] is the last block processed by the acceptor. This is // returned as the LastAcceptedBlock() to ensure clients get only fully // processed blocks. This may be equal to [lastAccepted]. acceptorTip *types.Block acceptorTipLock sync.Mutex - - // [flattenLock] prevents the [acceptor] from flattening snapshots while - // a block is being verified. - flattenLock sync.Mutex - - // [acceptedLogsCache] stores recently accepted logs to improve the performance of eth_getLogs. - acceptedLogsCache FIFOCache[common.Hash, [][]*types.Log] - - // [txIndexTailLock] is used to synchronize the updating of the tx index tail. - txIndexTailLock sync.Mutex } // NewBlockChain returns a fully initialised block chain using information // available in the database. It initialises the default Ethereum Validator and // Processor. func NewBlockChain( - db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, engine consensus.Engine, - vmConfig vm.Config, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, + db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, + vmConfig vm.Config, lastAcceptedHash common.Hash, ) (*BlockChain, error) { if cacheConfig == nil { return nil, errCacheConfigNotSpecified } - // Open trie database with provided config - triedb := trie.NewDatabase(db, cacheConfig.triedbConfig()) - - // Setup the genesis block, commit the provided genesis specification - // to database if the genesis block is not present yet, or load the - // stored one from database. - // Note: In go-ethereum, the code rewinds the chain on an incompatible config upgrade. - // We don't do this and expect the node operator to always update their node's configuration - // before network upgrades take effect. 
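The CacheConfig fields above are plain struct knobs, so callers tune a chain by copying the package default and overriding selectively rather than building a config from scratch. A minimal sketch, assuming the github.com/tenderly/coreth module path used throughout this diff; the overridden values are illustrative, not recommendations:

package main

import (
	"fmt"

	"github.com/tenderly/coreth/core"
)

func main() {
	// Copy the shared default so the package-level DefaultCacheConfig stays untouched.
	cfg := *core.DefaultCacheConfig
	cfg.Pruning = false          // archive mode: keep every historical trie
	cfg.SnapshotLimit = 0        // disable the snapshot tree entirely
	cfg.AcceptorQueueLimit = 128 // allow a deeper backlog before Accept blocks

	fmt.Printf("commit interval %d, pruning=%v\n", cfg.CommitInterval, cfg.Pruning)
}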
- chainConfig, _, err := SetupGenesisBlock(db, triedb, genesis, lastAcceptedHash, skipChainConfigCheckCompatible) - if err != nil { - return nil, err - } - log.Info("") - log.Info(strings.Repeat("-", 153)) - for _, line := range strings.Split(chainConfig.Description(), "\n") { - log.Info(line) - } - log.Info(strings.Repeat("-", 153)) - log.Info("") + bodyCache, _ := lru.New(bodyCacheLimit) + receiptsCache, _ := lru.New(receiptsCacheLimit) + blockCache, _ := lru.New(blockCacheLimit) + txLookupCache, _ := lru.New(txLookupCacheLimit) + badBlocks, _ := lru.New(badBlockLimit) bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - triedb: triedb, - bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), - receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), - blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), - txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit), - badBlocks: lru.NewCache[common.Hash, *badBlock](badBlockLimit), - engine: engine, - vmConfig: vmConfig, - senderCacher: NewTxSenderCacher(runtime.NumCPU()), - acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit), - quit: make(chan struct{}), - acceptedLogsCache: NewFIFOCache[common.Hash, [][]*types.Log](cacheConfig.AcceptedCacheSize), - } - bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb) + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ + Cache: cacheConfig.TrieCleanLimit, + Preimages: cacheConfig.Preimages, + }), + bodyCache: bodyCache, + receiptsCache: receiptsCache, + blockCache: blockCache, + txLookupCache: txLookupCache, + engine: engine, + vmConfig: vmConfig, + badBlocks: badBlocks, + senderCacher: newTxSenderCacher(runtime.NumCPU()), + acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit), + } bc.validator = NewBlockValidator(chainConfig, bc, engine) + bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) bc.processor = NewStateProcessor(chainConfig, bc, engine) - bc.hc, err = NewHeaderChain(db, chainConfig, cacheConfig, engine) + var err error + bc.hc, err = NewHeaderChain(db, chainConfig, engine) if err != nil { return nil, err } @@ -380,10 +270,11 @@ func NewBlockChain( return nil, ErrNoGenesis } - bc.currentBlock.Store(nil) + var nilBlock *types.Block + bc.currentBlock.Store(nilBlock) // Create the state manager - bc.stateManager = NewTrieWriter(bc.triedb, cacheConfig) + bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), cacheConfig) // Re-generate current block state if it is missing if err := bc.loadLastState(lastAcceptedHash); err != nil { @@ -399,8 +290,8 @@ func NewBlockChain( // Make sure the state associated with the block is available head := bc.CurrentBlock() - if !bc.HasState(head.Root) { - return nil, fmt.Errorf("head state missing %d:%s", head.Number, head.Hash()) + if !bc.HasState(head.Root()) { + return nil, fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash()) } if err := bc.protectTrieIndex(); err != nil { @@ -421,141 +312,25 @@ func NewBlockChain( bc.initSnapshot(head) } - // Warm up [hc.acceptedNumberCache] and [acceptedLogsCache] - bc.warmAcceptedCaches() - - // if txlookup limit is 0 (uindexing disabled), we don't need to repair the tx index tail. 
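The acceptorQueue wired up here is a buffered channel paired with a WaitGroup: enqueueing blocks applies backpressure once AcceptorQueueLimit items are in flight, and draining just waits for the single background consumer to catch up. A self-contained sketch of that pattern, with ints standing in for blocks:

package main

import (
	"fmt"
	"sync"
)

type acceptor struct {
	queue chan int
	wg    sync.WaitGroup
}

func newAcceptor(limit int) *acceptor {
	a := &acceptor{queue: make(chan int, limit)}
	go func() {
		for next := range a.queue { // exits once the channel is closed
			fmt.Println("accepted", next)
			a.wg.Done()
		}
	}()
	return a
}

func (a *acceptor) enqueue(v int) {
	a.wg.Add(1)
	a.queue <- v // blocks when the buffer is full, applying backpressure
}

// drain blocks until everything enqueued so far has been processed,
// mirroring what DrainAcceptorQueue does for accepted blocks.
func (a *acceptor) drain() { a.wg.Wait() }

func main() {
	a := newAcceptor(64)
	for i := 0; i < 5; i++ {
		a.enqueue(i)
	}
	a.drain()
	close(a.queue)
}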
- if bc.cacheConfig.TxLookupLimit != 0 { - latestStateSynced := rawdb.GetLatestSyncPerformed(bc.db) - bc.setTxIndexTail(latestStateSynced) - } - // Start processing accepted blocks effects in the background go bc.startAcceptor() - // Start tx indexer/unindexer if required. - if bc.cacheConfig.TxLookupLimit != 0 { - bc.wg.Add(1) - var ( - headCh = make(chan ChainEvent, 1) // Buffered to avoid locking up the event feed - sub = bc.SubscribeChainAcceptedEvent(headCh) - ) - go func() { - defer bc.wg.Done() - if sub == nil { - log.Warn("could not create chain accepted subscription to unindex txs") - return - } - defer sub.Unsubscribe() - - bc.maintainTxIndex(headCh) - }() - } return bc, nil } -// unindexBlocks unindexes transactions depending on user configuration -func (bc *BlockChain) unindexBlocks(tail uint64, head uint64, done chan struct{}) { - start := time.Now() - txLookupLimit := bc.cacheConfig.TxLookupLimit - bc.txIndexTailLock.Lock() - defer func() { - txUnindexTimer.Inc(time.Since(start).Milliseconds()) - bc.txIndexTailLock.Unlock() - close(done) - bc.wg.Done() - }() - - // If head is 0, it means the chain is just initialized and no blocks are inserted, - // so don't need to indexing anything. - if head == 0 { - return - } - - if head-txLookupLimit+1 >= tail { - // Unindex a part of stale indices and forward index tail to HEAD-limit - rawdb.UnindexTransactions(bc.db, tail, head-txLookupLimit+1, bc.quit) - } -} - -// maintainTxIndex is responsible for the deletion of the -// transaction index. This does not support reconstruction of removed indexes. -// Invariant: If TxLookupLimit is 0, it means all tx indices will be preserved. -// Meaning that this function should never be called. -func (bc *BlockChain) maintainTxIndex(headCh <-chan ChainEvent) { - txLookupLimit := bc.cacheConfig.TxLookupLimit - - // If the user just upgraded to a new version which supports transaction - // index pruning, write the new tail and remove anything older. - if rawdb.ReadTxIndexTail(bc.db) == nil { - rawdb.WriteTxIndexTail(bc.db, 0) - } - - // Any reindexing done, start listening to chain events and moving the index window - var ( - done chan struct{} // Non-nil if background unindexing or reindexing routine is active. - ) - log.Info("Initialized transaction unindexer", "limit", txLookupLimit) - - // Launch the initial processing if chain is not empty. This step is - // useful in these scenarios that chain has no progress and indexer - // is never triggered. - if head := bc.CurrentBlock(); head != nil && head.Number.Uint64() > txLookupLimit { - done = make(chan struct{}) - tail := rawdb.ReadTxIndexTail(bc.db) - bc.wg.Add(1) - go bc.unindexBlocks(*tail, head.Number.Uint64(), done) - } - - for { - select { - case head := <-headCh: - headNum := head.Block.NumberU64() - if headNum < txLookupLimit { - break - } - - if done == nil { - done = make(chan struct{}) - // Note: tail will not be nil since it is initialized in this function. - tail := rawdb.ReadTxIndexTail(bc.db) - bc.wg.Add(1) - go bc.unindexBlocks(*tail, headNum, done) - } - case <-done: - done = nil - case <-bc.quit: - if done != nil { - log.Info("Waiting background transaction unindexer to exit") - <-done - } - return - } - } -} - // writeBlockAcceptedIndices writes any indices that must be persisted for accepted block. 
// This includes the following: // - transaction lookup indices // - updating the acceptor tip index func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error { batch := bc.db.NewBatch() - if err := bc.batchBlockAcceptedIndices(batch, b); err != nil { - return err - } - if err := batch.Write(); err != nil { - return fmt.Errorf("%w: failed to write accepted indices entries batch", err) - } - return nil -} - -func (bc *BlockChain) batchBlockAcceptedIndices(batch ethdb.Batch, b *types.Block) error { - if !bc.cacheConfig.SkipTxIndexing { - rawdb.WriteTxLookupEntriesByBlock(batch, b) - } + rawdb.WriteTxLookupEntriesByBlock(batch, b) if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil { return fmt.Errorf("%w: failed to write acceptor tip key", err) } + if err := batch.Write(); err != nil { + return fmt.Errorf("%w: failed to write tx lookup entries batch", err) + } return nil } @@ -575,62 +350,18 @@ func (bc *BlockChain) flattenSnapshot(postAbortWork func() error, hash common.Ha return err } - // Ensure we avoid flattening the snapshot while we are processing a block, or - // block execution will fallback to reading from the trie (which is much - // slower). - bc.flattenLock.Lock() - defer bc.flattenLock.Unlock() - // Flatten the entire snap Trie to disk // // Note: This resumes snapshot generation. return bc.snaps.Flatten(hash) } -// warmAcceptedCaches fetches previously accepted headers and logs from disk to -// pre-populate [hc.acceptedNumberCache] and [acceptedLogsCache]. -func (bc *BlockChain) warmAcceptedCaches() { - var ( - startTime = time.Now() - lastAccepted = bc.LastAcceptedBlock().NumberU64() - startIndex = uint64(1) - targetCacheSize = uint64(bc.cacheConfig.AcceptedCacheSize) - ) - if targetCacheSize == 0 { - log.Info("Not warming accepted cache because disabled") - return - } - if lastAccepted < startIndex { - // This could occur if we haven't accepted any blocks yet - log.Info("Not warming accepted cache because there are no accepted blocks") - return - } - cacheDiff := targetCacheSize - 1 // last accepted lookback is inclusive, so we reduce size by 1 - if cacheDiff < lastAccepted { - startIndex = lastAccepted - cacheDiff - } - for i := startIndex; i <= lastAccepted; i++ { - block := bc.GetBlockByNumber(i) - if block == nil { - // This could happen if a node state-synced - log.Info("Exiting accepted cache warming early because header is nil", "height", i, "t", time.Since(startTime)) - break - } - // TODO: handle blocks written to disk during state sync - bc.hc.acceptedNumberCache.Put(block.NumberU64(), block.Header()) - logs := bc.collectUnflattenedLogs(block, false) - bc.acceptedLogsCache.Put(block.Hash(), logs) - } - log.Info("Warmed accepted caches", "start", startIndex, "end", lastAccepted, "t", time.Since(startTime)) -} - // startAcceptor starts processing items on the [acceptorQueue]. If a [nil] // object is placed on the [acceptorQueue], the [startAcceptor] will exit. 
func (bc *BlockChain) startAcceptor() { log.Info("Starting Acceptor", "queue length", bc.cacheConfig.AcceptorQueueLimit) for next := range bc.acceptorQueue { - start := time.Now() acceptorQueueGauge.Dec(1) if err := bc.flattenSnapshot(func() error { @@ -644,34 +375,22 @@ func (bc *BlockChain) startAcceptor() { log.Crit("failed to write accepted block effects", "err", err) } - // Ensure [hc.acceptedNumberCache] and [acceptedLogsCache] have latest content - bc.hc.acceptedNumberCache.Put(next.NumberU64(), next.Header()) - logs := bc.collectUnflattenedLogs(next, false) - bc.acceptedLogsCache.Put(next.Hash(), logs) - - // Update the acceptor tip before sending events to ensure that any client acting based off of - // the events observes the updated acceptorTip on subsequent requests - bc.acceptorTipLock.Lock() - bc.acceptorTip = next - bc.acceptorTipLock.Unlock() + // Fetch block logs + logs := bc.gatherBlockLogs(next.Hash(), next.NumberU64(), false) // Update accepted feeds - flattenedLogs := types.FlattenLogs(logs) - bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: flattenedLogs}) - if len(flattenedLogs) > 0 { - bc.logsAcceptedFeed.Send(flattenedLogs) + bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: logs}) + if len(logs) > 0 { + bc.logsAcceptedFeed.Send(logs) } if len(next.Transactions()) != 0 { bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()}) } + bc.acceptorTipLock.Lock() + bc.acceptorTip = next + bc.acceptorTipLock.Unlock() bc.acceptorWg.Done() - - acceptorWorkTimer.Inc(time.Since(start).Milliseconds()) - acceptorWorkCount.Inc(1) - // Note: in contrast to most accepted metrics, we increment the accepted log metrics in the acceptor queue because - // the logs are already processed in the acceptor queue. - acceptedLogsCounter.Inc(int64(len(logs))) } } @@ -695,8 +414,8 @@ func (bc *BlockChain) addAcceptorQueue(b *types.Block) { // DrainAcceptorQueue blocks until all items in [acceptorQueue] have been // processed. 
func (bc *BlockChain) DrainAcceptorQueue() { - bc.acceptorClosingLock.RLock() - defer bc.acceptorClosingLock.RUnlock() + bc.acceptorClosingLock.Lock() + defer bc.acceptorClosingLock.Unlock() if bc.acceptorClosed { return @@ -757,15 +476,15 @@ func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error { return errors.New("could not read head block hash") } // Make sure the entire head block is available - headBlock := bc.GetBlockByHash(head) - if headBlock == nil { + currentBlock := bc.GetBlockByHash(head) + if currentBlock == nil { return fmt.Errorf("could not load head block %s", head.Hex()) } // Everything seems to be fine, set as the head block - bc.currentBlock.Store(headBlock.Header()) + bc.currentBlock.Store(currentBlock) // Restore the last known head header - currentHeader := headBlock.Header() + currentHeader := currentBlock.Header() if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { if header := bc.GetHeaderByHash(head); header != nil { currentHeader = header @@ -774,7 +493,7 @@ func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error { bc.hc.SetCurrentHeader(currentHeader) log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) - log.Info("Loaded most recent local full block", "number", headBlock.Number(), "hash", headBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0))) + log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) // Otherwise, set the last accepted block and perform a re-org. bc.lastAccepted = bc.GetBlockByHash(lastAcceptedHash) @@ -804,7 +523,7 @@ func (bc *BlockChain) loadGenesisState() error { // Last update all in-memory chain markers bc.lastAccepted = bc.genesisBlock - bc.currentBlock.Store(bc.genesisBlock.Header()) + bc.currentBlock.Store(bc.genesisBlock) bc.hc.SetGenesis(bc.genesisBlock.Header()) bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) return nil @@ -812,7 +531,7 @@ func (bc *BlockChain) loadGenesisState() error { // Export writes the active chain to the given writer. func (bc *BlockChain) Export(w io.Writer) error { - return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64()) + return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) } // ExportN writes a subset of the active chain to the given writer. @@ -840,7 +559,7 @@ func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, fi return fmt.Errorf("export failed on #%d: not found", nr) } if nr > first && block.ParentHash() != parentHash { - return errors.New("export failed: chain reorg during export") + return fmt.Errorf("export failed: chain reorg during export") } parentHash = block.Hash() if err := callback(block); err != nil { @@ -874,7 +593,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) { } // Update all in-memory chain markers in the last step bc.hc.SetCurrentHeader(block.Header()) - bc.currentBlock.Store(block.Header()) + bc.currentBlock.Store(block) } // ValidateCanonicalChain confirms a canonical chain is well-formed. 
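Export and ExportN above stream the canonical chain to any io.Writer, so dumping the whole chain to a compressed file is a thin wrapper around them. A hedged sketch; chain is assumed to be an already-initialized *core.BlockChain, and the gzip wrapping is just one reasonable choice:

package chainexport

import (
	"compress/gzip"
	"os"

	"github.com/tenderly/coreth/core"
)

// exportChain writes every canonical block from genesis to the current head
// into a gzip-compressed file using the Export API above.
func exportChain(chain *core.BlockChain, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zw := gzip.NewWriter(f)
	defer zw.Close() // flush the gzip stream before the file closes

	return chain.Export(zw) // chain.ExportN(zw, first, last) exports a sub-range
}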
@@ -884,19 +603,19 @@ func (bc *BlockChain) ValidateCanonicalChain() error { current := bc.CurrentBlock() i := 0 - log.Info("Beginning to validate canonical chain", "startBlock", current.Number) + log.Info("Beginning to validate canonical chain", "startBlock", current.NumberU64()) for current.Hash() != bc.genesisBlock.Hash() { blkByHash := bc.GetBlockByHash(current.Hash()) if blkByHash == nil { - return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number) + return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number()) } if blkByHash.Hash() != current.Hash() { return fmt.Errorf("blockByHash returned a block with an unexpected hash: %s, expected: %s", blkByHash.Hash().String(), current.Hash().String()) } - blkByNumber := bc.GetBlockByNumber(current.Number.Uint64()) + blkByNumber := bc.GetBlockByNumber(current.Number().Uint64()) if blkByNumber == nil { - return fmt.Errorf("couldn't find block by number at height %d", current.Number) + return fmt.Errorf("couldn't find block by number at height %d", current.Number()) } if blkByNumber.Hash() != current.Hash() { return fmt.Errorf("blockByNumber returned a block with unexpected hash: %s, expected: %s", blkByNumber.Hash().String(), current.Hash().String()) @@ -904,33 +623,25 @@ func (bc *BlockChain) ValidateCanonicalChain() error { hdrByHash := bc.GetHeaderByHash(current.Hash()) if hdrByHash == nil { - return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number) + return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number()) } if hdrByHash.Hash() != current.Hash() { return fmt.Errorf("hdrByHash returned a block header with an unexpected hash: %s, expected: %s", hdrByHash.Hash().String(), current.Hash().String()) } - hdrByNumber := bc.GetHeaderByNumber(current.Number.Uint64()) + hdrByNumber := bc.GetHeaderByNumber(current.Number().Uint64()) if hdrByNumber == nil { - return fmt.Errorf("couldn't find block header by number at height %d", current.Number) + return fmt.Errorf("couldn't find block header by number at height %d", current.Number()) } if hdrByNumber.Hash() != current.Hash() { return fmt.Errorf("hdrByNumber returned a block header with unexpected hash: %s, expected: %s", hdrByNumber.Hash().String(), current.Hash().String()) } - // Lookup the full block to get the transactions - block := bc.GetBlock(current.Hash(), current.Number.Uint64()) - if block == nil { - log.Error("Current block not found in database", "block", current.Number, "hash", current.Hash()) - return fmt.Errorf("current block missing: #%d [%x..]", current.Number, current.Hash().Bytes()[:4]) - } - txs := block.Transactions() + txs := current.Body().Transactions // Transactions are only indexed beneath the last accepted block, so we only check // that the transactions have been indexed, if we are checking below the last accepted // block. 
- shouldIndexTxs := !bc.cacheConfig.SkipTxIndexing && - (bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.Number.Uint64()+bc.cacheConfig.TxLookupLimit) - if current.Number.Uint64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs { + if current.NumberU64() <= bc.lastAccepted.NumberU64() { // Ensure that all of the transactions have been stored correctly in the canonical // chain for txIndex, tx := range txs { @@ -941,8 +652,8 @@ func (bc *BlockChain) ValidateCanonicalChain() error { if txLookup.BlockHash != current.Hash() { return fmt.Errorf("tx lookup returned with incorrect block hash: %s, expected: %s", txLookup.BlockHash.String(), current.Hash().String()) } - if txLookup.BlockIndex != current.Number.Uint64() { - return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number) + if txLookup.BlockIndex != current.Number().Uint64() { + return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number().Uint64()) } if txLookup.Index != uint64(txIndex) { return fmt.Errorf("tx lookup returned with incorrect transaction index: %d, expected: %d", txLookup.Index, txIndex) @@ -961,8 +672,8 @@ func (bc *BlockChain) ValidateCanonicalChain() error { if txReceipt.BlockHash != current.Hash() { return fmt.Errorf("transaction receipt had block hash %s, but expected %s", txReceipt.BlockHash.String(), current.Hash().String()) } - if txReceipt.BlockNumber.Uint64() != current.Number.Uint64() { - return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), current.Number) + if txReceipt.BlockNumber.Uint64() != current.NumberU64() { + return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), current.NumberU64()) } } @@ -971,9 +682,9 @@ func (bc *BlockChain) ValidateCanonicalChain() error { log.Info("Validate Canonical Chain Update", "totalBlocks", i) } - parent := bc.GetHeaderByHash(current.ParentHash) - if parent.Hash() != current.ParentHash { - return fmt.Errorf("getBlockByHash retrieved parent block with incorrect hash, found %s, expected: %s", parent.Hash().String(), current.ParentHash.String()) + parent := bc.GetBlockByHash(current.ParentHash()) + if parent.Hash() != current.ParentHash() { + return fmt.Errorf("getBlockByHash retrieved parent block with incorrect hash, found %s, expected: %s", parent.Hash().String(), current.ParentHash().String()) } current = parent } @@ -981,25 +692,30 @@ func (bc *BlockChain) ValidateCanonicalChain() error { return nil } -// stopWithoutSaving stops the blockchain service. If any imports are currently in progress -// it will abort them using the procInterrupt. This method stops all running -// goroutines, but does not do all the post-stop work of persisting data. -// OBS! It is generally recommended to use the Stop method! -// This method has been exposed to allow tests to stop the blockchain while simulating -// a crash. -func (bc *BlockChain) stopWithoutSaving() { - if !bc.stopping.CompareAndSwap(false, true) { +// Stop stops the blockchain service. If any imports are currently in progress +// it will abort them using the procInterrupt. 
+func (bc *BlockChain) Stop() { + if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { return } - log.Info("Closing quit channel") - close(bc.quit) // Wait for accepted feed to process all remaining items log.Info("Stopping Acceptor") start := time.Now() bc.stopAcceptor() log.Info("Acceptor queue drained", "t", time.Since(start)) + log.Info("Shutting down state manager") + start = time.Now() + if err := bc.stateManager.Shutdown(); err != nil { + log.Error("Failed to Shutdown state manager", "err", err) + } + log.Info("State manager shut down", "t", time.Since(start)) + // Flush the collected preimages to disk + if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil { + log.Error("Failed to commit trie preimages", "err", err) + } + // Stop senderCacher's goroutines log.Info("Shutting down sender cacher") bc.senderCacher.Shutdown() @@ -1008,36 +724,6 @@ log.Info("Closing scope") bc.scope.Close() - // Waiting for background processes to complete - log.Info("Waiting for background processes to complete") - bc.wg.Wait() -} - -// Stop stops the blockchain service. If any imports are currently in progress -// it will abort them using the procInterrupt. -func (bc *BlockChain) Stop() { - bc.stopWithoutSaving() - - // Ensure that the entirety of the state snapshot is journaled to disk. - if bc.snaps != nil { - bc.snaps.Release() - } - if bc.triedb.Scheme() == rawdb.PathScheme { - // Ensure that the in-memory trie nodes are journaled to disk properly. - if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil { - log.Info("Failed to journal in-memory trie nodes", "err", err) - } - } - log.Info("Shutting down state manager") - start := time.Now() - if err := bc.stateManager.Shutdown(); err != nil { - log.Error("Failed to Shutdown state manager", "err", err) - } - log.Info("State manager shut down", "t", time.Since(start)) - // Close the trie database, release all the held resources as the last step. - if err := bc.triedb.Close(); err != nil { - log.Error("Failed to close trie database", "err", err) - } log.Info("Blockchain stopped") } @@ -1085,7 +771,7 @@ func (bc *BlockChain) setPreference(block *types.Block) error { return nil } // LastConsensusAcceptedBlock returns the last block to be marked as accepted. It may or // may not yet be processed. func (bc *BlockChain) LastConsensusAcceptedBlock() *types.Block { bc.chainmu.Lock() @@ -1135,11 +821,10 @@ func (bc *BlockChain) Accept(block *types.Block) error { } } - // Enqueue block in the acceptor bc.lastAccepted = block bc.addAcceptorQueue(block) acceptedBlockGasUsedCounter.Inc(int64(block.GasUsed())) - acceptedTxsCounter.Inc(int64(len(block.Transactions()))) + return nil } @@ -1239,18 +924,13 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. // diff layer for the block. var err error if bc.snaps == nil { - _, err = state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), true) + _, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true) } else { - _, err = state.CommitWithSnap(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true) + _, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true) } if err != nil { return err } - // If node is running in path mode, skip explicit gc operation - // which is unnecessary in this mode.
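The reverted Stop guards its teardown with an int32 running flag flipped by compare-and-swap, so concurrent or repeated Stop calls run the shutdown path exactly once. The guard in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

type service struct {
	running int32 // 0 if running, 1 when stopped, as in BlockChain above
}

func (s *service) Stop() {
	if !atomic.CompareAndSwapInt32(&s.running, 0, 1) {
		return // another caller already won the race; nothing to do
	}
	fmt.Println("tearing down exactly once")
}

func main() {
	var s service
	s.Stop()
	s.Stop() // no-op: the CAS fails the second time
}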
- if bc.triedb.Scheme() == rawdb.PathScheme { - return nil - } // Note: if InsertTrie must be the last step in verification that can return an error. // This allows [stateManager] to assume that if it inserts a trie without returning an @@ -1326,11 +1006,26 @@ func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error { return err } +// gatherBlockLogs fetches logs from a previously inserted block. +func (bc *BlockChain) gatherBlockLogs(hash common.Hash, number uint64, removed bool) []*types.Log { + receipts := rawdb.ReadReceipts(bc.db, hash, number, bc.chainConfig) + var logs []*types.Log + for _, receipt := range receipts { + for _, log := range receipt.Logs { + l := *log + if removed { + l.Removed = true + } + logs = append(logs, &l) + } + } + + return logs +} + func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { - start := time.Now() - bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), block.Time()), block.Transactions()) + bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions()) - substart := time.Now() err := bc.engine.VerifyHeader(bc, block.Header()) if err == nil { err = bc.validator.ValidateBody(block) @@ -1362,8 +1057,6 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { bc.reportBlock(block, nil, err) return err } - blockContentValidationTimer.Inc(time.Since(substart).Milliseconds()) - // No validation errors for the block var activeState *state.StateDB defer func() { @@ -1376,28 +1069,23 @@ } }() - // Retrieve the parent block to determine which root to build state on - substart = time.Now() - parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) + // Retrieve the parent block and its state to execute on top + start := time.Now() - // Instantiate the statedb to use for processing transactions - // - // NOTE: Flattening a snapshot during block execution requires fetching state - // entries directly from the trie (much slower). - bc.flattenLock.Lock() - defer bc.flattenLock.Unlock() + // Retrieve the parent header to determine which root to build state on + parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) if err != nil { return err } - blockStateInitTimer.Inc(time.Since(substart).Milliseconds()) // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) + statedb.StartPrefetcher("chain") activeState = statedb + // If we have a followup block, run that against the current state to pre-cache + // transactions and probabilistically some of the account/storage trie nodes.
// Process block using the parent state as reference point - pstart := time.Now() receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig) if serr := statedb.Error(); serr != nil { log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash()) @@ -1406,32 +1094,12 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { bc.reportBlock(block, receipts, err) return err } - ptime := time.Since(pstart) // Validate the state using the default validator - vstart := time.Now() if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) return err } - vtime := time.Since(vstart) - - // Update the metrics touched during block processing and validation - accountReadTimer.Inc(statedb.AccountReads.Milliseconds()) // Account reads are complete(in processing) - storageReadTimer.Inc(statedb.StorageReads.Milliseconds()) // Storage reads are complete(in processing) - snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete(in processing) - snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete(in processing) - accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete(in validation) - storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete(in validation) - accountHashTimer.Inc(statedb.AccountHashes.Milliseconds()) // Account hashes are complete(in validation) - storageHashTimer.Inc(statedb.StorageHashes.Milliseconds()) // Storage hashes are complete(in validation) - triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing - trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update - trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read - trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read - blockExecutionTimer.Inc((ptime - trieRead).Milliseconds()) // The time spent on EVM processing - blockValidationTimer.Inc((vtime - (triehash + trieUpdate)).Milliseconds()) // The time spent on block validation - blockTrieOpsTimer.Inc((triehash + trieUpdate + trieRead).Milliseconds()) // The time spent on trie operations // If [writes] are disabled, skip [writeBlockWithState] so that we do not write the block // or the state trie to disk. @@ -1444,18 +1112,9 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { // writeBlockWithState (called within writeBlockAndSethead) creates a reference that // will be cleaned up in Accept/Reject so we need to ensure an error cannot occur // later in verification, since that would cause the referenced root to never be dereferenced. 
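The gatherBlockLogs helper added just below copies each receipt log before flagging it, so logs cached elsewhere are never mutated in place when a block is unwound. The copy-then-flag pattern on its own, using go-ethereum's types.Log:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// markRemoved returns copies of the given logs with Removed set, leaving the
// originals untouched; illustrative of the l := *log copy in gatherBlockLogs.
func markRemoved(in []*types.Log) []*types.Log {
	out := make([]*types.Log, 0, len(in))
	for _, l := range in {
		cp := *l // copy the struct so the cached log is not mutated
		cp.Removed = true
		out = append(out, &cp)
	}
	return out
}

func main() {
	orig := []*types.Log{{Index: 0}, {Index: 1}}
	removed := markRemoved(orig)
	fmt.Println(orig[0].Removed, removed[0].Removed) // false true
}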
- wstart := time.Now() if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil { return err } - // Update the metrics touched during block commit - accountCommitTimer.Inc(statedb.AccountCommits.Milliseconds()) // Account commits are complete, we can mark them - storageCommitTimer.Inc(statedb.StorageCommits.Milliseconds()) // Storage commits are complete, we can mark them - snapshotCommitTimer.Inc(statedb.SnapshotCommits.Milliseconds()) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Inc(statedb.TrieDBCommits.Milliseconds()) // Trie database commits are complete, we can mark them - blockWriteTimer.Inc((time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits).Milliseconds()) - blockInsertTimer.Inc(time.Since(start).Milliseconds()) - log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "parentHash", block.ParentHash(), "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), @@ -1464,68 +1123,60 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { ) processedBlockGasUsedCounter.Inc(int64(block.GasUsed())) - processedTxsCounter.Inc(int64(block.Transactions().Len())) - processedLogsCounter.Inc(int64(len(logs))) - blockInsertCount.Inc(1) return nil } -// collectUnflattenedLogs collects the logs that were generated or removed during -// the processing of a block. -func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*types.Log { - var blobGasPrice *big.Int - excessBlobGas := b.ExcessBlobGas() - if excessBlobGas != nil { - blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas) - } - receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64()) - if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil { - log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err) - } - - // Note: gross but this needs to be initialized here because returning nil will be treated specially as an incorrect - // error case downstream. - logs := make([][]*types.Log, len(receipts)) - for i, receipt := range receipts { - receiptLogs := make([]*types.Log, len(receipt.Logs)) - for i, log := range receipt.Logs { - if removed { - log.Removed = true - } - receiptLogs[i] = log - } - logs[i] = receiptLogs +// collectLogs collects the logs that were generated or removed during +// the processing of the block that corresponds with the given hash. +// These logs are later announced as deleted or reborn. +func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return nil } - return logs + return bc.gatherBlockLogs(hash, *number, removed) } -// collectLogs collects the logs that were generated or removed during -// the processing of a block. These logs are later announced as deleted or reborn. -func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { - unflattenedLogs := bc.collectUnflattenedLogs(b, removed) - return types.FlattenLogs(unflattenedLogs) +// mergeLogs returns a merged log slice with specified sort order. +func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log { + var ret []*types.Log + if reverse { + for i := len(logs) - 1; i >= 0; i-- { + ret = append(ret, logs[i]...) + } + } else { + for i := 0; i < len(logs); i++ { + ret = append(ret, logs[i]...) 
+ } + } + return ret } // reorg takes two blocks, an old chain and a new chain and will reconstruct the // blocks and inserts them to be part of the new canonical chain and accumulates // potential missing transactions and post an event about them. -func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { +func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { var ( + newHead = newBlock + oldHead = oldBlock + newChain types.Blocks oldChain types.Blocks commonBlock *types.Block - ) - oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) - if oldBlock == nil { - return errors.New("current head block missing") - } - newBlock := newHead + deletedLogs [][]*types.Log + rebirthLogs [][]*types.Log + ) // Reduce the longer chain to the same number as the shorter one if oldBlock.NumberU64() > newBlock.NumberU64() { // Old chain is longer, gather all transactions and logs as deleted ones for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { oldChain = append(oldChain, oldBlock) + // Collect deleted logs for notification + logs := bc.collectLogs(oldBlock.Hash(), true) + if len(logs) > 0 { + deletedLogs = append(deletedLogs, logs) + } } } else { // New chain is longer, stash all blocks away for subsequent insertion @@ -1534,10 +1185,10 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { } } if oldBlock == nil { - return errInvalidOldChain + return fmt.Errorf("invalid old chain") } if newBlock == nil { - return errInvalidNewChain + return fmt.Errorf("invalid new chain") } // Both sides of the reorg are at the same number, reduce both until the common // ancestor is found @@ -1549,16 +1200,22 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { } // Remove an old block as well as stash away a new block oldChain = append(oldChain, oldBlock) + // Collect deleted logs for notification + logs := bc.collectLogs(oldBlock.Hash(), true) + if len(logs) > 0 { + deletedLogs = append(deletedLogs, logs) + } + newChain = append(newChain, newBlock) // Step back with both chains oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) if oldBlock == nil { - return errInvalidOldChain + return fmt.Errorf("invalid old chain") } newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) if newBlock == nil { - return errInvalidNewChain + return fmt.Errorf("invalid new chain") } } @@ -1580,15 +1237,20 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) } else { - log.Debug("Preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number, "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash()) + log.Warn("Unlikely preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash()) } // Insert the new chain(except the head block(reverse order)), // taking care of the proper incremental order. 
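The ancestor search above runs in two phases: first the longer chain is walked back to the shorter one's height, then both sides step back in lockstep until they meet. A toy version of the same walk, with pointer equality standing in for the hash comparison the real code uses:

package main

import "fmt"

type block struct {
	num    uint64
	parent *block
}

// commonAncestor mirrors the reorg loop: level the heights, then descend both
// chains together until they converge (or run out, for disjoint histories).
func commonAncestor(a, b *block) *block {
	for a != nil && b != nil && a.num > b.num {
		a = a.parent
	}
	for a != nil && b != nil && b.num > a.num {
		b = b.parent
	}
	for a != nil && b != nil && a != b {
		a, b = a.parent, b.parent
	}
	if a == nil || b == nil {
		return nil // no shared history
	}
	return a
}

func main() {
	genesis := &block{num: 0}
	a1 := &block{num: 1, parent: genesis}
	a2 := &block{num: 2, parent: a1}
	b1 := &block{num: 1, parent: genesis}   // a one-block fork
	fmt.Println(commonAncestor(a2, b1).num) // 0: the chains fork right after genesis
}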
for i := len(newChain) - 1; i >= 1; i-- { // Insert the block in the canonical way, re-writing history bc.writeHeadBlock(newChain[i]) - } + // Collect reborn logs due to chain reorg + logs := bc.collectLogs(newChain[i].Hash(), false) + if len(logs) > 0 { + rebirthLogs = append(rebirthLogs, logs) + } + } // Delete any canonical number assignments above the new head indexesBatch := bc.db.NewBatch() @@ -1607,42 +1269,20 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { log.Crit("Failed to delete useless indexes", "err", err) } - // Send out events for logs from the old canon chain, and 'reborn' - // logs from the new canon chain. The number of logs can be very - // high, so the events are sent in batches of size around 512. - - // Deleted logs + blocks: - var deletedLogs []*types.Log - for i := len(oldChain) - 1; i >= 0; i-- { - // Also send event for blocks removed from the canon chain. - bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) - - // Collect deleted logs for notification - if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 { - deletedLogs = append(deletedLogs, logs...) - } - if len(deletedLogs) > 512 { - bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) - deletedLogs = nil - } - } + // If any logs need to be fired, do it now. In theory we could avoid creating + // this goroutine if there are no events to fire, but realistically that only + // ever happens if we're reorging empty blocks, which will only happen on idle + // networks where performance is not an issue either way. if len(deletedLogs) > 0 { - bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) - } - - // New logs: - var rebirthLogs []*types.Log - for i := len(newChain) - 1; i >= 1; i-- { - if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 { - rebirthLogs = append(rebirthLogs, logs...)
- } - if len(rebirthLogs) > 512 { - bc.logsFeed.Send(rebirthLogs) - rebirthLogs = nil - } + bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) } if len(rebirthLogs) > 0 { - bc.logsFeed.Send(rebirthLogs) + bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) + } + if len(oldChain) > 0 { + for i := len(oldChain) - 1; i >= 0; i-- { + bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) + } } return nil } @@ -1657,30 +1297,29 @@ type BadBlockReason struct { Receipts types.Receipts `json:"receipts"` Number uint64 `json:"number"` Hash common.Hash `json:"hash"` - Error string `json:"error"` + Error error `json:"error"` } func (b *BadBlockReason) String() string { var receiptString string for i, receipt := range b.Receipts { - receiptString += fmt.Sprintf("\n %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x", + receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) } - version, vcs := version.Info() - platform := fmt.Sprintf("%s %s %s %s", version, runtime.Version(), runtime.GOARCH, runtime.GOOS) - if vcs != "" { - vcs = fmt.Sprintf("\nVCS: %s", vcs) - } - return fmt.Sprintf(` -########## BAD BLOCK ######### -Block: %v (%#x) -Error: %v -Platform: %v%v -Chain config: %#v -Receipts: %v -############################## -`, b.Number, b.Hash, b.Error, platform, vcs, b.ChainConfig, receiptString) + reason := fmt.Sprintf(` + ########## BAD BLOCK ######### + Chain config: %v + + Number: %v + Hash: %#x + %v + + Error: %v + ############################## + `, b.ChainConfig, b.Number, b.Hash, receiptString, b.Error) + + return reason } // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network and the BadBlockReason @@ -1690,7 +1329,8 @@ func (bc *BlockChain) BadBlocks() ([]*types.Block, []*BadBlockReason) { blocks := make([]*types.Block, 0, bc.badBlocks.Len()) reasons := make([]*BadBlockReason, 0, bc.badBlocks.Len()) for _, hash := range bc.badBlocks.Keys() { - if badBlk, exist := bc.badBlocks.Peek(hash); exist { + if blk, exist := bc.badBlocks.Peek(hash); exist { + badBlk := blk.(*badBlock) blocks = append(blocks, badBlk.block) reasons = append(reasons, badBlk.reason) } @@ -1713,7 +1353,7 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e Receipts: receipts, Number: block.NumberU64(), Hash: block.Hash(), - Error: err.Error(), + Error: err, } badBlockCounter.Inc(1) @@ -1772,7 +1412,7 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) } // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) + statedb.StartPrefetcher("chain") defer func() { statedb.StopPrefetcher() }() @@ -1793,13 +1433,13 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) // If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot // diff layer for the block. 
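badBlocks above is a small bounded LRU, and with the non-generic hashicorp/golang-lru this diff moves back to, values come out as interface{} and must be type-asserted, exactly as BadBlocks does with blk.(*badBlock). A standalone sketch of that access pattern:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

type badBlock struct {
	hash   string
	reason string
}

func main() {
	cache, _ := lru.New(10) // bounded: the 11th Add evicts the oldest entry
	cache.Add("0xabc", &badBlock{hash: "0xabc", reason: "invalid state root"})

	for _, key := range cache.Keys() {
		if v, ok := cache.Peek(key); ok { // Peek reads without touching recency
			bb := v.(*badBlock) // non-generic cache: assert back to the value type
			fmt.Println(bb.hash, bb.reason)
		}
	}
}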
if bc.snaps == nil { - return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), false) + return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false) } - return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false) + return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false) } // initSnapshot instantiates a Snapshot instance and adds it to [bc] -func (bc *BlockChain) initSnapshot(b *types.Header) { +func (bc *BlockChain) initSnapshot(b *types.Block) { if bc.cacheConfig.SnapshotLimit <= 0 || bc.snaps != nil { return } @@ -1810,19 +1450,13 @@ func (bc *BlockChain) initSnapshot(b *types.Header) { // // Additionally, we should always repair a snapshot if starting at genesis // if [SnapshotLimit] > 0. - asyncBuild := !bc.cacheConfig.SnapshotWait && b.Number.Uint64() > 0 - noBuild := bc.cacheConfig.SnapshotNoBuild && b.Number.Uint64() > 0 - log.Info("Initializing snapshots", "async", asyncBuild, "rebuild", !noBuild, "headHash", b.Hash(), "headRoot", b.Root) - snapconfig := snapshot.Config{ - CacheSize: bc.cacheConfig.SnapshotLimit, - NoBuild: noBuild, - AsyncBuild: asyncBuild, - SkipVerify: !bc.cacheConfig.SnapshotVerify, - } + async := bc.cacheConfig.SnapshotAsync && b.NumberU64() > 0 + rebuild := !bc.cacheConfig.SkipSnapshotRebuild || b.NumberU64() == 0 + log.Info("Initializing snapshots", "async", async, "rebuild", rebuild, "headHash", b.Hash(), "headRoot", b.Root()) var err error - bc.snaps, err = snapshot.New(snapconfig, bc.db, bc.triedb, b.Hash(), b.Root) + bc.snaps, err = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, b.Hash(), b.Root(), async, rebuild, bc.cacheConfig.SnapshotVerify) if err != nil { - log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root, "err", err, "async", asyncBuild) + log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root(), "err", err, "async", async) } } @@ -1892,7 +1526,7 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error start = time.Now() logged time.Time previousRoot common.Hash - triedb = bc.triedb + triedb = bc.stateCache.TrieDB() writeIndices bool ) // Note: we add 1 since in each iteration, we attempt to re-execute the next block. @@ -1922,7 +1556,7 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error // that the node stops mid-way through snapshot flattening (performed across multiple DB batches). // If snapshot initialization is delayed due to state sync, skip initializing snaps here if !bc.cacheConfig.SnapshotDelayInit { - bc.initSnapshot(parent.Header()) + bc.initSnapshot(parent) } writeIndices = true // Set [writeIndices] to true, so that the indices will be updated from the last accepted tip onwards. 
} @@ -1954,10 +1588,10 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error } } - _, nodes, imgs := triedb.Size() + nodes, imgs := triedb.Size() log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) if previousRoot != (common.Hash{}) { - return triedb.Commit(previousRoot, true) + return triedb.Commit(previousRoot, true, nil) } return nil } @@ -1994,7 +1628,7 @@ func (bc *BlockChain) populateMissingTries() error { startHeight = *bc.cacheConfig.PopulateMissingTries startTime = time.Now() logged time.Time - triedb = bc.triedb + triedb = bc.stateCache.TrieDB() missing = 0 ) @@ -2040,7 +1674,7 @@ func (bc *BlockChain) populateMissingTries() error { } // Commit root to disk so that it can be accessed directly - if err := triedb.Commit(root, false); err != nil { + if err := triedb.Commit(root, false, nil); err != nil { return err } parent = current @@ -2055,7 +1689,7 @@ func (bc *BlockChain) populateMissingTries() error { return fmt.Errorf("failed to write offline pruning success marker: %w", err) } - _, nodes, imgs := triedb.Size() + nodes, imgs := triedb.Size() log.Info("All missing tries populated", "startHeight", startHeight, "lastAcceptedHeight", lastAccepted, "missing", missing, "elapsed", time.Since(startTime), "nodes", nodes, "preimages", imgs) return nil } @@ -2132,84 +1766,48 @@ func (bc *BlockChain) gatherBlockRootsAboveLastAccepted() map[common.Hash]struct return blockRoots } -// TODO: split extras to blockchain_extra.go - -// ResetToStateSyncedBlock reinitializes the state of the blockchain +// ResetState reinitializes the state of the blockchain // to the trie represented by [block.Root()] after updating -// in-memory and on disk current block pointers to [block]. -// Only should be called after state sync has completed. -func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error { +// in-memory current block pointers to [block]. +// Only used in state sync. +func (bc *BlockChain) ResetState(block *types.Block) error { bc.chainmu.Lock() defer bc.chainmu.Unlock() // Update head block and snapshot pointers on disk batch := bc.db.NewBatch() - if err := bc.batchBlockAcceptedIndices(batch, block); err != nil { - return err - } + rawdb.WriteAcceptorTip(batch, block.Hash()) rawdb.WriteHeadBlockHash(batch, block.Hash()) rawdb.WriteHeadHeaderHash(batch, block.Hash()) rawdb.WriteSnapshotBlockHash(batch, block.Hash()) rawdb.WriteSnapshotRoot(batch, block.Root()) - if err := rawdb.WriteSyncPerformed(batch, block.NumberU64()); err != nil { - return err - } - if err := batch.Write(); err != nil { return err } - // if txlookup limit is 0 (uindexing disabled), we don't need to repair the tx index tail. 
- if bc.cacheConfig.TxLookupLimit != 0 { - bc.setTxIndexTail(block.NumberU64()) - } - // Update all in-memory chain markers bc.lastAccepted = block bc.acceptorTip = block - bc.currentBlock.Store(block.Header()) + bc.currentBlock.Store(block) bc.hc.SetCurrentHeader(block.Header()) lastAcceptedHash := block.Hash() - bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb) - + bc.stateCache = state.NewDatabaseWithConfig(bc.db, &trie.Config{ + Cache: bc.cacheConfig.TrieCleanLimit, + Preimages: bc.cacheConfig.Preimages, + }) if err := bc.loadLastState(lastAcceptedHash); err != nil { return err } // Create the state manager - bc.stateManager = NewTrieWriter(bc.triedb, bc.cacheConfig) + bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), bc.cacheConfig) // Make sure the state associated with the block is available head := bc.CurrentBlock() - if !bc.HasState(head.Root) { - return fmt.Errorf("head state missing %d:%s", head.Number, head.Hash()) + if !bc.HasState(head.Root()) { + return fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash()) } bc.initSnapshot(head) return nil } - -// CacheConfig returns a reference to [bc.cacheConfig] -// -// This is used by [miner] to set prefetch parallelism -// during block building. -func (bc *BlockChain) CacheConfig() *CacheConfig { - return bc.cacheConfig -} - -func (bc *BlockChain) setTxIndexTail(newTail uint64) error { - bc.txIndexTailLock.Lock() - defer bc.txIndexTailLock.Unlock() - - tailP := rawdb.ReadTxIndexTail(bc.db) - var tailV uint64 - if tailP != nil { - tailV = *tailP - } - - if newTail > tailV { - log.Info("Repairing tx index tail", "old", tailV, "new", newTail) - rawdb.WriteTxIndexTail(bc.db, newTail) - } - return nil -} diff --git a/core/blockchain_iterator.go b/core/blockchain_iterator.go index e81d4a5761..782c5f7114 100644 --- a/core/blockchain_iterator.go +++ b/core/blockchain_iterator.go @@ -33,7 +33,7 @@ import ( "fmt" "sync" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" ) type blockAndState struct { diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 4c2f81e14b..1159239306 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -27,16 +27,15 @@ package core import ( - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" ) // CurrentHeader retrieves the current head header of the canonical chain. The @@ -47,8 +46,8 @@ func (bc *BlockChain) CurrentHeader() *types.Header { // CurrentBlock retrieves the current head block of the canonical chain. The // block is retrieved from the blockchain's internal cache. 
-func (bc *BlockChain) CurrentBlock() *types.Header { - return bc.currentBlock.Load() +func (bc *BlockChain) CurrentBlock() *types.Block { + return bc.currentBlock.Load().(*types.Block) } // HasHeader checks if a block header is present in the database or not, caching @@ -80,7 +79,8 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := bc.bodyCache.Get(hash); ok { - return cached + body := cached.(*types.Body) + return body } number := bc.hc.GetBlockNumber(hash) if number == nil { @@ -122,7 +122,7 @@ func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { // Short circuit if the block's already in the cache, retrieve otherwise if block, ok := bc.blockCache.Get(hash); ok { - return block + return block.(*types.Block) } block := rawdb.ReadBlock(bc.db, hash, number) if block == nil { @@ -174,17 +174,13 @@ func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*type // GetReceiptsByHash retrieves the receipts for all transactions in a given block. func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { if receipts, ok := bc.receiptsCache.Get(hash); ok { - return receipts + return receipts.(types.Receipts) } number := rawdb.ReadHeaderNumber(bc.db, hash) if number == nil { return nil } - header := bc.GetHeader(hash, *number) - if header == nil { - return nil - } - receipts := rawdb.ReadReceipts(bc.db, hash, *number, header.Time, bc.chainConfig) + receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) if receipts == nil { return nil } @@ -202,7 +198,7 @@ func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash { func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry { // Short circuit if the txlookup already in the cache, retrieve otherwise if lookup, exist := bc.txLookupCache.Get(hash); exist { - return lookup + return lookup.(*rawdb.LegacyTxLookupEntry) } tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash) if tx == nil { @@ -230,9 +226,21 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { return bc.HasState(block.Root()) } +// TrieNode retrieves a blob of data associated with a trie node +// either from ephemeral in-memory cache, or from persistent storage. +func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { + return bc.stateCache.TrieDB().RawNode(hash) +} + +// ContractCode retrieves a blob of data associated with a contract hash +// either from ephemeral in-memory cache, or from persistent storage. +func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) { + return bc.stateCache.ContractCode(common.Hash{}, hash) +} + // State returns a new mutable state based on the current HEAD block. func (bc *BlockChain) State() (*state.StateDB, error) { - return bc.StateAt(bc.CurrentBlock().Root) + return bc.StateAt(bc.CurrentBlock().Root()) } // StateAt returns a new mutable state based on a particular point in time. @@ -268,7 +276,7 @@ func (bc *BlockChain) StateCache() state.Database { // GasLimit returns the gas limit of the current HEAD block. func (bc *BlockChain) GasLimit() uint64 { - return bc.CurrentBlock().GasLimit + return bc.CurrentBlock().GasLimit() } // Genesis retrieves the chain's genesis block. 
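With this change CurrentBlock once again returns a full *types.Block, and State opens a mutable StateDB at that head. A usage sketch; chain is assumed to be an initialized *core.BlockChain, and GetBalance is the standard StateDB accessor:

package chainreader

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/core"
)

// printBalance reads an account balance at the current head using the reader
// API shown above.
func printBalance(chain *core.BlockChain, addr common.Address) error {
	head := chain.CurrentBlock() // *types.Block with this diff applied
	statedb, err := chain.State()
	if err != nil {
		return err
	}
	fmt.Printf("block %d: balance of %s = %s\n",
		head.NumberU64(), addr.Hex(), statedb.GetBalance(addr))
	return nil
}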
@@ -281,11 +289,6 @@ func (bc *BlockChain) GetVMConfig() *vm.Config { return &bc.vmConfig } -// TrieDB retrieves the low level trie database used for data storage. -func (bc *BlockChain) TrieDB() *trie.Database { - return bc.triedb -} - // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) @@ -331,17 +334,3 @@ func (bc *BlockChain) SubscribeAcceptedLogsEvent(ch chan<- []*types.Log) event.S func (bc *BlockChain) SubscribeAcceptedTransactionEvent(ch chan<- NewTxsEvent) event.Subscription { return bc.scope.Track(bc.txAcceptedFeed.Subscribe(ch)) } - -// GetLogs fetches all logs from a given block. -func (bc *BlockChain) GetLogs(hash common.Hash, number uint64) [][]*types.Log { - logs, ok := bc.acceptedLogsCache.Get(hash) // this cache is thread-safe - if ok { - return logs - } - block := bc.GetBlockByHash(hash) - if block == nil { - return nil - } - logs = bc.collectUnflattenedLogs(block, false) - return logs -} diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 9fbf21da6e..6598a51069 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -34,15 +34,12 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/require" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" ) // rewindTest is a test case for chain rollback upon user request. 
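The Subscribe* methods in this hunk all follow the same go-ethereum idiom: an event.Feed fans values out to subscriber channels, and an event.SubscriptionScope tracks the handed-out subscriptions so a single Close() at shutdown cancels them all. A hedged, runnable sketch of that idiom (the removedLogsEvent payload type and the channel buffer size are illustrative assumptions, not coreth types):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

// removedLogsEvent is a stand-in payload type for this sketch.
type removedLogsEvent struct{ reason string }

func main() {
	var (
		feed  event.Feed              // fans each Send out to all subscribers
		scope event.SubscriptionScope // tracks subscriptions for bulk teardown
	)
	ch := make(chan removedLogsEvent, 1)

	// Track ties the subscription's lifetime to the scope, so one
	// scope.Close() unsubscribes every channel the chain handed out.
	scope.Track(feed.Subscribe(ch))
	defer scope.Close()

	feed.Send(removedLogsEvent{reason: "reorg"}) // blocks until delivered
	fmt.Println((<-ch).reason)                   // reorg
}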
@@ -504,12 +501,6 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) { } func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - testRepairWithScheme(t, tt, snapshots, scheme) - } -} - -func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { // It's hard to follow the test case, visualize the input //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(true)) @@ -517,10 +508,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s // Create a temporary persistent database datadir := t.TempDir() - db, err := rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - Ephemeral: true, - }) + db, err := rawdb.NewLevelDBDatabase(datadir, 0, 0, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -528,62 +516,38 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s // Initialize a fresh chain var ( - require = require.New(t) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - gspec = &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: params.TestChainConfig, - Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(params.Ether)}}, - } - signer = types.LatestSigner(gspec.Config) - engine = dummy.NewFullFaker() - config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TriePrefetcherParallelism: 4, - SnapshotLimit: 0, // Disable snapshot by default - StateScheme: scheme, + genesis = (&Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee)}).MustCommit(db) + engine = dummy.NewFullFaker() + config = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + SnapshotLimit: 0, // Disable snapshot by default } ) defer engine.Close() if snapshots { config.SnapshotLimit = 256 - config.SnapshotWait = true } - chain, err := NewBlockChain(db, config, gspec, engine, vm.Config{}, common.Hash{}, false) + chain, err := NewBlockChain(db, config, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) if err != nil { t.Fatalf("Failed to create chain: %v", err) } - defer chain.Stop() lastAcceptedHash := chain.GetBlockByNumber(0).Hash() // If sidechain blocks are needed, make a light chain and import it var sideblocks types.Blocks if tt.sidechainBlocks > 0 { - genDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(genDb, trie.NewDatabase(genDb, nil)) - sideblocks, _, err = GenerateChain(gspec.Config, gspec.ToBlock(), engine, genDb, tt.sidechainBlocks, 10, func(i int, b *BlockGen) { + sideblocks, _, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x01}) - tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), common.Address{0x01}, big.NewInt(10000), params.TxGas, dummy.ApricotPhase3InitialBaseFee, nil), signer, key1) - require.NoError(err) - b.AddTx(tx) }) - require.NoError(err) if _, err := chain.InsertChain(sideblocks); err != nil { t.Fatalf("Failed to import side chain: %v", err) } } - genDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(genDb, trie.NewDatabase(genDb, nil)) - canonblocks, _, err := GenerateChain(gspec.Config, gspec.ToBlock(), engine, genDb, tt.canonicalBlocks, 10, func(i int, b *BlockGen) { + 
canonblocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x02}) b.SetDifficulty(big.NewInt(1000000)) - tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), common.Address{0x02}, big.NewInt(10000), params.TxGas, dummy.ApricotPhase3InitialBaseFee, nil), signer, key1) - require.NoError(err) - b.AddTx(tx) }) - require.NoError(err) if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil { t.Fatalf("Failed to import canonical chain start: %v", err) } @@ -603,21 +567,16 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s } // Pull the plug on the database, simulating a hard crash - chain.triedb.Close() db.Close() - chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us - db, err = rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - Ephemeral: true, - }) + db, err = rawdb.NewLevelDBDatabase(datadir, 0, 0, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } defer db.Close() - newChain, err := NewBlockChain(db, config, gspec, engine, vm.Config{}, lastAcceptedHash, false) + newChain, err := NewBlockChain(db, config, params.TestChainConfig, engine, vm.Config{}, lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -632,7 +591,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadBlock { t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadBlock) } - if head := newChain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock) + if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) } } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 6695ea0464..b0fb98e532 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -32,7 +32,7 @@ package core import ( "testing" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" ) // verifyNoGaps checks that there are no gaps after the initial set of blocks in diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 4c2fa821a5..806a3c6b95 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -34,23 +34,21 @@ import ( "fmt" "math/big" "os" - "path" "strings" "testing" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) // snapshotTestBasic wraps the common testing fields in the snapshot tests. 
type snapshotTestBasic struct { - scheme string // Disk scheme used for storing trie nodes chainBlocks int // Number of blocks to generate for the canonical chain snapshotBlock uint64 // Block number of the relevant snapshot disk layer @@ -60,11 +58,9 @@ type snapshotTestBasic struct { // share fields, set in runtime datadir string - ancient string db ethdb.Database - genDb ethdb.Database + gendb ethdb.Database engine consensus.Engine - gspec *Genesis lastAcceptedHash common.Hash } @@ -72,28 +68,27 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) { // Create a temporary persistent database datadir := t.TempDir() - ancient := path.Join(datadir, "ancient") - db, err := rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - Ephemeral: true, - }) + db, err := rawdb.NewLevelDBDatabase(datadir, 0, 0, "", false) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } // Initialize a fresh chain var ( - gspec = &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: params.TestChainConfig, - } - engine = dummy.NewFullFaker() + genesis = (&Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee)}).MustCommit(db) + engine = dummy.NewFullFaker() + gendb = rawdb.NewMemoryDatabase() + + // Snapshot is enabled, so the first snapshot is created from the Genesis. + // The snapshot memory allowance is 256MB, so no snapshot flush + // will happen during the block insertion. + cacheConfig = DefaultCacheConfig ) - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, engine, vm.Config{}, common.Hash{}, false) + chain, err := NewBlockChain(db, cacheConfig, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) if err != nil { t.Fatalf("Failed to create chain: %v", err) } - genDb, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, basic.chainBlocks, 10, func(i int, b *BlockGen) {}) + blocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, 10, func(i int, b *BlockGen) {}) // genesis as last accepted basic.lastAcceptedHash = chain.GetBlockByNumber(0).Hash() @@ -130,11 +125,9 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Set runtime fields basic.datadir = datadir - basic.ancient = ancient basic.db = db - basic.genDb = genDb + basic.gendb = gendb basic.engine = engine - basic.gspec = gspec return chain, blocks } @@ -146,18 +139,18 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [ if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadBlock { t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadBlock) } - if head := chain.CurrentBlock(); head.Number.Uint64() != basic.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, basic.expHeadBlock) + if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock) } // Check the disk layer, ensure they are matched block := chain.GetBlockByNumber(basic.expSnapshotBottom) if block == nil { - t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) { t.Errorf("The snapshot disk layer root is
incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot()) } else if len(chain.snaps.Snapshots(block.Hash(), -1, false)) != 1 { - t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) } // Check the snapshot, ensure it's integrated @@ -210,9 +203,8 @@ func (basic *snapshotTestBasic) dump() string { func (basic *snapshotTestBasic) teardown() { basic.db.Close() - basic.genDb.Close() + basic.gendb.Close() os.RemoveAll(basic.datadir) - os.RemoveAll(basic.ancient) } // snapshotTest is a test case type for normal snapshot recovery. @@ -229,7 +221,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { // Restart the chain normally chain.Stop() - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -238,7 +230,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { snaptest.verify(t, newchain, blocks) } -// crashSnapshotTest is a test case type for irregular snapshot recovery. +// crashSnapshotTest is a test case type for abnormal snapshot recovery. // It can be used for testing that restart Geth after the crash. type crashSnapshotTest struct { snapshotTestBasic @@ -253,14 +245,9 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // Pull the plug on the database, simulating a hard crash db := chain.db db.Close() - chain.stopWithoutSaving() - chain.triedb.Close() // Start a new blockchain back up and see where the repair leads us - newdb, err := rawdb.Open(rawdb.OpenOptions{ - Directory: snaptest.datadir, - Ephemeral: true, - }) + newdb, err := rawdb.NewLevelDBDatabase(snaptest.datadir, 0, 0, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } @@ -270,13 +257,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // the crash, we do restart twice here: one after the crash and one // after the normal stop. It's used to ensure the broken snapshot // can be detected all the time. - newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err := NewBlockChain(newdb, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } newchain.Stop() - newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err = NewBlockChain(newdb, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -303,7 +290,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { // Insert blocks without enabling snapshot if gapping is required.
chain.Stop() - gappedBlocks, _, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, 10, func(i int, b *BlockGen) {}) + gappedBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, 10, func(i int, b *BlockGen) {}) // Insert a few more blocks without enabling snapshot var cacheConfig = &CacheConfig{ @@ -312,9 +299,8 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { SnapshotLimit: 0, Pruning: true, CommitInterval: 4096, - StateScheme: snaptest.scheme, } - newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -322,7 +308,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { newchain.Stop() // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -357,41 +343,34 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { SnapshotLimit: 0, Pruning: true, CommitInterval: 4096, - StateScheme: snaptest.scheme, } - newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err := NewBlockChain(snaptest.db, config, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - newBlocks, _, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, 10, func(i int, b *BlockGen) {}) + newBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, 10, func(i int, b *BlockGen) {}) newchain.InsertChain(newBlocks) newchain.Stop() - // Restart the chain, the wiper should start working + // Restart the chain, the wiper should start working config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, SnapshotLimit: 256, Pruning: true, CommitInterval: 4096, - SnapshotWait: false, // Don't wait rebuild - StateScheme: snaptest.scheme, } - tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + _, err = NewBlockChain(snaptest.db, config, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - // Simulate the blockchain crash. - tmp.triedb.Close() - tmp.stopWithoutSaving() - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } snaptest.verify(t, newchain, blocks) - newchain.Stop() } // Tests a Geth restart with valid snapshot.
Before the shutdown, all snapshot @@ -411,20 +390,17 @@ func TestRestartWithNewSnapshot(t *testing.T) { // Expected head header : C8 // Expected head block : C4 // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &snapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - expCanonicalBlocks: 8, - expHeadBlock: 4, - expSnapshotBottom: 4, // Initial disk layer built from genesis - }, - } - test.test(t) - test.teardown() + test := &snapshotTest{ + snapshotTestBasic{ + chainBlocks: 8, + snapshotBlock: 4, + expCanonicalBlocks: 8, + expHeadBlock: 4, + expSnapshotBottom: 4, // Initial disk layer built from genesis + }, } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -447,20 +423,17 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) { // // Expected head block : C4 // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &crashSnapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - expCanonicalBlocks: 8, - expHeadBlock: 4, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() + test := &crashSnapshotTest{ + snapshotTestBasic{ + chainBlocks: 8, + snapshotBlock: 4, + expCanonicalBlocks: 8, + expHeadBlock: 4, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -483,20 +456,17 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) { // // Expected head block : C4 // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &crashSnapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - expCanonicalBlocks: 8, - expHeadBlock: 4, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() + test := &crashSnapshotTest{ + snapshotTestBasic{ + chainBlocks: 8, + snapshotBlock: 4, + expCanonicalBlocks: 8, + expHeadBlock: 4, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case @@ -519,20 +489,17 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) { // // Expected head block : C4 // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &crashSnapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - expCanonicalBlocks: 8, - expHeadBlock: 4, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() + test := &crashSnapshotTest{ + snapshotTestBasic{ + chainBlocks: 8, + snapshotBlock: 4, + expCanonicalBlocks: 8, + expHeadBlock: 4, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, } + test.test(t) + test.teardown() } // Tests a Geth was running with snapshot enabled. 
Then restarts without @@ -551,21 +518,18 @@ func TestGappedNewSnapshot(t *testing.T) { // // Expected head block : G // Expected snapshot disk : G - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &gappedSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 0, - expCanonicalBlocks: 10, - expHeadBlock: 0, - expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD - }, - gapped: 2, - } - test.test(t) - test.teardown() + test := &gappedSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + chainBlocks: 8, + snapshotBlock: 0, + expCanonicalBlocks: 10, + expHeadBlock: 0, + expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD + }, + gapped: 2, } + test.test(t) + test.teardown() } // Tests the Geth was running with a complete snapshot and then imports a few @@ -584,19 +548,16 @@ func TestRecoverSnapshotFromWipingCrash(t *testing.T) { // // Expected head block : C4 // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &wipeCrashSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - expCanonicalBlocks: 10, - expHeadBlock: 4, - expSnapshotBottom: 4, - }, - newBlocks: 2, - } - test.test(t) - test.teardown() + test := &wipeCrashSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + chainBlocks: 8, + snapshotBlock: 4, + expCanonicalBlocks: 10, + expHeadBlock: 4, + expSnapshotBottom: 4, + }, + newBlocks: 2, } + test.test(t) + test.teardown() } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 069d8fd8fc..051e3564e4 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -6,72 +6,71 @@ package core import ( "fmt" "math/big" - "os" "testing" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/state/pruner" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/stretchr/testify/require" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/state/pruner" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) var ( archiveConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: false, // Archive mode - SnapshotLimit: 256, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: false, // Archive mode + SnapshotLimit: 256, + AcceptorQueueLimit: 64, } pruningConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 256, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 256, + AcceptorQueueLimit: 64, } ) -func newGwei(n int64) *big.Int { - return new(big.Int).Mul(big.NewInt(n), 
big.NewInt(params.GWei)) -} - func createBlockChain( db ethdb.Database, cacheConfig *CacheConfig, - gspec *Genesis, + chainConfig *params.ChainConfig, lastAcceptedHash common.Hash, ) (*BlockChain, error) { // Import the chain. This runs all block validation rules. blockchain, err := NewBlockChain( db, cacheConfig, - gspec, - dummy.NewFakerWithCallbacks(TestCallbacks), + chainConfig, + dummy.NewDummyEngine(&dummy.ConsensusCallbacks{ + OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { + sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) + return nil, nil, nil + }, + OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) + return nil, nil, nil, nil + }, + }), vm.Config{}, lastAcceptedHash, - false, ) return blockchain, err } func TestArchiveBlockChain(t *testing.T) { - createArchiveBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, archiveConfig, gspec, lastAcceptedHash) + createArchiveBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, archiveConfig, chainConfig, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -81,19 +80,18 @@ func TestArchiveBlockChain(t *testing.T) { } func TestArchiveBlockChainSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: false, // Archive mode - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: false, // Archive mode + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, - gspec, + chainConfig, lastAcceptedHash, ) } @@ -105,8 +103,8 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { } func TestPruningBlockChain(t *testing.T) { - createPruningBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) + createPruningBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -116,20 +114,19 @@ func TestPruningBlockChain(t *testing.T) { } func TestPruningBlockChainSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, // Enable pruning - CommitInterval: 4096, - 
SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, - gspec, + chainConfig, lastAcceptedHash, ) } @@ -147,8 +144,8 @@ type wrappedStateManager struct { func (w *wrappedStateManager) Shutdown() error { return nil } func TestPruningBlockChainUngracefulShutdown(t *testing.T) { - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { - blockchain, err := createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + blockchain, err := createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) if err != nil { return nil, err } @@ -166,20 +163,19 @@ func TestPruningBlockChainUngracefulShutdown(t *testing.T) { } func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, - gspec, + chainConfig, lastAcceptedHash, ) if err != nil { @@ -201,21 +197,20 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { func TestEnableSnapshots(t *testing.T) { // Set snapshots to be disabled the first time, and then enable them on the restart snapLimit := 0 - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { // Import the chain. This runs all block validation rules. blockchain, err := createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: snapLimit, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: snapLimit, + AcceptorQueueLimit: 64, }, - gspec, + chainConfig, lastAcceptedHash, ) if err != nil { @@ -233,13 +228,13 @@ func TestEnableSnapshots(t *testing.T) { } func TestCorruptSnapshots(t *testing.T) { - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { // Delete the snapshot block hash and state root to ensure that if we die in between writing a snapshot // diff layer to disk at any point, we can still recover on restart. 
rawdb.DeleteSnapshotBlockHash(db) rawdb.DeleteSnapshotRoot(db) - return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) + return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -249,9 +244,9 @@ func TestCorruptSnapshots(t *testing.T) { } func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { - create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { // Import the chain. This runs all block validation rules. - blockchain, err := createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) + blockchain, err := createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) if err != nil { return nil, err } @@ -264,32 +259,23 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { return blockchain, nil } + tempDir := t.TempDir() if err := blockchain.CleanBlockRootsAboveLastAccepted(); err != nil { return nil, err } - // get the target root to prune to before stopping the blockchain - targetRoot := blockchain.LastAcceptedBlock().Root() - blockchain.Stop() - - tempDir := t.TempDir() - prunerConfig := pruner.Config{ - Datadir: tempDir, - BloomSize: 256, - } - - pruner, err := pruner.NewPruner(db, prunerConfig) + pruner, err := pruner.NewPruner(db, tempDir, 256) if err != nil { return nil, fmt.Errorf("offline pruning failed (%s, %d): %w", tempDir, 256, err) } + targetRoot := blockchain.LastAcceptedBlock().Root() if err := pruner.Prune(targetRoot); err != nil { return nil, fmt.Errorf("failed to prune blockchain with target root: %s due to: %w", targetRoot, err) } // Re-initialize the blockchain after pruning - return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) + return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) } for _, tt := range tests { - tt := tt t.Run(tt.Name, func(t *testing.T) { t.Parallel() tt.testFunc(t, create) @@ -303,7 +289,11 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - chainDB = rawdb.NewMemoryDatabase() + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() + chainDB = rawdb.NewMemoryDatabase() + lastAcceptedHash common.Hash ) // Ensure that key1 has some funds in the genesis block. @@ -312,8 +302,10 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := createBlockChain(chainDB, pruningConfig, gspec, common.Hash{}) + blockchain, err := createBlockChain(chainDB, pruningConfig, gspec.Config, lastAcceptedHash) if err != nil { t.Fatal(err) } @@ -321,7 +313,9 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { // This call generates a chain of 3 blocks. 
signer := types.HomesteadSigner{} - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 10, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -339,10 +333,10 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { } blockchain.DrainAcceptorQueue() - lastAcceptedHash := blockchain.LastConsensusAcceptedBlock().Hash() + lastAcceptedHash = blockchain.LastConsensusAcceptedBlock().Hash() blockchain.Stop() - blockchain, err = createBlockChain(chainDB, pruningConfig, gspec, lastAcceptedHash) + blockchain, err = createBlockChain(chainDB, pruningConfig, gspec.Config, lastAcceptedHash) if err != nil { t.Fatal(err) } @@ -363,20 +357,18 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, Pruning: false, // Archive mode SnapshotLimit: 256, PopulateMissingTries: &startHeight, // Starting point for re-populating. PopulateMissingTriesParallelism: parallelism, AcceptorQueueLimit: 64, }, - gspec, + gspec.Config, lastAcceptedHash, ) if err != nil { t.Fatal(err) } - defer blockchain.Stop() for _, block := range chain { if !blockchain.HasState(block.Root()) { @@ -394,18 +386,17 @@ func TestRepopulateMissingTries(t *testing.T) { func TestUngracefulAsyncShutdown(t *testing.T) { var ( - create = func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + create = func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain(db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 1000, // ensure channel doesn't block - }, gspec, lastAcceptedHash) + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 1000, // ensure channel doesn't block + }, chainConfig, lastAcceptedHash) if err != nil { return nil, err } @@ -416,6 +407,9 @@ func TestUngracefulAsyncShutdown(t *testing.T) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. 
+ genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -425,8 +419,10 @@ func TestUngracefulAsyncShutdown(t *testing.T) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -434,7 +430,9 @@ func TestUngracefulAsyncShutdown(t *testing.T) { // This call generates a chain of 10 blocks. signer := types.HomesteadSigner{} - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 10, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -534,215 +532,9 @@ func TestUngracefulAsyncShutdown(t *testing.T) { } } -func TestTransactionIndices(t *testing.T) { - // Configure and generate a sample block chain - require := require.New(t) - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - funds = big.NewInt(10000000000000) - gspec = &Genesis{ - Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, - Alloc: GenesisAlloc{addr1: {Balance: funds}}, - } - signer = types.LatestSigner(gspec.Config) - ) - genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewFakerWithCallbacks(TestCallbacks), 128, 10, func(i int, block *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) - require.NoError(err) - block.AddTx(tx) - }) - require.NoError(err) - - blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFakerWithCallbacks(TestCallbacks), genDb, 10, 10, func(i int, block *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) - require.NoError(err) - block.AddTx(tx) - }) - require.NoError(err) - - conf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, - } - - // Init block chain and check all needed indices has been indexed. 
- chainDB := rawdb.NewMemoryDatabase() - chain, err := createBlockChain(chainDB, conf, gspec, common.Hash{}) - require.NoError(err) - - _, err = chain.InsertChain(blocks) - require.NoError(err) - - for _, block := range blocks { - err := chain.Accept(block) - require.NoError(err) - } - chain.DrainAcceptorQueue() - - lastAcceptedBlock := blocks[len(blocks)-1] - require.Equal(lastAcceptedBlock.Hash(), chain.CurrentHeader().Hash()) - - CheckTxIndices(t, nil, lastAcceptedBlock.NumberU64(), chain.db, false) // check all indices has been indexed - chain.Stop() - - // Reconstruct a block chain which only reserves limited tx indices - // 128 blocks were previously indexed. Now we add a new block at each test step. - limits := []uint64{ - 0, /* tip: 129 reserve all (don't run) */ - 131, /* tip: 130 reserve all */ - 140, /* tip: 131 reserve all */ - 64, /* tip: 132, limit:64 */ - 32, /* tip: 133, limit:32 */ - } - for i, l := range limits { - t.Run(fmt.Sprintf("test-%d, limit: %d", i+1, l), func(t *testing.T) { - conf.TxLookupLimit = l - - chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedBlock.Hash()) - require.NoError(err) - - tail := getTail(l, lastAcceptedBlock.NumberU64()) - // check if startup indices are correct - CheckTxIndices(t, tail, lastAcceptedBlock.NumberU64(), chain.db, false) - - newBlks := blocks2[i : i+1] - _, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices updater. - require.NoError(err) - - lastAcceptedBlock = newBlks[0] - err = chain.Accept(lastAcceptedBlock) // Accept the block to trigger indices updater. - require.NoError(err) - chain.DrainAcceptorQueue() - - tail = getTail(l, lastAcceptedBlock.NumberU64()) - // check if indices are updated correctly - CheckTxIndices(t, tail, lastAcceptedBlock.NumberU64(), chain.db, false) - chain.Stop() - }) - } -} - -func getTail(limit uint64, lastAccepted uint64) *uint64 { - if limit == 0 { - return nil - } - var tail uint64 - if lastAccepted > limit { - // tail should be the oldest block number which is indexed - // i.e the first block number that's in the lookup range - tail = lastAccepted - limit + 1 - } - return &tail -} - -func TestTransactionSkipIndexing(t *testing.T) { - // Configure and generate a sample block chain - require := require.New(t) - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - funds = big.NewInt(10000000000000) - gspec = &Genesis{ - Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, - Alloc: GenesisAlloc{addr1: {Balance: funds}}, - } - signer = types.LatestSigner(gspec.Config) - ) - genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewFakerWithCallbacks(TestCallbacks), 5, 10, func(i int, block *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) - require.NoError(err) - block.AddTx(tx) - }) - require.NoError(err) - - blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFakerWithCallbacks(TestCallbacks), genDb, 5, 10, func(i int, block *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) - require.NoError(err) - block.AddTx(tx) - }) - require.NoError(err) - - conf := &CacheConfig{ - 
TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, - SkipTxIndexing: true, - } - - // test1: Init block chain and check all indices has been skipped. - chainDB := rawdb.NewMemoryDatabase() - chain, err := createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}, - func(b *types.Block) { - bNumber := b.NumberU64() - checkTxIndicesHelper(t, nil, bNumber+1, bNumber+1, bNumber, chainDB, false) // check all indices has been skipped - }) - require.NoError(err) - chain.Stop() - - // test2: specify lookuplimit with tx index skipping enabled. Blocks should not be indexed but tail should be updated. - conf.TxLookupLimit = 2 - chainDB = rawdb.NewMemoryDatabase() - chain, err = createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}, - func(b *types.Block) { - bNumber := b.NumberU64() - tail := bNumber - conf.TxLookupLimit + 1 - checkTxIndicesHelper(t, &tail, bNumber+1, bNumber+1, bNumber, chainDB, false) // check all indices has been skipped - }) - require.NoError(err) - chain.Stop() - - // test3: tx index skipping and unindexer disabled. Blocks should be indexed and tail should be updated. - conf.TxLookupLimit = 0 - conf.SkipTxIndexing = false - chainDB = rawdb.NewMemoryDatabase() - chain, err = createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}, - func(b *types.Block) { - bNumber := b.NumberU64() - checkTxIndicesHelper(t, nil, 0, bNumber, bNumber, chainDB, false) // check all indices has been indexed - }) - require.NoError(err) - chain.Stop() - - // now change tx index skipping to true and check that the indices are skipped for the last block - // and old indices are removed up to the tail, but [tail, current) indices are still there. - conf.TxLookupLimit = 2 - conf.SkipTxIndexing = true - chain, err = createAndInsertChain(chainDB, conf, gspec, blocks2[0:1], chain.CurrentHeader().Hash(), - func(b *types.Block) { - bNumber := b.NumberU64() - tail := bNumber - conf.TxLookupLimit + 1 - checkTxIndicesHelper(t, &tail, tail, bNumber-1, bNumber, chainDB, false) - }) - require.NoError(err) - chain.Stop() -} - // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted // correctly in case reorg is called. 
func TestCanonicalHashMarker(t *testing.T) { - testCanonicalHashMarker(t, rawdb.HashScheme) - testCanonicalHashMarker(t, rawdb.PathScheme) -} - -func testCanonicalHashMarker(t *testing.T, scheme string) { var cases = []struct { forkA int forkB int @@ -779,24 +571,28 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { } for _, c := range cases { var ( + db = rawdb.NewMemoryDatabase() gspec = &Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), } - engine = dummy.NewCoinbaseFaker() + genesis = gspec.MustCommit(db) + engine = dummy.NewFaker() ) - _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(i int, gen *BlockGen) {}) + forkA, _, err := GenerateChain(params.TestChainConfig, genesis, engine, db, c.forkA, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } - _, forkB, _, err := GenerateChainWithGenesis(gspec, engine, c.forkB, 10, func(i int, gen *BlockGen) {}) + forkB, _, err := GenerateChain(params.TestChainConfig, genesis, engine, db, c.forkB, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } // Initialize test chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, engine, vm.Config{}, common.Hash{}, false) + diskdb := rawdb.NewMemoryDatabase() + gspec.MustCommit(diskdb) + chain, err := NewBlockChain(diskdb, DefaultCacheConfig, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -847,486 +643,5 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { } } } - chain.Stop() - } -} - -func TestTxLookupBlockChain(t *testing.T) { - cacheConf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, // ensure channel doesn't block - TxLookupLimit: 5, - } - createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) } - for _, tt := range tests { - t.Run(tt.Name, func(t *testing.T) { - tt.testFunc(t, createTxLookupBlockChain) - }) - } -} - -func TestTxLookupSkipIndexingBlockChain(t *testing.T) { - cacheConf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 4, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, // ensure channel doesn't block - TxLookupLimit: 5, - SkipTxIndexing: true, - } - createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) - } - for _, tt := range tests { - t.Run(tt.Name, func(t *testing.T) { - tt.testFunc(t, createTxLookupBlockChain) - }) - } -} - -func TestCreateThenDeletePreByzantium(t *testing.T) { - // We want to use pre-byzantium rules where we have intermediate state roots - // between transactions. 
- config := *params.TestLaunchConfig - config.ByzantiumBlock = nil - config.ConstantinopleBlock = nil - config.PetersburgBlock = nil - config.IstanbulBlock = nil - config.MuirGlacierBlock = nil - testCreateThenDelete(t, &config) -} -func TestCreateThenDeletePostByzantium(t *testing.T) { - testCreateThenDelete(t, params.TestChainConfig) -} - -// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening -// within the same block. -func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { - var ( - engine = dummy.NewFaker() - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - destAddress = crypto.CreateAddress(address, 0) - funds = big.NewInt(params.Ether) // Note: additional funds are provided here compared to go-ethereum so test completes. - ) - - // runtime code is 0x60ffff : PUSH1 0xFF SELFDESTRUCT, a.k.a SELFDESTRUCT(0xFF) - code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...) - initCode := []byte{ - // SSTORE 1:1 - byte(vm.PUSH1), 0x1, - byte(vm.PUSH1), 0x1, - byte(vm.SSTORE), - // Get the runtime-code on the stack - byte(vm.PUSH32)} - initCode = append(initCode, code...) - initCode = append(initCode, []byte{ - byte(vm.PUSH1), 0x0, // offset - byte(vm.MSTORE), - byte(vm.PUSH1), 0x3, // size - byte(vm.PUSH1), 0x0, // offset - byte(vm.RETURN), // return 3 bytes of zero-code - }...) - gspec := &Genesis{ - Config: config, - Alloc: GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, 2, 10, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - Data: initCode, - }) - nonce++ - b.AddTx(tx) - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - To: &destAddress, - }) - b.AddTx(tx) - nonce++ - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{ - //Debug: true, - //Tracer: logger.NewJSONLogger(nil, os.Stdout), - }, common.Hash{}, false) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - // Import the blocks - for _, block := range blocks { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - } -} - -func TestDeleteThenCreate(t *testing.T) { - var ( - engine = dummy.NewFaker() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - factoryAddr = crypto.CreateAddress(address, 0) - funds = big.NewInt(params.Ether) // Note: additional funds are provided here compared to go-ethereum so test completes. 
- ) - /* - contract Factory { - function deploy(bytes memory code) public { - address addr; - assembly { - addr := create2(0, add(code, 0x20), mload(code), 0) - if iszero(extcodesize(addr)) { - revert(0, 0) - } - } - } - } - */ - factoryBIN := common.Hex2Bytes("608060405234801561001057600080fd5b50610241806100206000396000f3fe608060405234801561001057600080fd5b506004361061002a5760003560e01c80627743601461002f575b600080fd5b610049600480360381019061004491906100d8565b61004b565b005b6000808251602084016000f59050803b61006457600080fd5b5050565b600061007b61007684610146565b610121565b905082815260208101848484011115610097576100966101eb565b5b6100a2848285610177565b509392505050565b600082601f8301126100bf576100be6101e6565b5b81356100cf848260208601610068565b91505092915050565b6000602082840312156100ee576100ed6101f5565b5b600082013567ffffffffffffffff81111561010c5761010b6101f0565b5b610118848285016100aa565b91505092915050565b600061012b61013c565b90506101378282610186565b919050565b6000604051905090565b600067ffffffffffffffff821115610161576101606101b7565b5b61016a826101fa565b9050602081019050919050565b82818337600083830152505050565b61018f826101fa565b810181811067ffffffffffffffff821117156101ae576101ad6101b7565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f830116905091905056fea2646970667358221220ea8b35ed310d03b6b3deef166941140b4d9e90ea2c92f6b41eb441daf49a59c364736f6c63430008070033") - - /* - contract C { - uint256 value; - constructor() { - value = 100; - } - function destruct() public payable { - selfdestruct(payable(msg.sender)); - } - receive() payable external {} - } - */ - contractABI := common.Hex2Bytes("6080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c63430008070033") - contractAddr := crypto.CreateAddress2(factoryAddr, [32]byte{}, crypto.Keccak256(contractABI)) - - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _, err := GenerateChainWithGenesis(gspec, engine, 2, 10, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - - // Block 1 - if i == 0 { - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - Data: factoryBIN, - }) - nonce++ - b.AddTx(tx) - - data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &factoryAddr, - Data: data, - }) - b.AddTx(tx) - nonce++ - } else { - // Block 2 - tx, _ := types.SignNewTx(key, signer, 
&types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &contractAddr, - Data: common.Hex2Bytes("2b68b9c6"), // destruct - }) - nonce++ - b.AddTx(tx) - - data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &factoryAddr, // re-creation - Data: data, - }) - b.AddTx(tx) - nonce++ - } - }) - if err != nil { - t.Fatal(err) - } - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - for _, block := range blocks { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - } -} - -// TestTransientStorageReset ensures the transient storage is wiped correctly -// between transactions. -func TestTransientStorageReset(t *testing.T) { - var ( - engine = dummy.NewFaker() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - destAddress = crypto.CreateAddress(address, 0) - funds = big.NewInt(params.Ether) // Note: additional funds are provided here compared to go-ethereum so test completes. - vmConfig = vm.Config{ - ExtraEips: []int{1153}, // Enable transient storage EIP - } - ) - code := append([]byte{ - // TLoad value with location 1 - byte(vm.PUSH1), 0x1, - byte(vm.TLOAD), - - // PUSH location - byte(vm.PUSH1), 0x1, - - // SStore location:value - byte(vm.SSTORE), - }, make([]byte, 32-6)...) - initCode := []byte{ - // TSTORE 1:1 - byte(vm.PUSH1), 0x1, - byte(vm.PUSH1), 0x1, - byte(vm.TSTORE), - - // Get the runtime-code on the stack - byte(vm.PUSH32)} - initCode = append(initCode, code...) - initCode = append(initCode, []byte{ - byte(vm.PUSH1), 0x0, // offset - byte(vm.MSTORE), - byte(vm.PUSH1), 0x6, // size - byte(vm.PUSH1), 0x0, // offset - byte(vm.RETURN), // return 6 bytes of zero-code - }...) - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, 1, 10, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - Data: initCode, - }) - nonce++ - b.AddTxWithVMConfig(tx, vmConfig) - - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - To: &destAddress, - }) - b.AddTxWithVMConfig(tx, vmConfig) - nonce++ - }) - - // Initialize the blockchain with 1153 enabled. 
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vmConfig, common.Hash{}, false) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - // Import the blocks - if _, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("failed to insert into chain: %v", err) - } - // Check the storage - state, err := chain.StateAt(chain.CurrentHeader().Root) - if err != nil { - t.Fatalf("Failed to load state %v", err) - } - loc := common.BytesToHash([]byte{1}) - slot := state.GetState(destAddress, loc) - if slot != (common.Hash{}) { - t.Fatalf("Unexpected dirty storage slot") - } -} - -func TestEIP3651(t *testing.T) { - var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") - engine = dummy.NewCoinbaseFaker() - - // A sender who makes transactions, has some funds - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{ - addr1: {Balance: funds}, - addr2: {Balance: funds}, - // The address 0xAAAA sloads 0x00 and 0x01 - aa: { - Code: []byte{ - byte(vm.PC), - byte(vm.PC), - byte(vm.SLOAD), - byte(vm.SLOAD), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - // The address 0xBBBB calls 0xAAAA - bb: { - Code: []byte{ - byte(vm.PUSH1), 0, // out size - byte(vm.DUP1), // out offset - byte(vm.DUP1), // out insize - byte(vm.DUP1), // in offset - byte(vm.PUSH2), // address - byte(0xaa), - byte(0xaa), - byte(vm.GAS), // gas - byte(vm.DELEGATECALL), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - } - ) - - signer := types.LatestSigner(gspec.Config) - - _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, 1, 10, func(i int, b *BlockGen) { - b.SetCoinbase(aa) - // One transaction to Coinbase - txdata := &types.DynamicFeeTx{ - ChainID: gspec.Config.ChainID, - Nonce: 0, - To: &bb, - Gas: 500000, - GasFeeCap: newGwei(225), - GasTipCap: big.NewInt(2), - AccessList: nil, - Data: []byte{}, - } - tx := types.NewTx(txdata) - tx, _ = types.SignTx(tx, signer, key1) - - b.AddTx(tx) - }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, common.Hash{}, false) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - block := chain.GetBlockByNumber(1) - - // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. - innerGas := vm.GasQuickStep*2 + params.ColdSloadCostEIP2929*2 - expectedGas := params.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is in access list - if block.GasUsed() != expectedGas { - t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) - } - - state, _ := chain.State() - - // 3: Ensure that miner received the gasUsed * (block baseFee + effectiveGasTip). 
- // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, - // as our handling of the coinbase payment is different. - // Note we use block.GasUsed() here as there is only one tx. - actual := state.GetBalance(block.Coinbase()) - tx := block.Transactions()[0] - gasPrice := new(big.Int).Add(block.BaseFee(), tx.EffectiveGasTipValue(block.BaseFee())) - expected := new(big.Int).SetUint64(block.GasUsed() * gasPrice.Uint64()) - if actual.Cmp(expected) != 0 { - t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) - } - - // 4: Ensure the tx sender paid for the gasUsed * (block baseFee + effectiveGasTip). - // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, - // as our handling of the coinbase payment is different. - actual = new(big.Int).Sub(funds, state.GetBalance(addr1)) - if actual.Cmp(expected) != 0 { - t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) - } -} - -func createAndInsertChain(db ethdb.Database, cacheConfig *CacheConfig, gspec *Genesis, blocks types.Blocks, lastAcceptedHash common.Hash, accepted func(*types.Block)) (*BlockChain, error) { - chain, err := createBlockChain(db, cacheConfig, gspec, lastAcceptedHash) - if err != nil { - return nil, err - } - _, err = chain.InsertChain(blocks) - if err != nil { - return nil, err - } - for _, block := range blocks { - err := chain.Accept(block) - if err != nil { - return nil, err - } - chain.DrainAcceptorQueue() - if accepted != nil { - accepted(block) - } - } - - return chain, nil } diff --git a/core/bloom_indexer.go b/core/bloom_indexer.go index ab8bc706f1..d61fc42416 100644 --- a/core/bloom_indexer.go +++ b/core/bloom_indexer.go @@ -20,12 +20,12 @@ import ( "context" "time" - "github.com/ava-labs/coreth/core/bloombits" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/bitutil" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/core/bloombits" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" ) const ( diff --git a/core/bloombits/generator.go b/core/bloombits/generator.go index c0422caad5..f5e7edd38b 100644 --- a/core/bloombits/generator.go +++ b/core/bloombits/generator.go @@ -29,7 +29,7 @@ package bloombits import ( "errors" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" ) var ( diff --git a/core/bloombits/generator_test.go b/core/bloombits/generator_test.go index 40a4749c15..ed1000616d 100644 --- a/core/bloombits/generator_test.go +++ b/core/bloombits/generator_test.go @@ -32,7 +32,7 @@ import ( "math/rand" "testing" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" ) // Tests that batched bloom bits are correctly rotated from the input bloom diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 05cbc54c76..c67eb9adb1 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -29,18 +29,17 @@ package core import ( "context" "encoding/binary" - "errors" "fmt" "sync" "sync/atomic" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + 
"github.com/tenderly/coreth/ethdb" ) // ChainIndexerBackend defines the methods needed to process chain segments in @@ -86,7 +85,7 @@ type ChainIndexer struct { backend ChainIndexerBackend // Background processor generating the index data content children []*ChainIndexer // Child indexers to cascade chain updates to - active atomic.Bool // Flag whether the event loop was started + active uint32 // Flag whether the event loop was started update chan struct{} // Notification channel that headers should be processed quit chan chan error // Quit channel to tear down running goroutines ctx context.Context @@ -177,7 +176,7 @@ func (c *ChainIndexer) Close() error { errs = append(errs, err) } // If needed, tear down the secondary event loop - if c.active.Load() { + if atomic.LoadUint32(&c.active) != 0 { c.quit <- errc if err := <-errc; err != nil { errs = append(errs, err) @@ -207,7 +206,7 @@ func (c *ChainIndexer) Close() error { // queue. func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) { // Mark the chain indexer as active, requiring an additional teardown - c.active.Store(true) + atomic.StoreUint32(&c.active, 1) defer sub.Unsubscribe() @@ -414,7 +413,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com if header == nil { return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4]) } else if header.ParentHash != lastHead { - return common.Hash{}, errors.New("chain reorged during section processing") + return common.Hash{}, fmt.Errorf("chain reorged during section processing") } if err := c.backend.Process(c.ctx, header); err != nil { return common.Hash{}, err diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index 3edf175d3d..8194cc0396 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -35,9 +35,9 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" ) // Runs multiple tests with randomized parameters. diff --git a/core/chain_makers.go b/core/chain_makers.go index df0175bc9b..6294160d21 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -30,25 +30,23 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/consensus/misc" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) // BlockGen creates blocks for testing. // See GenerateChain for a detailed explanation. 
type BlockGen struct { i int - cm *chainMaker parent *types.Block + chain []*types.Block header *types.Header statedb *state.StateDB @@ -57,6 +55,7 @@ type BlockGen struct { receipts []*types.Receipt uncles []*types.Header + config *params.ChainConfig engine consensus.Engine onBlockGenerated func(*types.Block) } @@ -79,11 +78,6 @@ func (b *BlockGen) SetExtra(data []byte) { b.header.Extra = data } -// AppendExtra appends data to the extra data field of the generated block. -func (b *BlockGen) AppendExtra(data []byte) { - b.header.Extra = append(b.header.Extra, data...) -} - // SetNonce sets the nonce field of the generated block. func (b *BlockGen) SetNonce(nonce types.BlockNonce) { b.header.Nonce = nonce @@ -96,74 +90,37 @@ func (b *BlockGen) SetDifficulty(diff *big.Int) { b.header.Difficulty = diff } -// Difficulty returns the currently calculated difficulty of the block. -func (b *BlockGen) Difficulty() *big.Int { - return new(big.Int).Set(b.header.Difficulty) -} - -// SetParentBeaconRoot sets the parent beacon root field of the generated -// block. -func (b *BlockGen) SetParentBeaconRoot(root common.Hash) { - b.header.ParentBeaconRoot = &root - var ( - blockContext = NewEVMBlockContext(b.header, b.cm, &b.header.Coinbase) - vmenv = vm.NewEVM(blockContext, vm.TxContext{}, b.statedb, b.cm.config, vm.Config{}) - ) - ProcessBeaconBlockRoot(root, vmenv, b.statedb) +// AddTx adds a transaction to the generated block. If no coinbase has +// been set, the block's coinbase is set to the zero address. +// +// AddTx panics if the transaction cannot be executed. In addition to +// the protocol-imposed limitations (gas limit, etc.), there are some +// further limitations on the content of transactions that can be +// added. Notably, contract code relying on the BLOCKHASH instruction +// will panic during execution. +func (b *BlockGen) AddTx(tx *types.Transaction) { + b.AddTxWithChain(nil, tx) } -// addTx adds a transaction to the generated block. If no coinbase has +// AddTxWithChain adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // -// There are a few options can be passed as well in order to run some -// customized rules. -// - bc: enables the ability to query historical block hashes for BLOCKHASH -// - vmConfig: extends the flexibility for customizing evm rules, e.g. enable extra EIPs -func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transaction) { +// AddTxWithChain panics if the transaction cannot be executed. In addition to +// the protocol-imposed limitations (gas limit, etc.), there are some +// further limitations on the content of transactions that can be +// added. If contract code relies on the BLOCKHASH instruction, +// the hash of the corresponding block in chain will be returned.
+func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { if b.gasPool == nil { b.SetCoinbase(common.Address{}) } - b.statedb.SetTxContext(tx.Hash(), len(b.txs)) - blockContext := NewEVMBlockContext(b.header, bc, &b.header.Coinbase) - receipt, err := ApplyTransaction(b.cm.config, bc, blockContext, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vmConfig) + b.statedb.Prepare(tx.Hash(), len(b.txs)) + receipt, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{}) if err != nil { panic(err) } b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) - if b.header.BlobGasUsed != nil { - *b.header.BlobGasUsed += receipt.BlobGasUsed - } -} - -// AddTx adds a transaction to the generated block. If no coinbase has -// been set, the block's coinbase is set to the zero address. -// -// AddTx panics if the transaction cannot be executed. In addition to the protocol-imposed -// limitations (gas limit, etc.), there are some further limitations on the content of -// transactions that can be added. Notably, contract code relying on the BLOCKHASH -// instruction will panic during execution if it attempts to access a block number outside -// of the range created by GenerateChain. -func (b *BlockGen) AddTx(tx *types.Transaction) { - b.addTx(nil, vm.Config{}, tx) -} - -// AddTxWithChain adds a transaction to the generated block. If no coinbase has -// been set, the block's coinbase is set to the zero address. -// -// AddTxWithChain panics if the transaction cannot be executed. In addition to the -// protocol-imposed limitations (gas limit, etc.), there are some further limitations on -// the content of transactions that can be added. If contract code relies on the BLOCKHASH -// instruction, the block in chain will be returned. -func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { - b.addTx(bc, vm.Config{}, tx) -} - -// AddTxWithVMConfig adds a transaction to the generated block. If no coinbase has -// been set, the block's coinbase is set to the zero address. -// The evm interpreter can be customized with the provided vm config. -func (b *BlockGen) AddTxWithVMConfig(tx *types.Transaction, config vm.Config) { - b.addTx(nil, config, tx) } // GetBalance returns the balance of the given address at the generated block. @@ -171,7 +128,8 @@ func (b *BlockGen) GetBalance(addr common.Address) *big.Int { return b.statedb.GetBalance(addr) } -// AddUncheckedTx forcefully adds a transaction to the block without any validation. +// AddUncheckedTx forcefully adds a transaction to the block without any +// validation. // // AddUncheckedTx will cause consensus failures when used during real // chain processing. This is best used in conjunction with raw block insertion. @@ -184,26 +142,11 @@ func (b *BlockGen) Number() *big.Int { return new(big.Int).Set(b.header.Number) } -// Timestamp returns the timestamp of the block being generated. -func (b *BlockGen) Timestamp() uint64 { - return b.header.Time -} - // BaseFee returns the EIP-1559 base fee of the block being generated. func (b *BlockGen) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) } -// Gas returns the amount of gas left in the current block. -func (b *BlockGen) Gas() uint64 { - return b.header.GasLimit - b.header.GasUsed -} - -// Signer returns a valid signer instance for the current block. 
-func (b *BlockGen) Signer() types.Signer { - return types.MakeSigner(b.cm.config, b.header.Number, b.header.Time) -} - // AddUncheckedReceipt forcefully adds a receipts to the block without a // backing transaction. // @@ -235,9 +178,9 @@ func (b *BlockGen) PrevBlock(index int) *types.Block { panic(fmt.Errorf("block index %d out of range (%d,%d)", index, -1, b.i)) } if index == -1 { - return b.cm.bottom + return b.parent } - return b.cm.chain[index] + return b.chain[index] } // OffsetTime modifies the time instance of a block, implicitly changing its @@ -245,10 +188,11 @@ func (b *BlockGen) PrevBlock(index int) *types.Block { // tied to chain length directly. func (b *BlockGen) OffsetTime(seconds int64) { b.header.Time += uint64(seconds) - if b.header.Time <= b.cm.bottom.Header().Time { + if b.header.Time <= b.parent.Header().Time { panic("block time out of range") } - b.header.Difficulty = b.engine.CalcDifficulty(b.cm, b.header.Time, b.parent.Header()) + chainreader := &fakeChainReader{config: b.config} + b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header()) } // SetOnBlockGenerated sets a callback function to be invoked after each block is generated @@ -272,223 +216,121 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if config == nil { config = params.TestChainConfig } - if engine == nil { - panic("nil consensus engine") - } - cm := newChainMaker(parent, config, engine) - - genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts, error) { - b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine} - b.header = cm.makeHeader(parent, gap, statedb, b.engine) - - err := ApplyUpgrades(config, &parent.Header().Time, b, statedb) - if err != nil { - return nil, nil, fmt.Errorf("failed to configure precompiles %w", err) + blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) + chainreader := &fakeChainReader{config: config} + genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts, error) { + b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} + b.header = makeHeader(chainreader, config, parent, gap, statedb, b.engine) + + // Mutate the state and block according to any hard-fork specs + timestamp := new(big.Int).SetUint64(b.header.Time) + if !config.IsApricotPhase3(timestamp) { + // avoid dynamic fee extra data override + if daoBlock := config.DAOForkBlock; daoBlock != nil { + limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) + if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 { + if config.DAOForkSupport { + b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra) + } + } + } + } + if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { + misc.ApplyDAOHardFork(statedb) } - // Execute any user modifications to the block if gen != nil { gen(i, b) } - // Finalize and seal the block - block, err := b.engine.FinalizeAndAssemble(cm, b.header, parent.Header(), statedb, b.txs, b.uncles, b.receipts) - if err != nil { - return nil, nil, fmt.Errorf("Failed to finalize and assemble block at index %d: %w", i, err) - } - - // Write state changes to db - root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), false) - if err != nil { - panic(fmt.Sprintf("state write error: %v", err)) - } - if err = triedb.Commit(root, false); 
err != nil { - panic(fmt.Sprintf("trie write error: %v", err)) - } - if b.onBlockGenerated != nil { - b.onBlockGenerated(block) + if b.engine != nil { + // Finalize and seal the block + block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, parent.Header(), statedb, b.txs, b.uncles, b.receipts) + if err != nil { + return nil, nil, fmt.Errorf("Failed to finalize and assemble block at index %d: %w", i, err) + } + + // Write state changes to db + root, err := statedb.Commit(config.IsEIP158(b.header.Number), false) + if err != nil { + panic(fmt.Sprintf("state write error: %v", err)) + } + if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil { + panic(fmt.Sprintf("trie write error: %v", err)) + } + if b.onBlockGenerated != nil { + b.onBlockGenerated(block) + } + return block, b.receipts, nil } - return block, b.receipts, nil + return nil, nil, nil } - // Forcibly use hash-based state scheme for retaining all nodes in disk. - triedb := trie.NewDatabase(db, trie.HashDefaults) - defer triedb.Close() - for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb), nil) + statedb, err := state.New(parent.Root(), state.NewDatabase(db), nil) if err != nil { return nil, nil, err } - block, receipts, err := genblock(i, parent, triedb, statedb) + block, receipt, err := genblock(i, parent, statedb) if err != nil { return nil, nil, err } - - // Post-process the receipts. - // Here we assign the final block hash and other info into the receipt. - // In order for DeriveFields to work, the transaction and receipt lists need to be - // of equal length. If AddUncheckedTx or AddUncheckedReceipt are used, there will be - // extra ones, so we just trim the lists here. - receiptsCount := len(receipts) - txs := block.Transactions() - if len(receipts) > len(txs) { - receipts = receipts[:len(txs)] - } else if len(receipts) < len(txs) { - txs = txs[:len(receipts)] - } - var blobGasPrice *big.Int - if block.ExcessBlobGas() != nil { - blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas()) - } - if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { - panic(err) - } - - // Re-expand to ensure all receipts are returned. - receipts = receipts[:receiptsCount] - - // Advance the chain. - cm.add(block, receipts) + blocks[i] = block + receipts[i] = receipt parent = block } - return cm.chain, cm.receipts, nil + return blocks, receipts, nil } -// GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize -// genesis block to database first according to the provided genesis specification -// then generate chain on top. 
-func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gap uint64, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts, error) { - db := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(db, trie.HashDefaults) - defer triedb.Close() - _, err := genesis.Commit(db, triedb) - if err != nil { - return nil, nil, nil, err +func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent *types.Block, gap uint64, state *state.StateDB, engine consensus.Engine) *types.Header { + var time uint64 + if parent.Time() == 0 { + time = gap + } else { + time = parent.Time() + gap } - blocks, receipts, err := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gap, gen) - return db, blocks, receipts, err -} - -func (cm *chainMaker) makeHeader(parent *types.Block, gap uint64, state *state.StateDB, engine consensus.Engine) *types.Header { - time := parent.Time() + gap // block time is fixed at [gap] seconds + timestamp := new(big.Int).SetUint64(time) var gasLimit uint64 - if cm.config.IsCortina(time) { - gasLimit = params.CortinaGasLimit - } else if cm.config.IsApricotPhase1(time) { + if config.IsApricotPhase1(timestamp) { gasLimit = params.ApricotPhase1GasLimit } else { gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) } header := &types.Header{ - Root: state.IntermediateRoot(cm.config.IsEIP158(parent.Number())), + Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())), ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), - Difficulty: engine.CalcDifficulty(cm, time, parent.Header()), - GasLimit: gasLimit, - Number: new(big.Int).Add(parent.Number(), common.Big1), - Time: time, + Difficulty: engine.CalcDifficulty(chain, time, &types.Header{ + Number: parent.Number(), + Time: time - gap, + Difficulty: parent.Difficulty(), + UncleHash: parent.UncleHash(), + }), + GasLimit: gasLimit, + Number: new(big.Int).Add(parent.Number(), common.Big1), + Time: time, } - if cm.config.IsApricotPhase3(time) { + if chain.Config().IsApricotPhase3(timestamp) { var err error - header.Extra, header.BaseFee, err = dummy.CalcBaseFee(cm.config, parent.Header(), time) + header.Extra, header.BaseFee, err = dummy.CalcBaseFee(chain.Config(), parent.Header(), time) if err != nil { panic(err) } } - if cm.config.IsCancun(header.Number, header.Time) { - var ( - parentExcessBlobGas uint64 - parentBlobGasUsed uint64 - ) - if parent.ExcessBlobGas() != nil { - parentExcessBlobGas = *parent.ExcessBlobGas() - parentBlobGasUsed = *parent.BlobGasUsed() - } - excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed) - header.ExcessBlobGas = &excessBlobGas - header.BlobGasUsed = new(uint64) - header.ParentBeaconRoot = new(common.Hash) - } return header } -// chainMaker contains the state of chain generation. -type chainMaker struct { - bottom *types.Block - engine consensus.Engine - config *params.ChainConfig - chain []*types.Block - chainByHash map[common.Hash]*types.Block - receipts []types.Receipts +type fakeChainReader struct { + config *params.ChainConfig } -func newChainMaker(bottom *types.Block, config *params.ChainConfig, engine consensus.Engine) *chainMaker { - return &chainMaker{ - bottom: bottom, - config: config, - engine: engine, - chainByHash: make(map[common.Hash]*types.Block), - } +// Config returns the chain configuration. 
+func (cr *fakeChainReader) Config() *params.ChainConfig { + return cr.config } -func (cm *chainMaker) add(b *types.Block, r []*types.Receipt) { - cm.chain = append(cm.chain, b) - cm.chainByHash[b.Hash()] = b - cm.receipts = append(cm.receipts, r) -} - -func (cm *chainMaker) blockByNumber(number uint64) *types.Block { - if number == cm.bottom.NumberU64() { - return cm.bottom - } - cur := cm.CurrentHeader().Number.Uint64() - lowest := cm.bottom.NumberU64() + 1 - if number < lowest || number > cur { - return nil - } - return cm.chain[number-lowest] -} - -// ChainReader/ChainContext implementation - -// Config returns the chain configuration (for consensus.ChainReader). -func (cm *chainMaker) Config() *params.ChainConfig { - return cm.config -} - -// Engine returns the consensus engine (for ChainContext). -func (cm *chainMaker) Engine() consensus.Engine { - return cm.engine -} - -func (cm *chainMaker) CurrentHeader() *types.Header { - if len(cm.chain) == 0 { - return cm.bottom.Header() - } - return cm.chain[len(cm.chain)-1].Header() -} - -func (cm *chainMaker) GetHeaderByNumber(number uint64) *types.Header { - b := cm.blockByNumber(number) - if b == nil { - return nil - } - return b.Header() -} - -func (cm *chainMaker) GetHeaderByHash(hash common.Hash) *types.Header { - b := cm.chainByHash[hash] - if b == nil { - return nil - } - return b.Header() -} - -func (cm *chainMaker) GetHeader(hash common.Hash, number uint64) *types.Header { - return cm.GetHeaderByNumber(number) -} - -func (cm *chainMaker) GetBlock(hash common.Hash, number uint64) *types.Block { - return cm.blockByNumber(number) -} +func (cr *fakeChainReader) CurrentHeader() *types.Header { return nil } +func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil } +func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil } +func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } +func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index 668cfba6f6..2d41f1b3a0 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -30,14 +30,13 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" ) func ExampleGenerateChain() { @@ -49,7 +48,6 @@ func ExampleGenerateChain() { addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey) db = rawdb.NewMemoryDatabase() - genDb = rawdb.NewMemoryDatabase() ) // Ensure that key1 has some funds in the genesis block. @@ -57,13 +55,13 @@ func ExampleGenerateChain() { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, } - genesis := gspec.MustCommit(genDb, trie.NewDatabase(genDb, trie.HashDefaults)) + genesis := gspec.MustCommit(db) // This call generates a chain of 3 blocks. The function runs for // each block and adds different features to gen based on the // block index. 
signer := types.HomesteadSigner{} - chain, _, err := GenerateChain(gspec.Config, genesis, dummy.NewCoinbaseFaker(), genDb, 3, 10, func(i int, gen *BlockGen) { + chain, _, err := GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 3, 10, func(i int, gen *BlockGen) { switch i { case 0: // In block 1, addr1 sends addr2 some ether. @@ -84,7 +82,7 @@ } // Import the chain. This runs all block validation rules. - blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { @@ -93,7 +91,7 @@ } state, _ := blockchain.State() - fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number) + fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number()) fmt.Println("balance of addr1:", state.GetBalance(addr1)) fmt.Println("balance of addr2:", state.GetBalance(addr2)) fmt.Println("balance of addr3:", state.GetBalance(addr3)) diff --git a/core/dao_test.go b/core/dao_test.go new file mode 100644 index 0000000000..ea6230fe85 --- /dev/null +++ b/core/dao_test.go @@ -0,0 +1,197 @@ +// (c) 2021-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" +) + +// Tests that DAO-fork enabled clients can properly filter out fork-commencing +// blocks based on their extradata fields.
+func TestDAOForkRangeExtradata(t *testing.T) { + forkBlock := big.NewInt(32) + + // Generate a common prefix for both pro-forkers and non-forkers + db := rawdb.NewMemoryDatabase() + gspec := &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: params.TestApricotPhase2Config, + } + genesis := gspec.MustCommit(db) + prefix, _, _ := GenerateChain(params.TestApricotPhase2Config, genesis, dummy.NewFaker(), db, int(forkBlock.Int64()-1), 10, func(i int, gen *BlockGen) {}) + + // Create the two concurrent, conflicting nodes + proDb := rawdb.NewMemoryDatabase() + gspec.MustCommit(proDb) + + proConf := *params.TestApricotPhase2Config + proConf.DAOForkBlock = forkBlock + proConf.DAOForkSupport = true + + proBc, _ := NewBlockChain(proDb, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer proBc.Stop() + + conDb := rawdb.NewMemoryDatabase() + gspec.MustCommit(conDb) + + conConf := *params.TestApricotPhase2Config + conConf.DAOForkBlock = forkBlock + conConf.DAOForkSupport = false + + conBc, _ := NewBlockChain(conDb, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer conBc.Stop() + + if _, err := proBc.InsertChain(prefix); err != nil { + t.Fatalf("pro-fork: failed to import chain prefix: %v", err) + } + if _, err := conBc.InsertChain(prefix); err != nil { + t.Fatalf("con-fork: failed to import chain prefix: %v", err) + } + // Try to expand both pro-fork and non-fork chains iteratively with the other camp's blocks + for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { + // Create a pro-fork block, and try to feed into the no-fork chain + db = rawdb.NewMemoryDatabase() + gspec.MustCommit(db) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer bc.Stop() + + blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) + for j := 0; j < len(blocks)/2; j++ { + blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] + } + if _, err := bc.InsertChain(blocks); err != nil { + t.Fatalf("failed to import contra-fork chain for expansion: %v", err) + } + if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { + t.Fatalf("failed to commit contra-fork head for expansion: %v", err) + } + blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + if _, err := conBc.InsertChain(blocks); err != nil { + t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0]) + } + // Create a proper no-fork block for the contra-forker + blocks, _, _ = GenerateChain(&conConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + if _, err := conBc.InsertChain(blocks); err != nil { + t.Fatalf("contra-fork chain didn't accept no-fork block: %v", err) + } + // Create a no-fork block, and try to feed into the pro-fork chain + db = rawdb.NewMemoryDatabase() + gspec.MustCommit(db) + bc, _ = NewBlockChain(db, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer bc.Stop() + + blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) + for j := 0; j < len(blocks)/2; j++ { + blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] + } + if _, err := bc.InsertChain(blocks); err != nil { + t.Fatalf("failed to import pro-fork chain for expansion: %v", err) + } + if err :=
bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { + t.Fatalf("failed to commit pro-fork head for expansion: %v", err) + } + blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + if _, err := proBc.InsertChain(blocks); err != nil { + t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0]) + } + // Create a proper pro-fork block for the pro-forker + blocks, _, _ = GenerateChain(&proConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + if _, err := proBc.InsertChain(blocks); err != nil { + t.Fatalf("pro-fork chain didn't accept pro-fork block: %v", err) + } + } + // Verify that contra-forkers accept pro-fork extra-data after forking finishes + db = rawdb.NewMemoryDatabase() + gspec.MustCommit(db) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer bc.Stop() + + blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) + for j := 0; j < len(blocks)/2; j++ { + blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] + } + if _, err := bc.InsertChain(blocks); err != nil { + t.Fatalf("failed to import contra-fork chain for expansion: %v", err) + } + if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { + t.Fatalf("failed to commit contra-fork head for expansion: %v", err) + } + blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + if _, err := conBc.InsertChain(blocks); err != nil { + t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) + } + // Verify that pro-forkers accept contra-fork extra-data after forking finishes + db = rawdb.NewMemoryDatabase() + gspec.MustCommit(db) + bc, _ = NewBlockChain(db, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer bc.Stop() + + blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) + for j := 0; j < len(blocks)/2; j++ { + blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] + } + if _, err := bc.InsertChain(blocks); err != nil { + t.Fatalf("failed to import pro-fork chain for expansion: %v", err) + } + if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { + t.Fatalf("failed to commit pro-fork head for expansion: %v", err) + } + blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) + if _, err := proBc.InsertChain(blocks); err != nil { + t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err) + } +} + +func TestDAOForkSupportPostApricotPhase3(t *testing.T) { + forkBlock := big.NewInt(0) + + conf := *params.TestChainConfig + conf.DAOForkSupport = true + conf.DAOForkBlock = forkBlock + + db := rawdb.NewMemoryDatabase() + gspec := &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: &conf, + } + genesis := gspec.MustCommit(db) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &conf, dummy.NewFaker(), vm.Config{}, common.Hash{}) + defer bc.Stop() + + blocks, _, _ := GenerateChain(&conf, genesis, dummy.NewFaker(), db, 32, 10, func(i int, gen *BlockGen) {}) + + if _, err := bc.InsertChain(blocks); err != nil { + t.Fatalf("failed to import blocks: %v", err) + } +} diff --git a/core/error.go b/core/error.go index
461efa3e28..8c3b8045f8 100644 --- a/core/error.go +++ b/core/error.go @@ -29,7 +29,7 @@ package core import ( "errors" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" ) var ( diff --git a/core/events.go b/core/events.go index 462d26d9ee..9ad589a10c 100644 --- a/core/events.go +++ b/core/events.go @@ -27,8 +27,8 @@ package core import ( - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/types" ) // NewTxsEvent is posted when a batch of transactions enter the transaction pool. diff --git a/core/evm.go b/core/evm.go index 896cbfac12..7517f9bba1 100644 --- a/core/evm.go +++ b/core/evm.go @@ -29,13 +29,10 @@ package core import ( "math/big" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/predicate" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" //"github.com/ethereum/go-ethereum/log" ) @@ -51,37 +48,9 @@ type ChainContext interface { // NewEVMBlockContext creates a new context for use in the EVM. func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address) vm.BlockContext { - predicateBytes, ok := predicate.GetPredicateResultBytes(header.Extra) - if !ok { - return newEVMBlockContext(header, chain, author, nil) - } - // Prior to Durango, the VM enforces the extra data is smaller than or - // equal to this size. After Durango, the VM pre-verifies the extra - // data past the dynamic fee rollup window is valid. - predicateResults, err := predicate.ParseResults(predicateBytes) - if err != nil { - log.Error("failed to parse predicate results creating new block context", "err", err, "extra", header.Extra) - // As mentioned above, we pre-verify the extra data to ensure this never happens. - // If we hit an error, construct a new block context rather than use a potentially half initialized value - // as defense in depth. - return newEVMBlockContext(header, chain, author, nil) - } - return newEVMBlockContext(header, chain, author, predicateResults) -} - -// NewEVMBlockContextWithPredicateResults creates a new context for use in the EVM with an override for the predicate results that is not present -// in header.Extra. -// This function is used to create a BlockContext when the header Extra data is not fully formed yet and it's more efficient to pass in predicateResults -// directly rather than re-encode the latest results when executing each individaul transaction. -func NewEVMBlockContextWithPredicateResults(header *types.Header, chain ChainContext, author *common.Address, predicateResults *predicate.Results) vm.BlockContext { - return newEVMBlockContext(header, chain, author, predicateResults) -} - -func newEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address, predicateResults *predicate.Results) vm.BlockContext { var ( beneficiary common.Address baseFee *big.Int - blobBaseFee *big.Int ) // If we don't have an explicit author (i.e. 
not mining), extract from the header @@ -93,37 +62,27 @@ func newEVMBlockContext(header *types.Header, chain ChainContext, author *common if header.BaseFee != nil { baseFee = new(big.Int).Set(header.BaseFee) } - if header.ExcessBlobGas != nil { - blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas) - } return vm.BlockContext{ CanTransfer: CanTransfer, CanTransferMC: CanTransferMC, Transfer: Transfer, TransferMultiCoin: TransferMultiCoin, GetHash: GetHashFn(header, chain), - PredicateResults: predicateResults, Coinbase: beneficiary, BlockNumber: new(big.Int).Set(header.Number), - Time: header.Time, + Time: new(big.Int).SetUint64(header.Time), Difficulty: new(big.Int).Set(header.Difficulty), BaseFee: baseFee, - BlobBaseFee: blobBaseFee, GasLimit: header.GasLimit, } } // NewEVMTxContext creates a new transaction context for a single transaction. -func NewEVMTxContext(msg *Message) vm.TxContext { - ctx := vm.TxContext{ - Origin: msg.From, - GasPrice: new(big.Int).Set(msg.GasPrice), - BlobHashes: msg.BlobHashes, - } - if msg.BlobGasFeeCap != nil { - ctx.BlobFeeCap = new(big.Int).Set(msg.BlobGasFeeCap) +func NewEVMTxContext(msg Message) vm.TxContext { + return vm.TxContext{ + Origin: msg.From(), + GasPrice: new(big.Int).Set(msg.GasPrice()), } - return ctx } // GetHashFn returns a GetHashFunc which retrieves header hashes by number diff --git a/core/gen_genesis.go b/core/gen_genesis.go index a05cee87f1..7ad2d0e9fc 100644 --- a/core/gen_genesis.go +++ b/core/gen_genesis.go @@ -7,7 +7,7 @@ import ( "errors" "math/big" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" diff --git a/core/genesis.go b/core/genesis.go index bb8de14c80..3d9f0141dc 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -34,17 +34,16 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/trie" ) //go:generate go run github.com/fjl/gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go @@ -67,12 +66,10 @@ type Genesis struct { // These fields are used for consensus tests. Please don't use them // in actual genesis blocks. - Number uint64 `json:"number"` - GasUsed uint64 `json:"gasUsed"` - ParentHash common.Hash `json:"parentHash"` - BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559 - ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844 - BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 + Number uint64 `json:"number"` + GasUsed uint64 `json:"gasUsed"` + ParentHash common.Hash `json:"parentHash"` + BaseFee *big.Int `json:"baseFeePerGas"` } // GenesisAlloc specifies the initial state that is part of the genesis block. 
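// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): a minimal usage sketch of the
// Genesis/GenesisAlloc API documented in the hunk above, assuming the
// post-patch signature in this diff (MustCommit taking only an
// ethdb.Database). The funded address and balance are illustrative
// assumptions, mirroring the test genesis blocks elsewhere in this change set.
// ---------------------------------------------------------------------------
package core_test

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/core"
	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/params"
)

func exampleGenesisAlloc() {
	db := rawdb.NewMemoryDatabase()
	addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")

	// GenesisAlloc maps each address to its initial account state; only the
	// balance is seeded here, but code, storage and nonce work the same way.
	gspec := &core.Genesis{
		Config: params.TestChainConfig,
		Alloc:  core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}},
	}

	// MustCommit writes the genesis block and state to db as the canonical
	// head, panicking on error.
	_ = gspec.MustCommit(db)
}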
@@ -104,17 +101,15 @@ type GenesisAccount struct { // field type overrides for gencodec type genesisSpecMarshaling struct { - Nonce math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - ExtraData hexutil.Bytes - GasLimit math.HexOrDecimal64 - GasUsed math.HexOrDecimal64 - Number math.HexOrDecimal64 - Difficulty *math.HexOrDecimal256 - BaseFee *math.HexOrDecimal256 - Alloc map[common.UnprefixedAddress]GenesisAccount - ExcessBlobGas *math.HexOrDecimal64 - BlobGasUsed *math.HexOrDecimal64 + Nonce math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ExtraData hexutil.Bytes + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Number math.HexOrDecimal64 + Difficulty *math.HexOrDecimal256 + BaseFee *math.HexOrDecimal256 + Alloc map[common.UnprefixedAddress]GenesisAccount } type genesisAccountMarshaling struct { @@ -136,6 +131,7 @@ func (h *storageJSON) UnmarshalText(text []byte) error { } offset := len(h) - len(text)/2 // pad on the left if _, err := hex.Decode(h[offset:], text); err != nil { + fmt.Println(err) return fmt.Errorf("invalid hex storage key/value %q", text) } return nil @@ -162,64 +158,55 @@ func (e *GenesisMismatchError) Error() string { // +------------------------------------------ // db has no genesis | main-net default | genesis // db has genesis | from DB | genesis (if compatible) - -// The argument [genesis] must be specified and must contain a valid chain config. -// If the genesis block has already been set up, then we verify the hash matches the genesis passed in -// and that the chain config contained in genesis is backwards compatible with what is stored in the database. // // The stored chain configuration will be updated if it is compatible (i.e. does not // specify a fork block below the local head block). In case of a conflict, the // error is a *params.ConfigCompatError and the new, unwritten config is returned. -func SetupGenesisBlock( - db ethdb.Database, triedb *trie.Database, genesis *Genesis, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, -) (*params.ChainConfig, common.Hash, error) { +func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, error) { if genesis == nil { - return nil, common.Hash{}, ErrNoGenesis + return nil, ErrNoGenesis } if genesis.Config == nil { - return nil, common.Hash{}, errGenesisNoConfig + return nil, errGenesisNoConfig } // Just commit the new block if there is no stored genesis block. stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { log.Info("Writing genesis to database") - block, err := genesis.Commit(db, triedb) + _, err := genesis.Commit(db) if err != nil { - return genesis.Config, common.Hash{}, err + return genesis.Config, err } - return genesis.Config, block.Hash(), nil + return genesis.Config, nil } - // The genesis block is present(perhaps in ancient database) while the - // state database is not initialized yet. It can happen that the node - // is initialized with an external ancient store. Commit genesis state - // in this case. + // We have the genesis block in database but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) { + if _, err := state.New(header.Root, state.NewDatabase(db), nil); err != nil { // Ensure the stored genesis matches with the given one. 
- hash := genesis.ToBlock().Hash() + hash := genesis.ToBlock(nil).Hash() if hash != stored { - return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} + return genesis.Config, &GenesisMismatchError{stored, hash} } - _, err := genesis.Commit(db, triedb) - return genesis.Config, common.Hash{}, err + _, err := genesis.Commit(db) + return genesis.Config, err } // Check whether the genesis block is already written. - hash := genesis.ToBlock().Hash() + hash := genesis.ToBlock(nil).Hash() if hash != stored { - return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} + return genesis.Config, &GenesisMismatchError{stored, hash} } // Get the existing chain configuration. newcfg := genesis.Config if err := newcfg.CheckConfigForkOrder(); err != nil { - return newcfg, common.Hash{}, err + return newcfg, err } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") rawdb.WriteChainConfig(db, stored, newcfg) - return newcfg, stored, nil + return newcfg, nil } - storedData, _ := json.Marshal(storedcfg) + // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. // we use last accepted block for cfg compatibility check. Note this allows @@ -230,50 +217,25 @@ func SetupGenesisBlock( // when we start syncing from scratch, the last accepted block // will be genesis block if lastBlock == nil { - return newcfg, common.Hash{}, errors.New("missing last accepted block") + return newcfg, fmt.Errorf("missing last accepted block") } height := lastBlock.NumberU64() timestamp := lastBlock.Time() - if skipChainConfigCheckCompatible { - log.Info("skipping verifying activated network upgrades on chain config") - } else { - compatErr := storedcfg.CheckCompatible(newcfg, height, timestamp) - if compatErr != nil && ((height != 0 && compatErr.RewindToBlock != 0) || (timestamp != 0 && compatErr.RewindToTime != 0)) { - return newcfg, stored, compatErr - } - } - // Don't overwrite if the old is identical to the new - if newData, _ := json.Marshal(newcfg); !bytes.Equal(storedData, newData) { - rawdb.WriteChainConfig(db, stored, newcfg) + compatErr := storedcfg.CheckCompatible(newcfg, height, timestamp) + if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { + return newcfg, compatErr } - return newcfg, stored, nil -} - -// IsVerkle indicates whether the state is already stored in a verkle -// tree at genesis time. -func (g *Genesis) IsVerkle() bool { - return g.Config.IsVerkle(new(big.Int).SetUint64(g.Number), g.Timestamp) + rawdb.WriteChainConfig(db, stored, newcfg) + return newcfg, nil } -// ToBlock returns the genesis block according to genesis specification. -func (g *Genesis) ToBlock() *types.Block { - db := rawdb.NewMemoryDatabase() - return g.toBlock(db, trie.NewDatabase(db, g.trieConfig())) -} - -func (g *Genesis) trieConfig() *trie.Config { - if !g.IsVerkle() { - return nil +// ToBlock creates the genesis block and writes state of a genesis specification +// to the given database (or discards it if nil). +func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { + if db == nil { + db = rawdb.NewMemoryDatabase() } - return &trie.Config{ - PathDB: pathdb.Defaults, - IsVerkle: true, - } -} - -// TODO: migrate this function to "flush" for more similarity with upstream. 
-func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block { - statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) + statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil) if err != nil { panic(err) } @@ -293,10 +255,7 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block } // Configure any stateful precompiles that should be enabled in the genesis. - err = ApplyPrecompileActivations(g.Config, nil, types.NewBlockWithHeader(head), statedb) - if err != nil { - panic(fmt.Sprintf("unable to configure precompiles in genesis block: %v", err)) - } + g.Config.CheckConfigurePrecompiles(nil, types.NewBlockWithHeader(head), statedb) for addr, account := range g.Alloc { statedb.AddBalance(addr, account.Balance) @@ -320,46 +279,25 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block if g.Difficulty == nil { head.Difficulty = params.GenesisDifficulty } - if conf := g.Config; conf != nil { - num := new(big.Int).SetUint64(g.Number) - if conf.IsApricotPhase3(g.Timestamp) { - if g.BaseFee != nil { - head.BaseFee = g.BaseFee - } else { - head.BaseFee = new(big.Int).SetInt64(params.ApricotPhase3InitialBaseFee) - } - } - if conf.IsCancun(num, g.Timestamp) { - // EIP-4788: The parentBeaconBlockRoot of the genesis block is always - // the zero hash. This is because the genesis block does not have a parent - // by definition. - head.ParentBeaconRoot = new(common.Hash) - // EIP-4844 fields - head.ExcessBlobGas = g.ExcessBlobGas - head.BlobGasUsed = g.BlobGasUsed - if head.ExcessBlobGas == nil { - head.ExcessBlobGas = new(uint64) - } - if head.BlobGasUsed == nil { - head.BlobGasUsed = new(uint64) - } + if g.Config != nil && g.Config.IsApricotPhase3(common.Big0) { + if g.BaseFee != nil { + head.BaseFee = g.BaseFee + } else { + head.BaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) } } - - statedb.Commit(0, false, false) - // Commit newly generated states into disk if it's not empty. - if root != types.EmptyRootHash { - if err := triedb.Commit(root, true); err != nil { - panic(fmt.Sprintf("unable to commit genesis block: %v", err)) - } + statedb.Commit(false, false) + if err := statedb.Database().TrieDB().Commit(root, true, nil); err != nil { + panic(fmt.Sprintf("unable to commit genesis block: %v", err)) } - return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) + + return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil), nil, false) } // Commit writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block, error) { - block := g.toBlock(db, triedb) +func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { + block := g.ToBlock(db) if block.Number().Sign() != 0 { return nil, errors.New("can't commit genesis block with number > 0") } @@ -381,8 +319,8 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. 
-func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block { - block, err := g.Commit(db, triedb) +func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { + block, err := g.Commit(db) if err != nil { panic(err) } @@ -396,7 +334,7 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big Alloc: GenesisAlloc{addr: {Balance: balance}}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), } - return g.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) + return g.MustCommit(db) } // ReadBlockByHash reads the block with the given hash from the database. diff --git a/core/genesis_test.go b/core/genesis_test.go index 5deb4a8bf9..9fabdac788 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -27,29 +27,25 @@ package core import ( - "bytes" _ "embed" "math/big" "reflect" "testing" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile/contracts/warp" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/pathdb" - "github.com/ava-labs/coreth/utils" "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/stretchr/testify/require" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) -func setupGenesisBlock(db ethdb.Database, triedb *trie.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, triedb, genesis, lastAcceptedHash, false) +func setupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, common.Hash, error) { + conf, err := SetupGenesisBlock(db, genesis, lastAcceptedHash) + stored := rawdb.ReadCanonicalHash(db, 0) + return conf, stored, err } func TestGenesisBlockForTesting(t *testing.T) { @@ -61,13 +57,8 @@ func TestGenesisBlockForTesting(t *testing.T) { } func TestSetupGenesis(t *testing.T) { - testSetupGenesis(t, rawdb.HashScheme) - testSetupGenesis(t, rawdb.PathScheme) -} - -func testSetupGenesis(t *testing.T, scheme string) { apricotPhase1Config := *params.TestApricotPhase1Config - apricotPhase1Config.ApricotPhase1BlockTimestamp = utils.NewUint64(100) + apricotPhase1Config.ApricotPhase1BlockTimestamp = big.NewInt(100) var ( customghash = common.HexToHash("0x1099a11e9e454bd3ef31d688cf21936671966407bc330f051d754b5ce401e7ed") customg = Genesis{ @@ -80,7 +71,7 @@ func testSetupGenesis(t *testing.T, scheme string) { ) rollbackApricotPhase1Config := apricotPhase1Config - rollbackApricotPhase1Config.ApricotPhase1BlockTimestamp = utils.NewUint64(90) + rollbackApricotPhase1Config.ApricotPhase1BlockTimestamp = big.NewInt(90) oldcustomg.Config = &rollbackApricotPhase1Config tests := []struct { name string @@ -92,7 +83,7 @@ func testSetupGenesis(t *testing.T, scheme string) { { name: "genesis without ChainConfig", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return setupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis), common.Hash{}) + return setupGenesisBlock(db, new(Genesis), common.Hash{}) }, wantErr: errGenesisNoConfig, wantConfig: nil, @@ -100,7 +91,7 @@ func testSetupGenesis(t *testing.T, scheme string) { 
{ name: "no block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return setupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil, common.Hash{}) + return setupGenesisBlock(db, nil, common.Hash{}) }, wantErr: ErrNoGenesis, wantConfig: nil, @@ -108,19 +99,18 @@ func testSetupGenesis(t *testing.T, scheme string) { { name: "custom block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - tdb := trie.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) - return setupGenesisBlock(db, tdb, nil, common.Hash{}) + customg.MustCommit(db) + return setupGenesisBlock(db, nil, common.Hash{}) }, wantErr: ErrNoGenesis, + wantHash: customghash, wantConfig: nil, }, { name: "compatible config in DB", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - tdb := trie.NewDatabase(db, newDbConfig(scheme)) - oldcustomg.Commit(db, tdb) - return setupGenesisBlock(db, tdb, &customg, customghash) + oldcustomg.MustCommit(db) + return setupGenesisBlock(db, &customg, customghash) }, wantHash: customghash, wantConfig: customg.Config, @@ -130,21 +120,14 @@ func testSetupGenesis(t *testing.T, scheme string) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { // Commit the 'old' genesis block with ApricotPhase1 transition at 90. // Advance to block #4, past the ApricotPhase1 transition block of customg. - tdb := trie.NewDatabase(db, newDbConfig(scheme)) - genesis, err := oldcustomg.Commit(db, tdb) - if err != nil { - t.Fatal(err) - } + genesis := oldcustomg.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, dummy.NewFullFaker(), vm.Config{}, genesis.Hash(), false) + bc, _ := NewBlockChain(db, DefaultCacheConfig, oldcustomg.Config, dummy.NewFullFaker(), vm.Config{}, common.Hash{}) defer bc.Stop() - _, blocks, _, err := GenerateChainWithGenesis(&oldcustomg, dummy.NewFullFaker(), 4, 25, nil) - if err != nil { - t.Fatal(err) - } + blocks, _, _ := GenerateChain(oldcustomg.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil) bc.InsertChain(blocks) - + bc.CurrentBlock() for _, block := range blocks { if err := bc.Accept(block); err != nil { t.Fatal(err) @@ -152,15 +135,15 @@ func testSetupGenesis(t *testing.T, scheme string) { } // This should return a compatibility error. - return setupGenesisBlock(db, tdb, &customg, bc.lastAccepted.Hash()) + return setupGenesisBlock(db, &customg, bc.lastAccepted.Hash()) }, wantHash: customghash, wantConfig: customg.Config, wantErr: ¶ms.ConfigCompatError{ What: "ApricotPhase1 fork block timestamp", - StoredTime: u64(90), - NewTime: u64(100), - RewindToTime: 89, + StoredConfig: big.NewInt(90), + NewConfig: big.NewInt(100), + RewindTo: 89, }, }, } @@ -193,17 +176,19 @@ func testSetupGenesis(t *testing.T, scheme string) { // regression test for precompile activation after header block func TestNetworkUpgradeBetweenHeadAndAcceptedBlock(t *testing.T) { db := rawdb.NewMemoryDatabase() + customg := Genesis{ Config: params.TestApricotPhase1Config, Alloc: GenesisAlloc{ {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, }, } - bc, _ := NewBlockChain(db, DefaultCacheConfig, &customg, dummy.NewFullFaker(), vm.Config{}, common.Hash{}, false) + genesis := customg.MustCommit(db) + bc, _ := NewBlockChain(db, DefaultCacheConfig, customg.Config, dummy.NewFullFaker(), vm.Config{}, common.Hash{}) defer bc.Stop() // Advance header to block #4, past the ApricotPhase2 timestamp. 
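Since `SetupGenesisBlock` no longer returns the stored genesis hash, the `setupGenesisBlock` test wrapper above recovers it from the canonical-hash index instead. The same pattern works for any caller; a sketch against an in-memory database:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/tenderly/coreth/core"
	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/params"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	g := &core.Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee),
	}
	block := g.MustCommit(db) // writes the block, state, and canonical mappings

	stored := rawdb.ReadCanonicalHash(db, 0) // canonical hash at height 0
	fmt.Println(block.Hash() == stored)      // true
}
```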
- _, blocks, _, _ := GenerateChainWithGenesis(&customg, dummy.NewFullFaker(), 4, 25, nil) + blocks, _, _ := GenerateChain(customg.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil) require := require.New(t) _, err := bc.InsertChain(blocks) @@ -217,122 +202,24 @@ func TestNetworkUpgradeBetweenHeadAndAcceptedBlock(t *testing.T) { require.Equal(blocks[1].Hash(), bc.lastAccepted.Hash()) // header must be bigger than last accepted - require.Greater(block.Time, bc.lastAccepted.Time()) + require.Greater(block.Time(), bc.lastAccepted.Time()) activatedGenesis := customg - apricotPhase2Timestamp := utils.NewUint64(51) + apricotPhase2Timestamp := big.NewInt(51) updatedApricotPhase2Config := *params.TestApricotPhase1Config updatedApricotPhase2Config.ApricotPhase2BlockTimestamp = apricotPhase2Timestamp activatedGenesis.Config = &updatedApricotPhase2Config // assert block is after the activation block - require.Greater(block.Time, *apricotPhase2Timestamp) + require.Greater(block.Time(), apricotPhase2Timestamp.Uint64()) // assert last accepted block is before the activation block - require.Less(bc.lastAccepted.Time(), *apricotPhase2Timestamp) + require.Less(bc.lastAccepted.Time(), apricotPhase2Timestamp.Uint64()) // This should not return any error since the last accepted block is before the activation block. - config, _, err := setupGenesisBlock(db, trie.NewDatabase(db, nil), &activatedGenesis, bc.lastAccepted.Hash()) + config, _, err := setupGenesisBlock(db, &activatedGenesis, bc.lastAccepted.Hash()) require.NoError(err) if !reflect.DeepEqual(config, activatedGenesis.Config) { t.Errorf("returned %v\nwant %v", config, activatedGenesis.Config) } } - -func TestGenesisWriteUpgradesRegression(t *testing.T) { - require := require.New(t) - config := *params.TestChainConfig - genesis := &Genesis{ - Config: &config, - Alloc: GenesisAlloc{ - {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, - }, - } - - db := rawdb.NewMemoryDatabase() - trieDB := trie.NewDatabase(db, trie.HashDefaults) - genesisBlock := genesis.MustCommit(db, trieDB) - - _, _, err := SetupGenesisBlock(db, trieDB, genesis, genesisBlock.Hash(), false) - require.NoError(err) - - genesis.Config.UpgradeConfig.PrecompileUpgrades = []params.PrecompileUpgrade{ - { - Config: warp.NewConfig(utils.NewUint64(51), 0), - }, - } - _, _, err = SetupGenesisBlock(db, trieDB, genesis, genesisBlock.Hash(), false) - require.NoError(err) - - timestamp := uint64(100) - lastAcceptedBlock := types.NewBlock(&types.Header{ - ParentHash: common.Hash{1, 2, 3}, - Number: big.NewInt(100), - GasLimit: 8_000_000, - Extra: nil, - Time: timestamp, - }, nil, nil, nil, trie.NewStackTrie(nil)) - rawdb.WriteBlock(db, lastAcceptedBlock) - - // Attempt restart after the chain has advanced past the activation of the precompile upgrade. - // This tests a regression where the UpgradeConfig would not be written to disk correctly. 
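The tests now follow the older generate-then-insert flow: commit the genesis, build blocks with the seven-argument `GenerateChain` (config, parent block, engine, backing DB, count, timestamp gap, generator callback), insert them, then accept them. A condensed sketch of that flow outside the test harness, with error handling shortened:

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/consensus/dummy"
	"github.com/tenderly/coreth/core"
	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/core/vm"
	"github.com/tenderly/coreth/params"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee),
	}
	genesis := gspec.MustCommit(db)

	bc, err := core.NewBlockChain(db, core.DefaultCacheConfig, gspec.Config,
		dummy.NewFullFaker(), vm.Config{}, common.Hash{})
	if err != nil {
		panic(err)
	}
	defer bc.Stop()

	// Four blocks, 25s apart, built directly on the committed genesis.
	blocks, _, _ := core.GenerateChain(gspec.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil)
	if _, err := bc.InsertChain(blocks); err != nil {
		panic(err)
	}
	for _, b := range blocks {
		if err := bc.Accept(b); err != nil { // mark each block accepted, as the tests do
			panic(err)
		}
	}
}
```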
- _, _, err = SetupGenesisBlock(db, trieDB, genesis, lastAcceptedBlock.Hash(), false) - require.NoError(err) -} - -func newDbConfig(scheme string) *trie.Config { - if scheme == rawdb.HashScheme { - return trie.HashDefaults - } - return &trie.Config{PathDB: pathdb.Defaults} -} - -func TestVerkleGenesisCommit(t *testing.T) { - var verkleTime uint64 = 0 - verkleConfig := ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - CancunTime: &verkleTime, - VerkleTime: &verkleTime, - } - - genesis := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: verkleConfig, - Timestamp: verkleTime, - Difficulty: big.NewInt(0), - Alloc: GenesisAlloc{ - {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, - }, - } - - expected := common.Hex2Bytes("22678ccc2daa04e91013ce47799973bd6c1824f37989d7cea4cbdcd79b39137f") - got := genesis.ToBlock().Root().Bytes() - if !bytes.Equal(got, expected) { - t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) - } - - db := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(db, &trie.Config{IsVerkle: true, PathDB: pathdb.Defaults}) - block := genesis.MustCommit(db, triedb) - if !bytes.Equal(block.Root().Bytes(), expected) { - t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) - } - - // Test that the trie is verkle - if !triedb.IsVerkle() { - t.Fatalf("expected trie to be verkle") - } - - if !rawdb.ExistsAccountTrieNode(db, nil) { - t.Fatal("could not find node") - } -} diff --git a/core/headerchain.go b/core/headerchain.go index fe8f752d41..a20d70c3bf 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -33,13 +33,13 @@ import ( mrand "math/rand" "sync/atomic" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/lru" - "github.com/ethereum/go-ethereum/ethdb" + lru "github.com/hashicorp/golang-lru" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) const ( @@ -70,9 +70,9 @@ type HeaderChain struct { currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) - headerCache *lru.Cache[common.Hash, *types.Header] - numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers - acceptedNumberCache FIFOCache[uint64, *types.Header] // most recent accepted heights to headers (only modified in accept) + headerCache *lru.Cache // Cache for the most recent block headers + tdCache *lru.Cache // Cache for the most recent block total difficulties + numberCache *lru.Cache // Cache for the most recent block numbers rand *mrand.Rand engine consensus.Engine @@ -80,8 +80,10 @@ type HeaderChain struct { // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points // to the parent's interrupt semaphore. 
-func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheConfig *CacheConfig, engine consensus.Engine) (*HeaderChain, error) { - acceptedNumberCache := NewFIFOCache[uint64, *types.Header](cacheConfig.AcceptedCacheSize) +func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine) (*HeaderChain, error) { + headerCache, _ := lru.New(headerCacheLimit) + tdCache, _ := lru.New(tdCacheLimit) + numberCache, _ := lru.New(numberCacheLimit) // Seed a fast but crypto originating random generator seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) @@ -90,13 +92,13 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheCon } hc := &HeaderChain{ - config: config, - chainDb: chainDb, - headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit), - numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), - acceptedNumberCache: acceptedNumberCache, - rand: mrand.New(mrand.NewSource(seed.Int64())), - engine: engine, + config: config, + chainDb: chainDb, + headerCache: headerCache, + tdCache: tdCache, + numberCache: numberCache, + rand: mrand.New(mrand.NewSource(seed.Int64())), + engine: engine, } hc.genesisHeader = hc.GetHeaderByNumber(0) @@ -119,7 +121,8 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheCon // from the cache or database func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { if cached, ok := hc.numberCache.Get(hash); ok { - return &cached + number := cached.(uint64) + return &number } number := rawdb.ReadHeaderNumber(hc.chainDb, hash) if number != nil { @@ -133,7 +136,7 @@ func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { // Short circuit if the header's already in the cache, retrieve otherwise if header, ok := hc.headerCache.Get(hash); ok { - return header + return header.(*types.Header) } header := rawdb.ReadHeader(hc.chainDb, hash, number) if header == nil { @@ -167,9 +170,6 @@ func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { - if cachedHeader, ok := hc.acceptedNumberCache.Get(number); ok { - return cachedHeader - } hash := rawdb.ReadCanonicalHash(hc.chainDb, number) if hash == (common.Hash{}) { return nil diff --git a/core/headerchain_test.go b/core/headerchain_test.go index b70a9802f0..6ff5d0c451 100644 --- a/core/headerchain_test.go +++ b/core/headerchain_test.go @@ -32,13 +32,14 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" ) func verifyUnbrokenCanonchain(bc *BlockChain) error { @@ -73,27 +74,25 @@ func testInsert(t *testing.T, bc *BlockChain, chain []*types.Block, wantErr erro // This test checks status reporting of InsertHeaderChain. 
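The header chain drops the generic `lru.Cache[K, V]` for `hashicorp/golang-lru`, whose `Get` returns `interface{}`, so every cache hit needs a type assertion, as `GetBlockNumber` and `GetHeader` above now do. A standalone sketch of the pattern:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, _ := lru.New(16) // errors only on a non-positive size

	cache.Add("block-hash", uint64(42))
	if v, ok := cache.Get("block-hash"); ok {
		number := v.(uint64) // assertion required with the non-generic cache
		fmt.Println(number)
	}
}
```

The tradeoff is purely ergonomic: the non-generic cache stores `interface{}` values, so a mistyped assertion surfaces as a runtime panic rather than a compile error.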
func TestHeaderInsertion(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{ + db = rawdb.NewMemoryDatabase() + genesis = (&Genesis{ BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), Config: params.TestChainConfig, - } + }).MustCommit(db) ) - genesis := gspec.ToBlock() - chain, err := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) + chain, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig, dummy.NewFaker(), vm.Config{}, common.Hash{}) if err != nil { t.Fatal(err) } - defer chain.Stop() - // chain A: G->A1->A2...A128 - chainA, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(genesis.Header()), dummy.NewCoinbaseFaker(), db, 128, 10, func(i int, b *BlockGen) { + chainA, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(genesis.Header()), dummy.NewFaker(), db, 128, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0: byte(10), 19: byte(i)}) }) // chain B: G->A1->B2...B128 - chainB, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(chainA[0].Header()), dummy.NewCoinbaseFaker(), db, 128, 10, func(i int, b *BlockGen) { + chainB, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(chainA[0].Header()), dummy.NewFaker(), db, 128, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0: byte(10), 19: byte(i)}) }) + log.Root().SetHandler(log.StdoutHandler) // Inserting 64 headers on an empty chain testInsert(t, chain, chainA[:64], nil) diff --git a/core/mkalloc.go b/core/mkalloc.go index f481249c60..d05cdde787 100644 --- a/core/mkalloc.go +++ b/core/mkalloc.go @@ -28,10 +28,12 @@ // +build none /* -The mkalloc tool creates the genesis allocation constants in genesis_alloc.go -It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. - go run mkalloc.go genesis.json + The mkalloc tool creates the genesis allocation constants in genesis_alloc.go + It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. 
+ + go run mkalloc.go genesis.json + */ package main @@ -40,55 +42,32 @@ import ( "fmt" "math/big" "os" + "sort" "strconv" - "github.com/ava-labs/coreth/core" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/exp/slices" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/rlp" ) -type allocItem struct { - Addr *big.Int - Balance *big.Int - Misc *allocItemMisc `rlp:"optional"` -} +type allocItem struct{ Addr, Balance *big.Int } -type allocItemMisc struct { - Nonce uint64 - Code []byte - Slots []allocItemStorageItem -} +type allocList []allocItem -type allocItemStorageItem struct { - Key common.Hash - Val common.Hash -} +func (a allocList) Len() int { return len(a) } +func (a allocList) Less(i, j int) bool { return a[i].Addr.Cmp(a[j].Addr) < 0 } +func (a allocList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func makelist(g *core.Genesis) []allocItem { - items := make([]allocItem, 0, len(g.Alloc)) +func makelist(g *core.Genesis) allocList { + a := make(allocList, 0, len(g.Alloc)) for addr, account := range g.Alloc { - var misc *allocItemMisc if len(account.Storage) > 0 || len(account.Code) > 0 || account.Nonce != 0 { - misc = &allocItemMisc{ - Nonce: account.Nonce, - Code: account.Code, - Slots: make([]allocItemStorageItem, 0, len(account.Storage)), - } - for key, val := range account.Storage { - misc.Slots = append(misc.Slots, allocItemStorageItem{key, val}) - } - slices.SortFunc(misc.Slots, func(a, b allocItemStorageItem) int { - return a.Key.Cmp(b.Key) - }) + panic(fmt.Sprintf("can't encode account %x", addr)) } bigAddr := new(big.Int).SetBytes(addr.Bytes()) - items = append(items, allocItem{bigAddr, account.Balance, misc}) + a = append(a, allocItem{bigAddr, account.Balance}) } - slices.SortFunc(items, func(a, b allocItem) int { - return a.Addr.Cmp(b.Addr) - }) - return items + sort.Sort(a) + return a } func makealloc(g *core.Genesis) string { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index d63da4782a..4471978255 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -30,15 +30,13 @@ import ( "bytes" "encoding/binary" "errors" - "math/big" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" ) // ReadCanonicalHash retrieves the hash assigned to a canonical block number. @@ -86,7 +84,7 @@ type NumberHash struct { Hash common.Hash } -// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain +// ReadAllHashesInRange retrieves all the hashes assigned to blocks at a certain // heights, both canonical and reorged forks included. // This method considers both limits to be _inclusive_. func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash { @@ -205,11 +203,12 @@ func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { // ReadHeaderRLP retrieves a block header in its raw RLP database encoding. func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + // Then try to look up the data in leveldb. 
data, _ := db.Get(headerKey(number, hash)) if len(data) > 0 { return data } - return nil + return nil // Can't find the data anywhere. } // HasHeader verifies the existence of a block header corresponding to the hash. @@ -227,7 +226,7 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header return nil } header := new(types.Header) - if err := rlp.DecodeBytes(data, header); err != nil { + if err := rlp.Decode(bytes.NewReader(data), header); err != nil { log.Error("Invalid block header RLP", "hash", hash, "err", err) return nil } @@ -273,11 +272,12 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + // Then try to look up the data in leveldb. data, _ := db.Get(blockBodyKey(number, hash)) if len(data) > 0 { return data } - return nil + return nil // Can't find the data anywhere. } // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical @@ -313,7 +313,7 @@ func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body { return nil } body := new(types.Body) - if err := rlp.DecodeBytes(data, body); err != nil { + if err := rlp.Decode(bytes.NewReader(data), body); err != nil { log.Error("Invalid block body RLP", "hash", hash, "err", err) return nil } @@ -347,11 +347,12 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding. func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { + // Then try to look up the data in leveldb. data, _ := db.Get(blockReceiptsKey(number, hash)) if len(data) > 0 { return data } - return nil + return nil // Can't find the data anywhere. } // ReadRawReceipts retrieves all the transaction receipts belonging to a block. @@ -383,31 +384,22 @@ func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Rec // The current implementation populates these metadata fields by reading the receipts' // corresponding block body, so if the block body is not found it will return nil even // if the receipt itself is stored. -func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64, config *params.ChainConfig) types.Receipts { +func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts { // We're deriving many fields from the block body, retrieve beside the receipt receipts := ReadRawReceipts(db, hash, number) if receipts == nil { return nil } + header := ReadHeader(db, hash, number) + if header == nil { + return nil + } body := ReadBody(db, hash, number) if body == nil { log.Error("Missing body but have receipt", "hash", hash, "number", number) return nil } - header := ReadHeader(db, hash, number) - - var baseFee *big.Int - if header == nil { - baseFee = big.NewInt(0) - } else { - baseFee = header.BaseFee - } - // Compute effective blob gas price. 
- var blobGasPrice *big.Int - if header != nil && header.ExcessBlobGas != nil { - blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas) - } - if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil { + if err := receipts.DeriveFields(config, hash, number, header.Time, body.Transactions); err != nil { log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) return nil } @@ -440,11 +432,10 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { // storedReceiptRLP is the storage encoding of a receipt. // Re-definition in core/types/receipt.go. -// TODO: Re-use the existing definition. type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 - Logs []*types.Log + Logs []*types.LogForStorage } // ReceiptLogs is a barebone version of ReceiptForStorage which only keeps @@ -460,7 +451,10 @@ func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error { if err := s.Decode(&stored); err != nil { return err } - r.Logs = stored.Logs + r.Logs = make([]*types.Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*types.Log)(log) + } return nil } @@ -485,9 +479,9 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t return nil } -// ReadLogs retrieves the logs for all transactions in a block. In case -// receipts is not found, a nil is returned. -// Note: ReadLogs does not derive unstored log fields. +// ReadLogs retrieves the logs for all transactions in a block. The log fields +// are populated with metadata. In case the receipts or the block body +// are not found, a nil is returned. func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log { // Retrieve the flattened receipt slice data := ReadReceiptsRLP(db, hash, number) @@ -500,6 +494,15 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log { return nil } + body := ReadBody(db, hash, number) + if body == nil { + log.Error("Missing body but have receipt", "hash", hash, "number", number) + return nil + } + if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil { + log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) + return nil + } logs := make([][]*types.Log, len(receipts)) for i, receipt := range receipts { logs[i] = receipt.Logs @@ -522,7 +525,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block { if body == nil { return nil } - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles).WithExtData(body.Version, body.ExtData) + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.Version, body.ExtData) } // WriteBlock serializes a block into the database, header and body separately. @@ -585,22 +588,3 @@ func ReadHeadBlock(db ethdb.Reader) *types.Block { } return ReadBlock(db, headBlockHash, *headBlockNumber) } - -// ReadTxIndexTail retrieves the number of oldest indexed block -// whose transaction indices has been indexed. -func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 { - data, _ := db.Get(txIndexTailKey) - if len(data) != 8 { - return nil - } - number := binary.BigEndian.Uint64(data) - return &number -} - -// WriteTxIndexTail stores the number of oldest indexed block -// into database. 
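The accessor changes above swap `rlp.DecodeBytes` for the stream form `rlp.Decode(bytes.NewReader(...), ...)`; the two are interchangeable for an in-memory payload. A self-contained round trip over a toy struct (not a coreth type) showing both:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tenderly/coreth/rlp"
)

type pair struct {
	A uint64
	B string
}

func main() {
	enc, err := rlp.EncodeToBytes(pair{A: 7, B: "x"})
	if err != nil {
		panic(err)
	}

	var viaBytes pair
	if err := rlp.DecodeBytes(enc, &viaBytes); err != nil { // one-shot decode
		panic(err)
	}

	var viaStream pair
	if err := rlp.Decode(bytes.NewReader(enc), &viaStream); err != nil { // reader-based decode
		panic(err)
	}

	fmt.Println(viaBytes == viaStream) // true
}
```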
-func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) { - if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil { - log.Crit("Failed to store the transaction index tail", "err", err) - } -} diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 27c7cf4a95..8e712f8b65 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -25,10 +25,10 @@ import ( "reflect" "testing" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" "golang.org/x/crypto/sha3" ) diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go index 511fa39d8f..ebad242eb5 100644 --- a/core/rawdb/accessors_indexes.go +++ b/core/rawdb/accessors_indexes.go @@ -30,12 +30,12 @@ import ( "bytes" "math/big" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" ) // ReadTxLookupEntry retrieves the positional metadata associated with a transaction @@ -140,12 +140,8 @@ func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) if blockHash == (common.Hash{}) { return nil, common.Hash{}, 0, 0 } - blockHeader := ReadHeader(db, blockHash, *blockNumber) - if blockHeader == nil { - return nil, common.Hash{}, 0, 0 - } // Read all the receipts from the block and return the one with the matching hash - receipts := ReadReceipts(db, blockHash, *blockNumber, blockHeader.Time, config) + receipts := ReadReceipts(db, blockHash, *blockNumber, config) for receiptIndex, receipt := range receipts { if receipt.TxHash == hash { return receipt, blockHash, *blockNumber, uint64(receiptIndex) diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index 1930d81e91..747784d4d9 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -18,17 +18,40 @@ package rawdb import ( "bytes" + "hash" "math/big" "testing" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/internal/blocktest" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "golang.org/x/crypto/sha3" ) -var newTestHasher = blocktest.NewHasher +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. +type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) { + h.hasher.Write(key) + h.hasher.Write(val) +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} // Tests that positional lookup metadata can be stored and retrieved. 
 func TestLookupStorage(t *testing.T) {
@@ -75,7 +98,7 @@ func TestLookupStorage(t *testing.T) {
 		tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
 		txs := []*types.Transaction{tx1, tx2, tx3}
 
-		block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
+		block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher(), nil, true)
 
 		// Check that no transactions entries are in a pristine database
 		for i, tx := range txs {
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index c6ad7645a3..506736b9b0 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -30,11 +30,11 @@ import (
 	"encoding/json"
 	"time"
 
-	"github.com/ava-labs/coreth/params"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/tenderly/coreth/ethdb"
+	"github.com/tenderly/coreth/params"
+	"github.com/tenderly/coreth/rlp"
 )
 
 // ReadDatabaseVersion retrieves the version number of the database.
@@ -107,10 +107,10 @@ const crashesToKeep = 10
 func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) {
 	var uncleanShutdowns crashList
 	// Read old data
-	if data, err := db.Get(uncleanShutdownKey); err == nil {
-		if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
-			return nil, 0, err
-		}
+	if data, err := db.Get(uncleanShutdownKey); err != nil {
+		log.Warn("Error reading unclean shutdown markers", "error", err)
+	} else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
+		return nil, 0, err
 	}
 	var discarded = uncleanShutdowns.Discarded
 	var previous = make([]uint64, len(uncleanShutdowns.Recent))
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index 06a136ba89..00230385df 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -30,6 +30,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/tenderly/coreth/ethdb"
 )
 
 // ReadSnapshotRoot retrieves the root of the block whose state is contained in
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 03cbf44b6c..d747f5b508 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -27,11 +27,9 @@
 package rawdb
 
 import (
-	"encoding/binary"
-
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/tenderly/coreth/ethdb"
 )
 
 // ReadPreimage retrieves a single preimage of the provided hash.
@@ -40,17 +38,6 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	return data
 }
 
-// WritePreimages writes the provided set of preimages to the database.
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
-	for hash, preimage := range preimages {
-		if err := db.Put(preimageKey(hash), preimage); err != nil {
-			log.Crit("Failed to store trie preimage", "err", err)
-		}
-	}
-	preimageCounter.Inc(int64(len(preimages)))
-	preimageHitCounter.Inc(int64(len(preimages)))
-}
-
 // ReadCode retrieves the contract code of the provided code hash.
 func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	// Try with the prefixed code scheme first and only. The legacy scheme was never used in coreth.
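`PushUncleanShutdownMarker`, kept by this revert, records a start marker and reports any markers left over from earlier runs. A sketch of the expected round trip against an in-memory store, assuming the unshown remainder of the function keeps the standard upstream semantics (append the current timestamp, persist, return the prior list):

```go
package main

import (
	"fmt"

	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/ethdb/memorydb"
)

func main() {
	db := memorydb.New()

	// First start: the read fails (no key yet), a warning is logged,
	// and there are no earlier markers to report.
	previous, discarded, err := rawdb.PushUncleanShutdownMarker(db)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(previous), discarded) // 0 0

	// A second start without a clean shutdown sees the first marker.
	previous, _, _ = rawdb.PushUncleanShutdownMarker(db)
	fmt.Println(len(previous)) // 1
}
```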
@@ -58,6 +45,12 @@ func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { return data } +// ReadTrieNode retrieves the trie node of the provided hash. +func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, _ := db.Get(hash.Bytes()) + return data +} + // HasCode checks if the contract code corresponding to the // provided code hash is present in the db. func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { @@ -66,81 +59,47 @@ func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { return ok } -// WriteCode writes the provided contract code database. -func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { - if err := db.Put(codeKey(hash), code); err != nil { - log.Crit("Failed to store contract code", "err", err) - } -} - -// DeleteCode deletes the specified contract code from the database. -func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { - if err := db.Delete(codeKey(hash)); err != nil { - log.Crit("Failed to delete contract code", "err", err) - } -} - -// ReadStateID retrieves the state id with the provided state root. -func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 { - data, err := db.Get(stateIDKey(root)) - if err != nil || len(data) == 0 { - return nil - } - number := binary.BigEndian.Uint64(data) - return &number -} - -// WriteStateID writes the provided state lookup to database. -func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) { - var buff [8]byte - binary.BigEndian.PutUint64(buff[:], id) - if err := db.Put(stateIDKey(root), buff[:]); err != nil { - log.Crit("Failed to store state ID", "err", err) - } +// HasTrieNode checks if the trie node with the provided hash is present in db. +func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { + ok, _ := db.Has(hash.Bytes()) + return ok } -// DeleteStateID deletes the specified state lookup from the database. -func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) { - if err := db.Delete(stateIDKey(root)); err != nil { - log.Crit("Failed to delete state ID", "err", err) +// WritePreimages writes the provided set of preimages to the database. +func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(preimageKey(hash), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) } -// ReadPersistentStateID retrieves the id of the persistent state from the database. -func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 { - data, _ := db.Get(persistentStateIDKey) - if len(data) != 8 { - return 0 +// WriteCode writes the provided contract code database. +func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { + if err := db.Put(codeKey(hash), code); err != nil { + log.Crit("Failed to store contract code", "err", err) } - return binary.BigEndian.Uint64(data) } -// WritePersistentStateID stores the id of the persistent state into database. -func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) { - if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil { - log.Crit("Failed to store the persistent state ID", "err", err) +// WriteTrieNode writes the provided trie node database. 
+func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { + if err := db.Put(hash.Bytes(), node); err != nil { + log.Crit("Failed to store trie node", "err", err) } } -// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at -// the last shutdown. -func ReadTrieJournal(db ethdb.KeyValueReader) []byte { - data, _ := db.Get(trieJournalKey) - return data -} - -// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at -// shutdown. -func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) { - if err := db.Put(trieJournalKey, journal); err != nil { - log.Crit("Failed to store tries journal", "err", err) +// DeleteCode deletes the specified contract code from the database. +func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(codeKey(hash)); err != nil { + log.Crit("Failed to delete contract code", "err", err) } } -// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at -// the last shutdown. -func DeleteTrieJournal(db ethdb.KeyValueWriter) { - if err := db.Delete(trieJournalKey); err != nil { - log.Crit("Failed to remove tries journal", "err", err) +// DeleteTrieNode deletes the specified trie node from the database. +func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(hash.Bytes()); err != nil { + log.Crit("Failed to delete trie node", "err", err) } } diff --git a/core/rawdb/accessors_state_sync.go b/core/rawdb/accessors_state_sync.go index baf7168491..2ef9a88ece 100644 --- a/core/rawdb/accessors_state_sync.go +++ b/core/rawdb/accessors_state_sync.go @@ -4,12 +4,9 @@ package rawdb import ( - "encoding/binary" - - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/ethdb" ) // ReadSyncRoot reads the root corresponding to the main trie of an in-progress @@ -86,12 +83,13 @@ func ClearSyncSegments(db ethdb.KeyValueStore, root common.Hash) error { segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength) copy(segmentsPrefix, syncSegmentsPrefix) copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:]) - return ClearPrefix(db, segmentsPrefix, syncSegmentsKeyLength) + + return ClearPrefix(db, segmentsPrefix) } // ClearAllSyncSegments removes all segment markers from db func ClearAllSyncSegments(db ethdb.KeyValueStore) error { - return ClearPrefix(db, syncSegmentsPrefix, syncSegmentsKeyLength) + return ClearPrefix(db, syncSegmentsPrefix) } // UnpackSyncSegmentKey returns the root and start position for a trie segment @@ -130,12 +128,12 @@ func ClearSyncStorageTrie(db ethdb.KeyValueStore, root common.Hash) error { accountsPrefix := make([]byte, len(syncStorageTriesPrefix)+common.HashLength) copy(accountsPrefix, syncStorageTriesPrefix) copy(accountsPrefix[len(syncStorageTriesPrefix):], root[:]) - return ClearPrefix(db, accountsPrefix, syncStorageTriesKeyLength) + return ClearPrefix(db, accountsPrefix) } // ClearAllSyncStorageTries removes all storage tries added for syncing from db func ClearAllSyncStorageTries(db ethdb.KeyValueStore) error { - return ClearPrefix(db, syncStorageTriesPrefix, syncStorageTriesKeyLength) + return ClearPrefix(db, syncStorageTriesPrefix) } // UnpackSyncStorageTrieKey returns the root and account for a storage trie @@ -155,39 +153,3 @@ func packSyncStorageTrieKey(root common.Hash, account common.Hash) []byte { bytes = append(bytes, account[:]...) 
return bytes } - -// WriteSyncPerformed logs an entry in [db] indicating the VM state synced to [blockNumber]. -func WriteSyncPerformed(db ethdb.KeyValueWriter, blockNumber uint64) error { - syncPerformedPrefixLen := len(syncPerformedPrefix) - bytes := make([]byte, syncPerformedPrefixLen+wrappers.LongLen) - copy(bytes[:syncPerformedPrefixLen], syncPerformedPrefix) - binary.BigEndian.PutUint64(bytes[syncPerformedPrefixLen:], blockNumber) - return db.Put(bytes, []byte{0x01}) -} - -// NewSyncPerformedIterator returns an iterator over all block numbers the VM -// has state synced to. -func NewSyncPerformedIterator(db ethdb.Iteratee) ethdb.Iterator { - return NewKeyLengthIterator(db.NewIterator(syncPerformedPrefix, nil), syncPerformedKeyLength) -} - -// UnpackSyncPerformedKey returns the block number from keys the iterator returned -// from NewSyncPerformedIterator. -func UnpackSyncPerformedKey(key []byte) uint64 { - return binary.BigEndian.Uint64(key[len(syncPerformedPrefix):]) -} - -// GetLatestSyncPerformed returns the latest block number state synced performed to. -func GetLatestSyncPerformed(db ethdb.Iteratee) uint64 { - it := NewSyncPerformedIterator(db) - defer it.Release() - - var latestSyncPerformed uint64 - for it.Next() { - syncPerformed := UnpackSyncPerformedKey(it.Key()) - if syncPerformed > latestSyncPerformed { - latestSyncPerformed = syncPerformed - } - } - return latestSyncPerformed -} diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 0fbec54093..098c453125 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -30,16 +30,14 @@ import ( "bytes" "fmt" "os" - "path/filepath" "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/leveldb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/ethdb/pebble" "github.com/ethereum/go-ethereum/log" "github.com/olekukonko/tablewriter" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/ethdb/leveldb" + "github.com/tenderly/coreth/ethdb/memorydb" ) // nofreezedb is a database wrapper that disables freezer data retrievals. @@ -47,83 +45,6 @@ type nofreezedb struct { ethdb.KeyValueStore } -// HasAncient returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) { - return false, errNotSupported -} - -// Ancient returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) { - return nil, errNotSupported -} - -// AncientRange returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) { - return nil, errNotSupported -} - -// Ancients returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) Ancients() (uint64, error) { - return 0, errNotSupported -} - -// Tail returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) Tail() (uint64, error) { - return 0, errNotSupported -} - -// AncientSize returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) AncientSize(kind string) (uint64, error) { - return 0, errNotSupported -} - -// ModifyAncients is not supported. -func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { - return 0, errNotSupported -} - -// TruncateHead returns an error as we don't have a backing chain freezer. 
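The restored `ReadTrieNode`/`WriteTrieNode`/`HasTrieNode`/`DeleteTrieNode` helpers above implement the hash-keyed scheme: a node is stored under the 32-byte keccak hash of its payload, with no key prefix. A sketch of the round trip:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/ethdb/memorydb"
)

func main() {
	db := memorydb.New()

	node := []byte{0xc2, 0x01, 0x02} // stand-in RLP payload
	hash := crypto.Keccak256Hash(node)

	rawdb.WriteTrieNode(db, hash, node)
	fmt.Println(rawdb.HasTrieNode(db, hash))            // true
	fmt.Println(len(rawdb.ReadTrieNode(db, hash)) == 3) // true

	rawdb.DeleteTrieNode(db, hash)
	fmt.Println(rawdb.HasTrieNode(db, hash)) // false
}
```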
-func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) { - return 0, errNotSupported -} - -// TruncateTail returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) { - return 0, errNotSupported -} - -// Sync returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) Sync() error { - return errNotSupported -} - -func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { - // Unlike other ancient-related methods, this method does not return - // errNotSupported when invoked. - // The reason for this is that the caller might want to do several things: - // 1. Check if something is in the freezer, - // 2. If not, check leveldb. - // - // This will work, since the ancient-checks inside 'fn' will return errors, - // and the leveldb work will continue. - // - // If we instead were to return errNotSupported here, then the caller would - // have to explicitly check for that, having an extra clause to do the - // non-ancient operations. - return fn(db) -} - -// MigrateTable processes the entries in a given table in sequence -// converting them to a new format if they're of an old format. -func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error { - return errNotSupported -} - -// AncientDatadir returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) AncientDatadir() (string, error) { - return "", errNotSupported -} - // NewDatabase creates a high level database on top of a given key-value data // store without a freezer moving immutable chain segments into cold storage. func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { @@ -150,98 +71,9 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r if err != nil { return nil, err } - log.Info("Using LevelDB as the backing database") - return NewDatabase(db), nil -} - -// NewPebbleDBDatabase creates a persistent key-value database without a freezer -// moving immutable chain segments into cold storage. -func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) { - db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral) - if err != nil { - return nil, err - } return NewDatabase(db), nil } -const ( - dbPebble = "pebble" - dbLeveldb = "leveldb" -) - -// PreexistingDatabase checks the given data directory whether a database is already -// instantiated at that location, and if so, returns the type of database (or the -// empty string). -func PreexistingDatabase(path string) string { - if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { - return "" // No pre-existing db - } - if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil { - if err != nil { - panic(err) // only possible if the pattern is malformed - } - return dbPebble - } - return dbLeveldb -} - -// OpenOptions contains the options to apply when opening a database. -// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used. 
-type OpenOptions struct { - Type string // "leveldb" | "pebble" - Directory string // the datadir - Namespace string // the namespace for database relevant metrics - Cache int // the capacity(in megabytes) of the data caching - Handles int // number of files to be open simultaneously - ReadOnly bool - // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of - // a crash is not important. This option should typically be used in tests. - Ephemeral bool -} - -// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble. -// -// type == null type != null -// +---------------------------------------- -// db is non-existent | pebble default | specified type -// db is existent | from db | specified type (if compatible) -func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { - // Reject any unsupported database type - if len(o.Type) != 0 && o.Type != dbLeveldb && o.Type != dbPebble { - return nil, fmt.Errorf("unknown db.engine %v", o.Type) - } - // Retrieve any pre-existing database's type and use that or the requested one - // as long as there's no conflict between the two types - existingDb := PreexistingDatabase(o.Directory) - if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { - return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) - } - if o.Type == dbPebble || existingDb == dbPebble { - log.Info("Using pebble as the backing database") - return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) - } - if o.Type == dbLeveldb || existingDb == dbLeveldb { - log.Info("Using leveldb as the backing database") - return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) - } - // No pre-existing database, no user-requested one either. Default to Pebble. - log.Info("Defaulting to pebble as the backing database") - return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) -} - -// Open opens both a disk-based key-value database such as leveldb or pebble, but also -// integrates it with a freezer database -- if the AncientDir option has been -// set on the provided OpenOptions. -// The passed o.AncientDir indicates the path of root ancient directory where -// the chain freezer can be opened. 
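With `OpenOptions`, `openKeyValueDatabase`, and the pebble path removed, callers construct the key-value store directly and wrap it via `NewDatabase`/`NewLevelDBDatabase`. A sketch of the leveldb path; the trailing read-only flag is assumed from the upstream signature (it is truncated in the hunk above), and the cache/handle values are illustrative:

```go
package main

import (
	"log"

	"github.com/tenderly/coreth/core/rawdb"
)

func main() {
	// file, cache (MB), handles, metrics namespace, read-only
	db, err := rawdb.NewLevelDBDatabase("/tmp/coreth-demo", 16, 16, "demo", false)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}
}
```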
-func Open(o OpenOptions) (ethdb.Database, error) { - kvdb, err := openKeyValueDatabase(o) - if err != nil { - return nil, err - } - return kvdb, nil -} - type counter uint64 func (c counter) String() string { @@ -289,10 +121,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { receipts stat numHashPairings stat hashNumPairings stat - legacyTries stat - stateLookups stat - accountTries stat - storageTries stat + tries stat codes stat txLookups stat accountSnaps stat @@ -302,10 +131,9 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { cliqueSnaps stat // State sync statistics - codeToFetch stat - syncProgress stat - syncSegments stat - syncPerformed stat + codeToFetch stat + syncProgress stat + syncSegments stat // Les statistic chtTrieNodes stat @@ -336,14 +164,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { numHashPairings.Add(size) case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength): hashNumPairings.Add(size) - case IsLegacyTrieNode(key, it.Value()): - legacyTries.Add(size) - case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength: - stateLookups.Add(size) - case IsAccountTrieNode(key): - accountTries.Add(size) - case IsStorageTrieNode(key): - storageTries.Add(size) + case len(key) == common.HashLength: + tries.Add(size) case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength: codes.Add(size) case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength): @@ -352,7 +174,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { accountSnaps.Add(size) case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength): storageSnaps.Add(size) - case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength): + case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength): preimages.Add(size) case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength): metadata.Add(size) @@ -360,21 +182,28 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { bloomBits.Add(size) case bytes.HasPrefix(key, BloomBitsIndexPrefix): bloomBits.Add(size) + case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength: + cliqueSnaps.Add(size) + case bytes.HasPrefix(key, []byte("cht-")) || + bytes.HasPrefix(key, []byte("chtIndexV2-")) || + bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie + chtTrieNodes.Add(size) + case bytes.HasPrefix(key, []byte("blt-")) || + bytes.HasPrefix(key, []byte("bltIndex-")) || + bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub + bloomTrieNodes.Add(size) case bytes.HasPrefix(key, syncStorageTriesPrefix) && len(key) == syncStorageTriesKeyLength: syncProgress.Add(size) case bytes.HasPrefix(key, syncSegmentsPrefix) && len(key) == syncSegmentsKeyLength: syncSegments.Add(size) case bytes.HasPrefix(key, CodeToFetchPrefix) && len(key) == codeToFetchKeyLength: codeToFetch.Add(size) - case bytes.HasPrefix(key, syncPerformedPrefix) && len(key) == syncPerformedKeyLength: - syncPerformed.Add(size) default: var accounted bool for _, meta := range [][]byte{ databaseVersionKey, headHeaderKey, headBlockKey, snapshotRootKey, snapshotBlockHashKey, snapshotGeneratorKey, - uncleanShutdownKey, syncRootKey, txIndexTailKey, - 
persistentStateIDKey, trieJournalKey, + uncleanShutdownKey, syncRootKey, } { if bytes.Equal(key, meta) { metadata.Add(size) @@ -402,10 +231,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()}, {"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()}, {"Key-Value store", "Contract codes", codes.Size(), codes.Count()}, - {"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()}, - {"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()}, - {"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()}, - {"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()}, + {"Key-Value store", "Trie nodes", tries.Size(), tries.Count()}, {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()}, {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()}, {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()}, @@ -416,7 +242,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"State sync", "Trie segments", syncSegments.Size(), syncSegments.Count()}, {"State sync", "Storage tries to fetch", syncProgress.Size(), syncProgress.Count()}, {"State sync", "Code to fetch", codeToFetch.Size(), codeToFetch.Count()}, - {"State sync", "Block numbers synced to", syncPerformed.Size(), syncPerformed.Count()}, } table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Database", "Category", "Size", "Items"}) @@ -427,22 +252,18 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { if unaccounted.size > 0 { log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count) } + return nil } -// ClearPrefix removes all keys in db that begin with prefix and match an -// expected key length. [keyLen] should include the length of the prefix. -func ClearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error { +// ClearPrefix removes all keys in db that begin with prefix +func ClearPrefix(db ethdb.KeyValueStore, prefix []byte) error { it := db.NewIterator(prefix, nil) defer it.Release() batch := db.NewBatch() for it.Next() { key := common.CopyBytes(it.Key()) - if len(key) != keyLen { - // avoid deleting keys that do not match the expected length - continue - } if err := batch.Delete(key); err != nil { return err } diff --git a/core/rawdb/key_length_iterator.go b/core/rawdb/key_length_iterator.go index fe95d719f0..1ef48adf21 100644 --- a/core/rawdb/key_length_iterator.go +++ b/core/rawdb/key_length_iterator.go @@ -26,7 +26,7 @@ package rawdb -import "github.com/ethereum/go-ethereum/ethdb" +import "github.com/tenderly/coreth/ethdb" // KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs // with a specific key length will be returned. diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 71dff23127..a7b3fda93e 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -31,10 +31,8 @@ import ( "bytes" "encoding/binary" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/metrics" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" + "github.com/tenderly/coreth/metrics" ) // The fields below define the low level database schema prefixing. 
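`ClearPrefix` above now deletes by prefix alone, without the key-length guard the newer code used to skip unrelated keys that merely share a prefix. A sketch showing that every matching key goes, regardless of length:

```go
package main

import (
	"fmt"

	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/ethdb/memorydb"
)

func main() {
	db := memorydb.New()
	_ = db.Put([]byte("sync_x_aaaa"), []byte{1})       // expected-length key
	_ = db.Put([]byte("sync_x_aaaa_extra"), []byte{2}) // longer key, same prefix

	if err := rawdb.ClearPrefix(db, []byte("sync_x")); err != nil {
		panic(err)
	}

	ok1, _ := db.Has([]byte("sync_x_aaaa"))
	ok2, _ := db.Has([]byte("sync_x_aaaa_extra"))
	fmt.Println(ok1, ok2) // false false: both were removed
}
```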
@@ -48,9 +46,6 @@ var ( // headBlockKey tracks the latest known full block's hash. headBlockKey = []byte("LastBlock") - // persistentStateIDKey tracks the id of latest stored state(for path-based only). - persistentStateIDKey = []byte("LastStateID") - // snapshotRootKey tracks the hash of the last snapshot. snapshotRootKey = []byte("SnapshotRoot") @@ -60,12 +55,6 @@ var ( // snapshotGeneratorKey tracks the snapshot generation marker across restarts. snapshotGeneratorKey = []byte("SnapshotGenerator") - // trieJournalKey tracks the in-memory trie node layers across restarts. - trieJournalKey = []byte("TrieJournal") - - // txIndexTailKey tracks the oldest block whose transactions have been indexed. - txIndexTailKey = []byte("TransactionIndexTail") - // uncleanShutdownKey tracks the list of local crashes uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db @@ -96,20 +85,6 @@ var ( SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value CodePrefix = []byte("c") // CodePrefix + code hash -> account code - // Path-based storage scheme of merkle patricia trie. - trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node - trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node - stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id - - PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage - configPrefix = []byte("ethereum-config-") // config prefix for the db - - // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress - BloomBitsIndexPrefix = []byte("iB") - - preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) - preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) - // State sync progress keys and prefixes syncRootKey = []byte("sync_root") // indicates the root of the main account trie currently being synced syncStorageTriesPrefix = []byte("sync_storage") // syncStorageTriesPrefix + trie root + account hash: indicates a storage trie must be fetched for the account @@ -121,9 +96,14 @@ var ( syncSegmentsKeyLength = len(syncSegmentsPrefix) + 2*common.HashLength codeToFetchKeyLength = len(CodeToFetchPrefix) + common.HashLength - // State sync metadata - syncPerformedPrefix = []byte("sync_performed") - syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen // prefix + block number as uint64 + preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage + configPrefix = []byte("ethereum-config-") // config prefix for the db + + // Chain index prefixes (use `i` + single byte to avoid mixing data types). 
+ BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress + + preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) + preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) ) // LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary @@ -183,11 +163,7 @@ func accountSnapshotKey(hash common.Hash) []byte { // storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash func storageSnapshotKey(accountHash, storageHash common.Hash) []byte { - buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength) - n := copy(buf, SnapshotStoragePrefix) - n += copy(buf[n:], accountHash.Bytes()) - copy(buf[n:], storageHash.Bytes()) - return buf + return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...) } // storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash @@ -207,7 +183,7 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { // preimageKey = preimagePrefix + hash func preimageKey(hash common.Hash) []byte { - return append(PreimagePrefix, hash.Bytes()...) + return append(preimagePrefix, hash.Bytes()...) } // codeKey = CodePrefix + hash @@ -228,83 +204,3 @@ func IsCodeKey(key []byte) (bool, []byte) { func configKey(hash common.Hash) []byte { return append(configPrefix, hash.Bytes()...) } - -// stateIDKey = stateIDPrefix + root (32 bytes) -func stateIDKey(root common.Hash) []byte { - return append(stateIDPrefix, root.Bytes()...) -} - -// accountTrieNodeKey = trieNodeAccountPrefix + nodePath. -func accountTrieNodeKey(path []byte) []byte { - return append(trieNodeAccountPrefix, path...) -} - -// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath. -func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte { - buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path)) - n := copy(buf, trieNodeStoragePrefix) - n += copy(buf[n:], accountHash.Bytes()) - copy(buf[n:], path) - return buf -} - -// IsLegacyTrieNode reports whether a provided database entry is a legacy trie -// node. The characteristics of legacy trie node are: -// - the key length is 32 bytes -// - the key is the hash of val -func IsLegacyTrieNode(key []byte, val []byte) bool { - if len(key) != common.HashLength { - return false - } - return bytes.Equal(key, crypto.Keccak256(val)) -} - -// ResolveAccountTrieNodeKey reports whether a provided database entry is an -// account trie node in path-based state scheme, and returns the resolved -// node path if so. -func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) { - if !bytes.HasPrefix(key, trieNodeAccountPrefix) { - return false, nil - } - // The remaining key should only consist a hex node path - // whose length is in the range 0 to 64 (64 is excluded - // since leaves are always wrapped with shortNode). - if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 { - return false, nil - } - return true, key[len(trieNodeAccountPrefix):] -} - -// IsAccountTrieNode reports whether a provided database entry is an account -// trie node in path-based state scheme. -func IsAccountTrieNode(key []byte) bool { - ok, _ := ResolveAccountTrieNodeKey(key) - return ok -} - -// ResolveStorageTrieNode reports whether a provided database entry is a storage -// trie node in path-based state scheme, and returns the resolved account hash -// and node path if so. 
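One subtlety in the storageSnapshotKey hunk above: the new nested-append form writes through to the prefix's backing array whenever the prefix slice carries spare capacity, which is why the removed version allocated a fresh buffer with make and copy. For the one-byte literal prefixes in this file, cap equals len, so append always reallocates and the shortcut is safe in practice; the hypothetical helpers below just make the hazard concrete:

package main

import (
	"bytes"
	"fmt"
)

// unsafeKey mirrors the nested-append style: when prefix has spare
// capacity, the first append writes into prefix's backing array, so two
// keys built from the same prefix slice can clobber each other.
func unsafeKey(prefix, a, b []byte) []byte {
	return append(append(prefix, a...), b...)
}

// safeKey mirrors the removed make-and-copy style: it sizes a fresh
// buffer exactly, so the prefix is never aliased.
func safeKey(prefix, a, b []byte) []byte {
	buf := make([]byte, 0, len(prefix)+len(a)+len(b))
	buf = append(buf, prefix...)
	buf = append(buf, a...)
	return append(buf, b...)
}

func main() {
	prefix := make([]byte, 1, 8) // spare capacity, unlike []byte("o")
	prefix[0] = 'o'

	k1 := unsafeKey(prefix, []byte("aa"), []byte("bb"))
	unsafeKey(prefix, []byte("cc"), []byte("dd")) // reuses the same backing array

	fmt.Println(bytes.Equal(k1, []byte("oaabb")))                    // false: k1 now reads "occdd"
	fmt.Println(string(safeKey(prefix, []byte("aa"), []byte("bb")))) // "oaabb"
}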
-func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) { - if !bytes.HasPrefix(key, trieNodeStoragePrefix) { - return false, common.Hash{}, nil - } - // The remaining key consists of 2 parts: - // - 32 bytes account hash - // - hex node path whose length is in the range 0 to 64 - if len(key) < len(trieNodeStoragePrefix)+common.HashLength { - return false, common.Hash{}, nil - } - if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 { - return false, common.Hash{}, nil - } - accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength]) - return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:] -} - -// IsStorageTrieNode reports whether a provided database entry is a storage -// trie node in path-based state scheme. -func IsStorageTrieNode(key []byte) bool { - ok, _, _ := ResolveStorageTrieNode(key) - return ok -} diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 5dc709080c..c42ca3caab 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -27,7 +27,7 @@ package rawdb import ( - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/ethdb" ) // table is a wrapper around a database that prefixes each key access with a pre- @@ -60,80 +60,6 @@ func (t *table) Get(key []byte) ([]byte, error) { return t.db.Get(append([]byte(t.prefix), key...)) } -// HasAncient is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) HasAncient(kind string, number uint64) (bool, error) { - return t.db.HasAncient(kind, number) -} - -// Ancient is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) Ancient(kind string, number uint64) ([]byte, error) { - return t.db.Ancient(kind, number) -} - -// AncientRange is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { - return t.db.AncientRange(kind, start, count, maxBytes) -} - -// Ancients is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) Ancients() (uint64, error) { - return t.db.Ancients() -} - -// Tail is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) Tail() (uint64, error) { - return t.db.Tail() -} - -// AncientSize is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) AncientSize(kind string) (uint64, error) { - return t.db.AncientSize(kind) -} - -// ModifyAncients runs an ancient write operation on the underlying database. -func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { - return t.db.ModifyAncients(fn) -} - -func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { - return t.db.ReadAncients(fn) -} - -// TruncateHead is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) TruncateHead(items uint64) (uint64, error) { - return t.db.TruncateHead(items) -} - -// TruncateTail is a noop passthrough that just forwards the request to the underlying -// database. -func (t *table) TruncateTail(items uint64) (uint64, error) { - return t.db.TruncateTail(items) -} - -// Sync is a noop passthrough that just forwards the request to the underlying -// database. 
-func (t *table) Sync() error { - return t.db.Sync() -} - -// MigrateTable processes the entries in a given table in sequence -// converting them to a new format if they're of an old format. -func (t *table) MigrateTable(kind string, convert convertLegacyFn) error { - return t.db.MigrateTable(kind, convert) -} - -// AncientDatadir returns the ancient datadir of the underlying database. -func (t *table) AncientDatadir() (string, error) { - return t.db.AncientDatadir() -} - // Put inserts the given value into the database at a prefixed version of the // provided key. func (t *table) Put(key []byte, value []byte) error { @@ -210,13 +136,6 @@ func (t *table) NewBatchWithSize(size int) ethdb.Batch { return &tableBatch{t.db.NewBatchWithSize(size), t.prefix} } -// NewSnapshot creates a database snapshot based on the current state. -// The created snapshot will not be affected by all following mutations -// happened on the database. -func (t *table) NewSnapshot() (ethdb.Snapshot, error) { - return t.db.NewSnapshot() -} - // tableBatch is a wrapper around a database batch that prefixes each key access // with a pre-configured string. type tableBatch struct { @@ -229,7 +148,7 @@ func (b *tableBatch) Put(key, value []byte) error { return b.batch.Put(append([]byte(b.prefix), key...), value) } -// Delete inserts a key removal into the batch for later committing. +// Delete inserts the a key removal into the batch for later committing. func (b *tableBatch) Delete(key []byte) error { return b.batch.Delete(append([]byte(b.prefix), key...)) } diff --git a/core/rawdb/table_test.go b/core/rawdb/table_test.go index 9cb913c2a1..c0013c1f4d 100644 --- a/core/rawdb/table_test.go +++ b/core/rawdb/table_test.go @@ -30,7 +30,7 @@ import ( "bytes" "testing" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/ethdb" ) func TestTableDatabase(t *testing.T) { testTableDatabase(t, "prefix") } diff --git a/core/rlp_test.go b/core/rlp_test.go index 9c9614f800..bc2a260541 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -31,20 +31,22 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" "golang.org/x/crypto/sha3" ) func getBlock(transactions int, uncles int, dataSize int) *types.Block { var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + // Generate a canonical chain to act as the main dataset engine = dummy.NewFaker() - + db = rawdb.NewMemoryDatabase() // A sender who makes transactions, has some funds key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") address = crypto.PubkeyToAddress(key.PublicKey) @@ -53,9 +55,11 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block { Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}, } + genesis = gspec.MustCommit(db) ) + // We need to generate as many blocks +1 as uncles - _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, uncles+1, 10, + blocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1, 10, func(n int, 
b *BlockGen) { if n == uncles { // Add transactions and stuff on the last block diff --git a/core/sender_cacher.go b/core/sender_cacher.go index a1c09ec362..e42988cd02 100644 --- a/core/sender_cacher.go +++ b/core/sender_cacher.go @@ -29,7 +29,7 @@ package core import ( "sync" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" ) // txSenderCacherRequest is a request for recovering transaction senders with a diff --git a/core/state/database.go b/core/state/database.go index f65fd75821..f55c787b5c 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -30,16 +30,13 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/trienode" - "github.com/ava-labs/coreth/trie/utils" - "github.com/crate-crypto/go-ipa/banderwagon" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/lru" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" + lru "github.com/hashicorp/golang-lru" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/trie" ) const ( @@ -48,12 +45,6 @@ const ( // Cache size granted for caching clean code. codeCacheSize = 64 * 1024 * 1024 - - // commitmentSize is the size of commitment stored in cache. - commitmentSize = banderwagon.UncompressedSize - - // Cache item granted for caching commitment results. - commitmentCacheItems = 64 * 1024 * 1024 / (commitmentSize + common.AddressLength) ) // Database wraps access to tries and contract code. @@ -62,21 +53,18 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. - OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, trie Trie) (Trie, error) + OpenStorageTrie(addrHash, root common.Hash) (Trie, error) // CopyTrie returns an independent copy of the given trie. CopyTrie(Trie) Trie // ContractCode retrieves a particular contract's code. - ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error) + ContractCode(addrHash, codeHash common.Hash) ([]byte, error) // ContractCodeSize retrieves a particular contracts code's size. - ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) + ContractCodeSize(addrHash, codeHash common.Hash) (int, error) - // DiskDB returns the underlying key-value disk database. - DiskDB() ethdb.KeyValueStore - - // TrieDB returns the underlying trie database for managing trie nodes. + // TrieDB retrieves the low level trie database used for data storage. TrieDB() *trie.Database } @@ -88,40 +76,29 @@ type Trie interface { // TODO(fjl): remove this when StateTrie is removed GetKey([]byte) []byte - // GetAccount abstracts an account read from the trie. It retrieves the - // account blob from the trie with provided account address and decodes it - // with associated decoding algorithm. If the specified account is not in - // the trie, nil will be returned. If the trie is corrupted(e.g. some nodes - // are missing or the account blob is incorrect for decoding), an error will - // be returned. - GetAccount(address common.Address) (*types.StateAccount, error) - - // GetStorage returns the value for key stored in the trie. The value bytes - // must not be modified by the caller. 
If a node was not found in the database, - // a trie.MissingNodeError is returned. - GetStorage(addr common.Address, key []byte) ([]byte, error) + // TryGet returns the value for key stored in the trie. The value bytes must + // not be modified by the caller. If a node was not found in the database, a + // trie.MissingNodeError is returned. + TryGet(key []byte) ([]byte, error) - // UpdateAccount abstracts an account write to the trie. It encodes the - // provided account object with associated algorithm and then updates it - // in the trie with provided address. - UpdateAccount(address common.Address, account *types.StateAccount) error + // TryGetAccount abstract an account read from the trie. + TryGetAccount(key []byte) (*types.StateAccount, error) - // UpdateStorage associates key with value in the trie. If value has length zero, - // any existing value is deleted from the trie. The value bytes must not be modified + // TryUpdate associates key with value in the trie. If value has length zero, any + // existing value is deleted from the trie. The value bytes must not be modified // by the caller while they are stored in the trie. If a node was not found in the // database, a trie.MissingNodeError is returned. - UpdateStorage(addr common.Address, key, value []byte) error + TryUpdate(key, value []byte) error - // DeleteAccount abstracts an account deletion from the trie. - DeleteAccount(address common.Address) error + // TryUpdateAccount abstract an account write to the trie. + TryUpdateAccount(key []byte, account *types.StateAccount) error - // DeleteStorage removes any existing value for key from the trie. If a node - // was not found in the database, a trie.MissingNodeError is returned. - DeleteStorage(addr common.Address, key []byte) error + // TryDelete removes any existing value for key from the trie. If a node was not + // found in the database, a trie.MissingNodeError is returned. + TryDelete(key []byte) error - // UpdateContractCode abstracts code write to the trie. It is expected - // to be moved to the stateWriter interface when the latter is ready. - UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error + // TryDeleteAccount abstracts an account deletion from the trie. + TryDeleteAccount(key []byte) error // Hash returns the root hash of the trie. It does not write to the database and // can be used even if the trie doesn't have one. @@ -133,12 +110,11 @@ type Trie interface { // The returned nodeset can be nil if the trie is clean(nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage - Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) + Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error) // NodeIterator returns an iterator that returns nodes of the trie. Iteration - // starts at the key after the given start key. And error will be returned - // if fails to create node iterator. - NodeIterator(startKey []byte) (trie.NodeIterator, error) + // starts at the key after the given start key. + NodeIterator(startKey []byte) trie.NodeIterator // Prove constructs a Merkle proof for key. The result contains all encoded nodes // on the path to the value at key. 
The value itself is also included in the last @@ -147,7 +123,7 @@ type Trie interface { // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. - Prove(key []byte, proofDb ethdb.KeyValueWriter) error + Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error } // NewDatabase creates a backing store for state. The returned database is safe for @@ -161,37 +137,23 @@ func NewDatabase(db ethdb.Database) Database { // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a // large memory cache. func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { + csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ - disk: db, - codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: trie.NewDatabase(db, config), - } -} - -// NewDatabaseWithNodeDB creates a state database with an already initialized node database. -func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { - return &cachingDB{ - disk: db, - codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: triedb, + db: trie.NewDatabaseWithConfig(db, config), + codeSizeCache: csc, + codeCache: fastcache.New(codeCacheSize), } } type cachingDB struct { - disk ethdb.KeyValueStore - codeSizeCache *lru.Cache[common.Hash, int] - codeCache *lru.SizeConstrainedCache[common.Hash, []byte] - triedb *trie.Database + db *trie.Database + codeSizeCache *lru.Cache + codeCache *fastcache.Cache } // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { - if db.triedb.IsVerkle() { - return trie.NewVerkleTrie(root, db.triedb, utils.NewPointCache(commitmentCacheItems)) - } - tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) + tr, err := trie.NewStateTrie(common.Hash{}, root, db.db) if err != nil { return nil, err } @@ -199,14 +161,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { } // OpenStorageTrie opens the storage trie of an account. -func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { - // In the verkle case, there is only one tree. But the two-tree structure - // is hardcoded in the codebase. So we need to return the same trie in this - // case. - if db.triedb.IsVerkle() { - return self, nil - } - tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) +func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { + tr, err := trie.NewStateTrie(addrHash, root, db.db) if err != nil { return nil, err } @@ -224,14 +180,13 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { } // ContractCode retrieves a particular contract's code. 
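The fastcache-based rewrite that follows is a standard two-tier read path: check the in-memory cache, fall back to the disk database, and warm both the code cache and the size cache on a miss, so a later size query costs no disk read. A compact stand-in with the cache and the disk reader abstracted into plain maps and a callback (codeStore and readCode are hypothetical names, not the coreth API):

package main

import (
	"errors"
	"fmt"
)

// codeStore sketches the cachingDB lookup: a byte cache for code blobs and
// a secondary cache for code sizes, both filled on the first disk hit.
type codeStore struct {
	cache    map[string][]byte // stands in for fastcache
	sizes    map[string]int    // stands in for the LRU size cache
	readCode func(hash string) []byte
}

func (s *codeStore) ContractCode(hash string) ([]byte, error) {
	if code, ok := s.cache[hash]; ok && len(code) > 0 {
		return code, nil
	}
	if code := s.readCode(hash); len(code) > 0 {
		s.cache[hash] = code
		s.sizes[hash] = len(code) // the size cache piggybacks on the code fetch
		return code, nil
	}
	return nil, errors.New("not found")
}

// ContractCodeSize answers from the size cache when possible and otherwise
// reuses ContractCode, so a size query warms both caches.
func (s *codeStore) ContractCodeSize(hash string) (int, error) {
	if n, ok := s.sizes[hash]; ok {
		return n, nil
	}
	code, err := s.ContractCode(hash)
	return len(code), err
}

func main() {
	disk := map[string][]byte{"c1": {0x60, 0x00}}
	s := &codeStore{
		cache:    map[string][]byte{},
		sizes:    map[string]int{},
		readCode: func(h string) []byte { return disk[h] },
	}
	n, _ := s.ContractCodeSize("c1")
	fmt.Println(n) // 2, and both caches are now warm
}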
-func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) { - code, _ := db.codeCache.Get(codeHash) - if len(code) > 0 { +func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { + if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { return code, nil } - code = rawdb.ReadCode(db.disk, codeHash) + code := rawdb.ReadCode(db.db.DiskDB(), codeHash) if len(code) > 0 { - db.codeCache.Add(codeHash, code) + db.codeCache.Set(codeHash.Bytes(), code) db.codeSizeCache.Add(codeHash, len(code)) return code, nil } @@ -239,20 +194,15 @@ func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) } // ContractCodeSize retrieves a particular contracts code's size. -func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) { +func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { - return cached, nil + return cached.(int), nil } - code, err := db.ContractCode(addr, codeHash) + code, err := db.ContractCode(addrHash, codeHash) return len(code), err } -// DiskDB returns the underlying key-value disk database. -func (db *cachingDB) DiskDB() ethdb.KeyValueStore { - return db.disk -} - // TrieDB retrieves any intermediate trie-node caching layer. func (db *cachingDB) TrieDB() *trie.Database { - return db.triedb + return db.db } diff --git a/core/state/dump.go b/core/state/dump.go index a18184ca8c..ab6af91ef9 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -31,12 +31,12 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) // DumpConfig is a set of options to control what portions of the state will be diff --git a/core/state/iterator.go b/core/state/iterator.go index 6db22d9634..0bb3faab6c 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -31,10 +31,10 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) // nodeIterator is an iterator to traverse the entire state trie post-order, diff --git a/core/state/metrics.go b/core/state/metrics.go index 5e2f060c3a..7ac971b8ad 100644 --- a/core/state/metrics.go +++ b/core/state/metrics.go @@ -26,22 +26,13 @@ package state -import "github.com/ava-labs/coreth/metrics" +import "github.com/tenderly/coreth/metrics" var ( - accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) - storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) - accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) - storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) - accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil) - storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) - accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) - storageTriesDeletedMeter = 
metrics.NewRegisteredMeter("state/delete/storagenodes", nil) - - slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil) - slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil) - slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil) - slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil) - slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil) - slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil) + accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) + storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) + accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) + storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) + accountTrieCommittedMeter = metrics.NewRegisteredMeter("state/commit/accountnodes", nil) + storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil) ) diff --git a/core/state/pruner/bloom.go b/core/state/pruner/bloom.go index 158077a4a9..9fde3e640c 100644 --- a/core/state/pruner/bloom.go +++ b/core/state/pruner/bloom.go @@ -31,10 +31,10 @@ import ( "errors" "os" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" bloomfilter "github.com/holiman/bloomfilter/v2" + "github.com/tenderly/coreth/core/rawdb" ) // stateBloomHasher is a wrapper around a byte blob to satisfy the interface API diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 27f7be0b1f..5cd0885f8a 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -37,14 +37,15 @@ import ( "strings" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) const ( @@ -64,69 +65,61 @@ const ( rangeCompactionThreshold = 100000 ) -// Config includes all the configurations for pruning. -type Config struct { - Datadir string // The directory of the state database - BloomSize uint64 // The Megabytes of memory allocated to bloom-filter -} +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = crypto.Keccak256(nil) +) // Pruner is an offline tool to prune the stale state with the // help of the snapshot. 
The workflow of pruner is very simple: // -// - iterate the snapshot, reconstruct the relevant state -// - iterate the database, delete all other state entries which -// don't belong to the target state and the genesis state +// - iterate the snapshot, reconstruct the relevant state +// - iterate the database, delete all other state entries which +// don't belong to the target state and the genesis state // // It can take several hours(around 2 hours for mainnet) to finish // the whole pruning work. It's recommended to run this offline tool // periodically in order to release the disk usage and improve the // disk read performance to some extent. type Pruner struct { - config Config - chainHeader *types.Header - db ethdb.Database - stateBloom *stateBloom - snaptree *snapshot.Tree + db ethdb.Database + stateBloom *stateBloom + datadir string + headHeader *types.Header + snaptree *snapshot.Tree } // NewPruner creates the pruner instance. -func NewPruner(db ethdb.Database, config Config) (*Pruner, error) { +func NewPruner(db ethdb.Database, datadir string, bloomSize uint64) (*Pruner, error) { headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return nil, errors.New("failed to load head block") + return nil, errors.New("Failed to load head block") } - // Offline pruning is only supported in legacy hash based scheme. - triedb := trie.NewDatabase(db, trie.HashDefaults) - // Note: we refuse to start a pruning session unless the snapshot disk layer exists, which should prevent // us from ever needing to enter RecoverPruning in an invalid pruning session (a session where we do not have // the protected trie in the triedb and in the snapshot disk layer). - - snapconfig := snapshot.Config{ - CacheSize: 256, - AsyncBuild: false, - NoBuild: true, - SkipVerify: true, - } - snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Hash(), headBlock.Root()) + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Hash(), headBlock.Root(), false, false, false) if err != nil { return nil, fmt.Errorf("failed to create snapshot for pruning, must restart without offline pruning disabled to recover: %w", err) // The relevant snapshot(s) might not exist } // Sanitize the bloom filter size if it's too small. - if config.BloomSize < 256 { - log.Warn("Sanitizing bloomfilter size", "provided(MB)", config.BloomSize, "updated(MB)", 256) - config.BloomSize = 256 + if bloomSize < 256 { + log.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256) + bloomSize = 256 } - stateBloom, err := newStateBloomWithSize(config.BloomSize) + stateBloom, err := newStateBloomWithSize(bloomSize) if err != nil { return nil, err } return &Pruner{ - config: config, - chainHeader: headBlock.Header(), - db: db, - stateBloom: stateBloom, - snaptree: snaptree, + db: db, + stateBloom: stateBloom, + datadir: datadir, + headHeader: headBlock.Header(), + snaptree: snaptree, }, nil } @@ -139,12 +132,12 @@ func prune(maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, star // dangling node is the state root is super low. So the dangling nodes in // theory will never ever be visited again. 
var ( - skipped, count int - size common.StorageSize - pstart = time.Now() - logged = time.Now() - batch = maindb.NewBatch() - iter = maindb.NewIterator(nil, nil) + count int + size common.StorageSize + pstart = time.Now() + logged = time.Now() + batch = maindb.NewBatch() + iter = maindb.NewIterator(nil, nil) ) // We wrap iter.Release() in an anonymous function so that the [iter] // value captured is the value of [iter] at the end of the function as opposed @@ -166,8 +159,9 @@ func prune(maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, star if isCode { checkKey = codeKey } - if stateBloom.Contain(checkKey) { - skipped += 1 + if ok, err := stateBloom.Contain(checkKey); err != nil { + return err + } else if ok { continue } count += 1 @@ -185,7 +179,7 @@ func prune(maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, star eta = time.Duration(left/speed) * time.Millisecond } if time.Since(logged) > 8*time.Second { - log.Info("Pruning state data", "nodes", count, "skipped", skipped, "size", size, + log.Info("Pruning state data", "nodes", count, "size", size, "elapsed", common.PrettyDuration(time.Since(pstart)), "eta", common.PrettyDuration(eta)) logged = time.Now() } @@ -260,12 +254,12 @@ func (p *Pruner) Prune(root common.Hash) error { // reuse it for pruning instead of generating a new one. It's // mandatory because a part of state may already be deleted, // the recovery procedure is necessary. - _, stateBloomRoot, err := findBloomFilter(p.config.Datadir) + _, stateBloomRoot, err := findBloomFilter(p.datadir) if err != nil { return err } if stateBloomRoot != (common.Hash{}) { - return RecoverPruning(p.config.Datadir, p.db) + return RecoverPruning(p.datadir, p.db) } // If the target state root is not specified, return a fatal error. @@ -275,7 +269,7 @@ func (p *Pruner) Prune(root common.Hash) error { // Ensure the root is really present. The weak assumption // is the presence of root can indicate the presence of the // entire trie. 
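The prune loop shown earlier reduces to a single pass over the key space: a key whose hash is in the state bloom survives, and anything else that looks like trie or code data is deleted. Since a bloom filter yields false positives but never false negatives, the sweep can only over-retain stale data, never drop a live node. A stripped-down sketch of that filter-and-delete pass, with the keep predicate standing in for stateBloom.Contain and per-key deletes in place of the real batched writes:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// sweep deletes every key for which keep returns false. In the real
// pruner, keep is the state bloom built from the target trie, keys are
// first filtered down to hash-sized trie and code entries, and deletes go
// through a periodically-flushed batch.
func sweep(db *memorydb.Database, keep func(key []byte) bool) (deleted int, err error) {
	it := db.NewIterator(nil, nil)
	defer it.Release()

	var stale [][]byte
	for it.Next() {
		if !keep(it.Key()) {
			k := make([]byte, len(it.Key()))
			copy(k, it.Key()) // detach from the iterator's buffer
			stale = append(stale, k)
		}
	}
	for _, k := range stale {
		if err := db.Delete(k); err != nil {
			return deleted, err
		}
		deleted++
	}
	return deleted, it.Error()
}

func main() {
	db := memorydb.New()
	db.Put([]byte("live"), []byte{1})
	db.Put([]byte("dead"), []byte{2})

	n, _ := sweep(db, func(k []byte) bool { return string(k) == "live" })
	fmt.Println(n) // 1: only the unreferenced entry is removed
}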
- if !rawdb.HasLegacyTrieNode(p.db, root) { + if !rawdb.HasTrieNode(p.db, root) { return fmt.Errorf("associated state[%x] is not present", root) } else { log.Info("Selecting last accepted block root as the pruning target", "root", root) @@ -292,7 +286,7 @@ func (p *Pruner) Prune(root common.Hash) error { if err := extractGenesis(p.db, p.stateBloom); err != nil { return err } - filterName := bloomFilterName(p.config.Datadir, root) + filterName := bloomFilterName(p.datadir, root) log.Info("Writing state bloom to disk", "name", filterName) if err := p.stateBloom.Commit(filterName, filterName+stateBloomFileTempSuffix); err != nil { @@ -319,7 +313,7 @@ func RecoverPruning(datadir string, db ethdb.Database) error { } headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return errors.New("failed to load head block") + return errors.New("Failed to load head block") } stateBloom, err := NewStateBloomFromDisk(stateBloomPath) if err != nil { @@ -347,14 +341,11 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if genesis == nil { return errors.New("missing genesis block") } - t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults)) - if err != nil { - return err - } - accIter, err := t.NodeIterator(nil) + t, err := trie.NewStateTrie(common.Hash{}, genesis.Root(), trie.NewDatabase(db)) if err != nil { return err } + accIter := t.NodeIterator(nil) for accIter.Next(true) { hash := accIter.Hash() @@ -369,16 +360,12 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { return err } - if acc.Root != types.EmptyRootHash { - id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root) - storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults)) - if err != nil { - return err - } - storageIter, err := storageTrie.NodeIterator(nil) + if acc.Root != emptyRoot { + storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db)) if err != nil { return err } + storageIter := storageTrie.NodeIterator(nil) for storageIter.Next(true) { hash := storageIter.Hash() if hash != (common.Hash{}) { @@ -389,7 +376,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { return storageIter.Error() } } - if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) { + if !bytes.Equal(acc.CodeHash, emptyCode) { stateBloom.Put(acc.CodeHash, nil) } } diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go new file mode 100644 index 0000000000..d23896b66f --- /dev/null +++ b/core/state/snapshot/account.go @@ -0,0 +1,98 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snapshot + +import ( + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/rlp" +) + +// Account is a modified version of a state.Account, where the root is replaced +// with a byte slice. This format can be used to represent full-consensus format +// or slim-snapshot format which replaces the empty root and code hash as nil +// byte slice. +type Account struct { + Nonce uint64 + Balance *big.Int + Root []byte + CodeHash []byte + IsMultiCoin bool +} + +// SlimAccount converts a state.Account content into a slim snapshot account +func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte, isMultiCoin bool) Account { + slim := Account{ + Nonce: nonce, + Balance: balance, + IsMultiCoin: isMultiCoin, + } + if root != emptyRoot { + slim.Root = root[:] + } + if !bytes.Equal(codehash, emptyCode[:]) { + slim.CodeHash = codehash + } + return slim +} + +// SlimAccountRLP converts a state.Account content into a slim snapshot +// version RLP encoded. +func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte, isMultiCoin bool) []byte { + data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash, isMultiCoin)) + if err != nil { + panic(err) + } + return data +} + +// FullAccount decodes the data on the 'slim RLP' format and return +// the consensus format account. +func FullAccount(data []byte) (Account, error) { + var account Account + if err := rlp.DecodeBytes(data, &account); err != nil { + return Account{}, err + } + if len(account.Root) == 0 { + account.Root = emptyRoot[:] + } + if len(account.CodeHash) == 0 { + account.CodeHash = emptyCode[:] + } + return account, nil +} + +// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format. +func FullAccountRLP(data []byte) ([]byte, error) { + account, err := FullAccount(data) + if err != nil { + return nil, err + } + return rlp.EncodeToBytes(account) +} diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index 64a2cd3421..75502d7174 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -27,6 +27,7 @@ package snapshot import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -35,13 +36,12 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) // trieKV represents a trie key-value pair @@ -53,7 +53,7 @@ type trieKV struct { type ( // trieGeneratorFn is the interface of trie generation which can // be implemented by different trie algorithm. 
- trieGeneratorFn func(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan (trieKV), out chan (common.Hash)) + trieGeneratorFn func(db ethdb.KeyValueWriter, owner common.Hash, in chan (trieKV), out chan (common.Hash)) // leafCallbackFn is the callback invoked at the leaves of the trie, // returns the subtrie root with the specified subtrie identifier. @@ -62,12 +62,12 @@ type ( // GenerateAccountTrieRoot takes an account iterator and reproduces the root hash. func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) { - return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) + return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash. func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) { - return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true) + return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateTrie takes the whole snapshot tree as the input, traverses all the @@ -81,10 +81,9 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd } defer acctIt.Release() - scheme := snaptree.triedb.Scheme() - got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { // Migrate the code first, commit the contract code into the tmp db. - if codeHash != types.EmptyCodeHash { + if codeHash != emptyCode { code := rawdb.ReadCode(src, codeHash) if len(code) == 0 { return common.Hash{}, errors.New("failed to read contract code") @@ -98,7 +97,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd } defer storageIt.Release() - hash, err := generateTrieRoot(dst, scheme, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -147,7 +146,7 @@ func (stat *generateStats) progressAccounts(account common.Hash, done uint64) { stat.head = account } -// finishAccounts updates the generator stats for the finished account range. +// finishAccounts updates the gemerator stats for the finished account range. func (stat *generateStats) finishAccounts(done uint64) { stat.lock.Lock() defer stat.lock.Unlock() @@ -253,7 +252,7 @@ func runReport(stats *generateStats, stop chan bool) { // generateTrieRoot generates the trie hash based on the snapshot iterator. // It can be used for generating account trie, storage trie or even the // whole state which connects the accounts and the corresponding storages. 
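The generateTrieRoot rework that follows keeps the function's pipeline shape: the iterator side feeds sorted key/value leaves into a channel, one goroutine folds them into a stack trie, and the root comes back on a result channel once the input closes. The same shape in miniature, with a running SHA-256 standing in for the stack trie so the sketch stays self-contained:

package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

type leaf struct {
	key, value []byte
}

// foldLeaves consumes sorted leaves from in and sends one digest on out.
// In the snapshot code the consumer is a stack trie and the digest is the
// trie root; a running SHA-256 stands in for it here.
func foldLeaves(in <-chan leaf, out chan<- [32]byte) {
	h := sha256.New()
	for lf := range in {
		h.Write(lf.key)
		h.Write(lf.value)
	}
	var root [32]byte
	copy(root[:], h.Sum(nil))
	out <- root
}

func main() {
	in := make(chan leaf)         // unbuffered: producer and consumer stay in lock-step
	out := make(chan [32]byte, 1) // buffered so the consumer never blocks on send

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		foldLeaves(in, out)
	}()

	// Producer side: in the real code this loop drives the snapshot iterator.
	for i := 0; i < 3; i++ {
		in <- leaf{key: []byte{byte(i)}, value: []byte("v")}
	}
	close(in) // signals the consumer to emit the root

	wg.Wait()
	fmt.Printf("%x\n", <-out)
}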
-func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { +func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { var ( in = make(chan trieKV) // chan to pass leaves out = make(chan common.Hash, 1) // chan to collect result @@ -264,7 +263,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou wg.Add(1) go func() { defer wg.Done() - generatorFn(db, scheme, account, in, out) + generatorFn(db, account, in, out) }() // Spin up a go-routine for progress logging if report && stats != nil { @@ -310,7 +309,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou fullData []byte ) if leafCallback == nil { - fullData, err = types.FullAccountRLP(it.(AccountIterator).Account()) + fullData, err = FullAccountRLP(it.(AccountIterator).Account()) if err != nil { return stop(err) } @@ -322,7 +321,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou return stop(err) } // Fetch the next account and process it concurrently - account, err := types.FullAccount(it.(AccountIterator).Account()) + account, err := FullAccount(it.(AccountIterator).Account()) if err != nil { return stop(err) } @@ -332,7 +331,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou results <- err return } - if account.Root != subroot { + if !bytes.Equal(account.Root, subroot.Bytes()) { results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot) return } @@ -371,16 +370,16 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou return stop(nil) } -func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { - options := trie.NewStackTrieOptions() - if db != nil { - options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { - rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) - }) - } - t := trie.NewStackTrie(options) +func stackTrieGenerate(db ethdb.KeyValueWriter, owner common.Hash, in chan trieKV, out chan common.Hash) { + t := trie.NewStackTrieWithOwner(db, owner) for leaf := range in { - t.Update(leaf.key[:], leaf.value) + t.TryUpdate(leaf.key[:], leaf.value) + } + var root common.Hash + if db == nil { + root = t.Hash() + } else { + root, _ = t.Commit() } - out <- t.Commit() + out <- root } diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 0674839fe4..b2f23a7a33 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -31,15 +31,14 @@ import ( "fmt" "math" "math/rand" + "sort" "sync" "sync/atomic" "time" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" bloomfilter "github.com/holiman/bloomfilter/v2" - "golang.org/x/exp/slices" + "github.com/tenderly/coreth/rlp" ) var ( @@ -79,7 +78,7 @@ var ( bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2)) // the bloom offsets are runtime constants which determines which part of the - // account/storage hash the hasher functions looks at, to determine the + // the account/storage hash the hasher functions looks at, to determine the // bloom key for an account/slot. 
This is randomized at init(), so that the // global population of nodes do not all display the exact same behaviour with // regards to bloom content @@ -115,7 +114,7 @@ type diffLayer struct { blockHash common.Hash // Block hash to which this snapshot diff belongs to root common.Hash // Root hash to which this snapshot diff belongs to - stale atomic.Bool // Signals that the layer became stale (state progressed) + stale uint32 // Signals that the layer became stale (state progressed) // destructSet is a very special helper marker. If an account is marked as // deleted, then it's recorded in this set. However it's allowed that an account @@ -285,12 +284,12 @@ func (dl *diffLayer) Parent() snapshot { // Stale return whether this layer has become stale (was flattened across) or if // it's still live. func (dl *diffLayer) Stale() bool { - return dl.stale.Load() + return atomic.LoadUint32(&dl.stale) != 0 } // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. -func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { +func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { data, err := dl.AccountRLP(hash) if err != nil { return nil, err @@ -298,7 +297,7 @@ func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { if len(data) == 0 { // can be both nil and []byte{} return nil, nil } - account := new(types.SlimAccount) + account := new(Account) if err := rlp.DecodeBytes(data, account); err != nil { panic(err) } @@ -310,14 +309,9 @@ func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { // // Note the returned account is not a copy, please don't modify it. func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { - // Check staleness before reaching further. - dl.lock.RLock() - if dl.Stale() { - dl.lock.RUnlock() - return nil, ErrSnapshotStale - } // Check the bloom filter first whether there's even a point in reaching into // all the maps in all the layers below + dl.lock.RLock() hit := dl.diffed.Contains(accountBloomHasher(hash)) if !hit { hit = dl.diffed.Contains(destructBloomHasher(hash)) @@ -384,11 +378,6 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro // Check the bloom filter first whether there's even a point in reaching into // all the maps in all the layers below dl.lock.RLock() - // Check staleness before reaching further. 
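The staleness bookkeeping being reshuffled in these hunks rests on a single atomic word: 0 means live, 1 means flattened away, and the patch moves it from atomic.Bool back to a bare uint32 driven by sync/atomic, the pre-Go-1.19 idiom. The swap is what makes flatten safe: exactly one caller observes the old value as unset, so two children flattening into the same parent cannot both proceed. A minimal sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

// layer carries a stale word: 0 means live, 1 means flattened away.
type layer struct {
	stale uint32
}

func (l *layer) Stale() bool {
	return atomic.LoadUint32(&l.stale) != 0
}

// markStale flips the flag and reports whether it was already set. The
// atomic swap is the whole point: exactly one caller gets false back, so
// concurrent flatten attempts against the same parent cannot both win.
func (l *layer) markStale() (wasStale bool) {
	return atomic.SwapUint32(&l.stale, 1) != 0
}

func main() {
	parent := &layer{}
	fmt.Println(parent.markStale()) // false: the first flatten wins
	fmt.Println(parent.markStale()) // true: the real code panics here
	fmt.Println(parent.Stale())     // true
}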
- if dl.Stale() { - dl.lock.RUnlock() - return nil, ErrSnapshotStale - } hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash}) if !hit { hit = dl.diffed.Contains(destructBloomHasher(accountHash)) @@ -477,7 +466,7 @@ func (dl *diffLayer) flatten() snapshot { // Before actually writing all our data to the parent, first ensure that the // parent hasn't been 'corrupted' by someone else already flattening into it - if parent.stale.Swap(true) { + if atomic.SwapUint32(&parent.stale, 1) != 0 { panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo } // Overwrite all the updated accounts blindly, merge the sorted list @@ -542,7 +531,7 @@ func (dl *diffLayer) AccountList() []common.Hash { dl.accountList = append(dl.accountList, hash) } } - slices.SortFunc(dl.accountList, common.Hash.Cmp) + sort.Sort(hashes(dl.accountList)) dl.memory += uint64(len(dl.accountList) * common.HashLength) return dl.accountList } @@ -580,7 +569,7 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) for k := range storageMap { storageList = append(storageList, k) } - slices.SortFunc(storageList, common.Hash.Cmp) + sort.Sort(hashes(storageList)) dl.storageList[accountHash] = storageList dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength) return storageList, destructed diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 24fc3fdc69..e4dbc47055 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -28,14 +28,13 @@ package snapshot import ( "bytes" - crand "crypto/rand" "math/rand" "testing" - "github.com/ava-labs/coreth/utils" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/tenderly/coreth/ethdb/memorydb" ) func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} { @@ -84,7 +83,7 @@ func TestMergeBasics(t *testing.T) { if rand.Intn(2) == 0 { accStorage := make(map[common.Hash][]byte) value := make([]byte, 32) - crand.Read(value) + rand.Read(value) accStorage[randomHash()] = value storage[h] = accStorage } @@ -246,7 +245,7 @@ func TestInsertAndMerge(t *testing.T) { func emptyLayer() *diskLayer { return &diskLayer{ diskdb: memorydb.New(), - cache: utils.NewMeteredCache(500*1024, "", 0), + cache: fastcache.New(500 * 1024), } } @@ -305,7 +304,7 @@ func BenchmarkSearchSlot(b *testing.B) { accStorage := make(map[common.Hash][]byte) for i := 0; i < 5; i++ { value := make([]byte, 32) - crand.Read(value) + rand.Read(value) accStorage[randomHash()] = value storage[accountKey] = accStorage } @@ -341,7 +340,7 @@ func BenchmarkFlatten(b *testing.B) { accStorage := make(map[common.Hash][]byte) for i := 0; i < 20; i++ { value := make([]byte, 32) - crand.Read(value) + rand.Read(value) accStorage[randomHash()] = value } storage[accountKey] = accStorage diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go index b2a0aa4f5a..96ed409393 100644 --- a/core/state/snapshot/disklayer.go +++ b/core/state/snapshot/disklayer.go @@ -31,20 +31,19 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/utils" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - 
"github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) // diskLayer is a low level persistent snapshot built on top of a key-value store. type diskLayer struct { diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot triedb *trie.Database // Trie node cache for reconstruction purposes - cache *utils.MeteredCache // Cache to avoid hitting the disk for direct access + cache *fastcache.Cache // Cache to avoid hitting the disk for direct access blockHash common.Hash // Block hash of the base snapshot root common.Hash // Root hash of the base snapshot @@ -63,16 +62,6 @@ type diskLayer struct { lock sync.RWMutex } -// Release releases underlying resources; specifically the fastcache requires -// Reset() in order to not leak memory. -// OBS: It does not invoke Close on the diskdb -func (dl *diskLayer) Release() error { - if dl.cache != nil { - dl.cache.Reset() - } - return nil -} - // Root returns root hash for which this snapshot was made. func (dl *diskLayer) Root() common.Hash { return dl.root @@ -99,7 +88,7 @@ func (dl *diskLayer) Stale() bool { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. -func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) { +func (dl *diskLayer) Account(hash common.Hash) (*Account, error) { data, err := dl.AccountRLP(hash) if err != nil { return nil, err @@ -107,7 +96,7 @@ func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) { if len(data) == 0 { // can be both nil and []byte{} return nil, nil } - account := new(types.SlimAccount) + account := new(Account) if err := rlp.DecodeBytes(data, account); err != nil { panic(err) } diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index 3dede62341..4f211da82b 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -30,10 +30,10 @@ import ( "bytes" "testing" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/rlp" ) // reverse reverses the contents of a byte slice. 
It's used to update random accs diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index f051fcd5c4..c89db04cfb 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -28,24 +28,107 @@ package snapshot import ( "bytes" + "encoding/binary" "fmt" + "math/big" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/utils" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) -const ( - snapshotCacheNamespace = "state/snapshot/clean/fastcache" // prefix for detailed stats from the snapshot fastcache - snapshotCacheStatsUpdateFrequency = 1000 // update stats from the snapshot fastcache once per 1000 ops +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = crypto.Keccak256Hash(nil) ) +// generatorStats is a collection of statistics gathered by the snapshot generator +// for logging purposes. +type generatorStats struct { + wiping chan struct{} // Notification channel if wiping is in progress + origin uint64 // Origin prefix where generation started + start time.Time // Timestamp when generation started + accounts uint64 // Number of accounts indexed(generated or recovered) + slots uint64 // Number of storage slots indexed(generated or recovered) + storage common.StorageSize // Total account and storage slot size(generation or recovery) +} + +// Info creates an contextual info-level log with the given message and the context pulled +// from the internally maintained statistics. +func (gs *generatorStats) Info(msg string, root common.Hash, marker []byte) { + gs.log(log.LvlInfo, msg, root, marker) +} + +// Debug creates an contextual debug-level log with the given message and the context pulled +// from the internally maintained statistics. +func (gs *generatorStats) Debug(msg string, root common.Hash, marker []byte) { + gs.log(log.LvlDebug, msg, root, marker) +} + +// log creates an contextual log with the given message and the context pulled +// from the internally maintained statistics. +func (gs *generatorStats) log(level log.Lvl, msg string, root common.Hash, marker []byte) { + var ctx []interface{} + if root != (common.Hash{}) { + ctx = append(ctx, []interface{}{"root", root}...) + } + // Figure out whether we're after or within an account + switch len(marker) { + case common.HashLength: + ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...) + case 2 * common.HashLength: + ctx = append(ctx, []interface{}{ + "in", common.BytesToHash(marker[:common.HashLength]), + "at", common.BytesToHash(marker[common.HashLength:]), + }...) + } + // Add the usual measurements + ctx = append(ctx, []interface{}{ + "accounts", gs.accounts, + "slots", gs.slots, + "storage", gs.storage, + "elapsed", common.PrettyDuration(time.Since(gs.start)), + }...) 
+ // Calculate the estimated indexing time based on current stats + if len(marker) > 0 { + if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 { + left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8]) + + speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero + ctx = append(ctx, []interface{}{ + "eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond), + }...) + } + } + + switch level { + case log.LvlTrace: + log.Trace(msg, ctx...) + case log.LvlDebug: + log.Debug(msg, ctx...) + case log.LvlInfo: + log.Info(msg, ctx...) + case log.LvlWarn: + log.Warn(msg, ctx...) + case log.LvlError: + log.Error(msg, ctx...) + case log.LvlCrit: + log.Crit(msg, ctx...) + default: + log.Error(fmt.Sprintf("log with invalid log level %s: %s", level, msg), ctx...) + } +} + // generateSnapshot regenerates a brand new snapshot based on an existing state // database and head block asynchronously. The snapshot is returned immediately // and generation is continued in the background until done. @@ -72,7 +155,7 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i triedb: triedb, blockHash: blockHash, root: root, - cache: newMeteredSnapshotCache(cache * 1024 * 1024), + cache: fastcache.New(cache * 1024 * 1024), genMarker: genMarker, genPending: make(chan struct{}), genAbort: make(chan chan struct{}), @@ -191,8 +274,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { } } // Create an account and state iterator pointing to the current generator marker - trieId := trie.StateTrieID(dl.root) - accTrie, err := trie.NewStateTrie(trieId, dl.triedb) + accTrie, err := trie.NewStateTrie(common.Hash{}, dl.root, dl.triedb) if err != nil { // The account trie is missing (GC), surf the chain until one becomes available stats.Info("Trie missing, state snapshotting paused", dl.root, dl.genMarker) @@ -207,15 +289,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that accMarker = dl.genMarker[:common.HashLength] } - nodeIt, err := accTrie.NodeIterator(accMarker) - if err != nil { - log.Error("Generator failed to create account iterator", "root", dl) - abort := <-dl.genAbort - dl.genStats = stats - close(abort) - return - } - accIt := trie.NewIterator(nodeIt) + accIt := trie.NewIterator(accTrie.NodeIterator(accMarker)) batch := dl.diskdb.NewBatch() // Iterate from the previous marker and continue generating the state snapshot @@ -224,11 +298,17 @@ func (dl *diskLayer) generate(stats *generatorStats) { // Retrieve the current account and flatten it into the internal format accountHash := common.BytesToHash(accIt.Key) - var acc types.StateAccount + var acc struct { + Nonce uint64 + Balance *big.Int + Root common.Hash + CodeHash []byte + IsMultiCoin bool + } if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { log.Crit("Invalid account encountered during snapshot creation", "err", err) } - data := types.SlimAccountRLP(acc) + data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin) // If the account is not yet in-progress, write it out if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) { @@ -248,9 +328,8 @@ func (dl *diskLayer) generate(stats *generatorStats) { } // If the iterated account is a contract, iterate through corresponding contract // storage to generate snapshot entries. 
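As context for the SlimAccountRLP call above: the snapshot stores accounts in a space-saving "slim" layout where the well-known empty storage root and empty code hash are elided to nil. The sketch below assumes the field order of the anonymous struct decoded above (Nonce, Balance, Root, CodeHash, IsMultiCoin); slimAccount and slimRLP are illustrative names, not this fork's implementation.

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// slimAccount sketches the snapshot's slim account layout: Root and CodeHash
// are plain byte slices so empty markers can be dropped to nil. IsMultiCoin
// is the coreth-specific extension visible in the decoded struct above.
type slimAccount struct {
	Nonce       uint64
	Balance     *big.Int
	Root        []byte
	CodeHash    []byte
	IsMultiCoin bool
}

var (
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	emptyCode = crypto.Keccak256Hash(nil)
)

// slimRLP drops the empty markers before RLP-encoding.
func slimRLP(nonce uint64, balance *big.Int, root, codeHash common.Hash, multiCoin bool) []byte {
	slim := slimAccount{Nonce: nonce, Balance: balance, IsMultiCoin: multiCoin}
	if root != emptyRoot {
		slim.Root = root.Bytes()
	}
	if !bytes.Equal(codeHash.Bytes(), emptyCode.Bytes()) {
		slim.CodeHash = codeHash.Bytes()
	}
	data, err := rlp.EncodeToBytes(slim)
	if err != nil {
		panic(err)
	}
	return data
}

func main() {
	blob := slimRLP(1, big.NewInt(100), emptyRoot, emptyCode, false)
	fmt.Printf("slim account: %x\n", blob)
}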
- if acc.Root != types.EmptyRootHash { - storeTrieId := trie.StorageTrieID(dl.root, accountHash, acc.Root) - storeTrie, err := trie.NewStateTrie(storeTrieId, dl.triedb) + if acc.Root != emptyRoot { + storeTrie, err := trie.NewStateTrie(accountHash, acc.Root, dl.triedb) if err != nil { log.Error("Generator failed to access storage trie", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err) abort := <-dl.genAbort @@ -262,15 +341,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength { storeMarker = dl.genMarker[common.HashLength:] } - nodeIt, err := storeTrie.NodeIterator(storeMarker) - if err != nil { - log.Error("Generator failed to create storage iterator", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err) - abort := <-dl.genAbort - dl.genStats = stats - close(abort) - return - } - storeIt := trie.NewIterator(nodeIt) + storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker)) for storeIt.Next() { rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value) stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value)) @@ -328,7 +399,3 @@ func (dl *diskLayer) generate(stats *generatorStats) { abort := <-dl.genAbort close(abort) } - -func newMeteredSnapshotCache(size int) *utils.MeteredCache { - return utils.NewMeteredCache(size, snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency) -} diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index c805348925..505039917f 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -33,16 +33,12 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/hashdb" - "github.com/ava-labs/coreth/trie/triedb/pathdb" - "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" "golang.org/x/crypto/sha3" ) @@ -59,20 +55,14 @@ func hashData(input []byte) common.Hash { // Tests that snapshot generation from an empty database. func TestGeneration(t *testing.T) { - testGeneration(t, rawdb.HashScheme) - testGeneration(t, rawdb.PathScheme) -} - -func testGeneration(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, - // two of which also has the same 3-slot storage trie attached. 
- var helper = newHelper(scheme) + var helper = newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false) - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -99,27 +89,22 @@ func testGeneration(t *testing.T, scheme string) { // Tests that snapshot generation with existent flat state. func TestGenerateExistentState(t *testing.T) { - testGenerateExistentState(t, rawdb.HashScheme) - testGenerateExistentState(t, rawdb.PathScheme) -} - -func testGenerateExistentState(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - var helper = newHelper(scheme) + var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, 
CodeHash: emptyCode.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) root, snap := helper.CommitAndGenerate() @@ -142,12 +127,12 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) { t.Helper() accIt := snap.AccountIterator(common.Hash{}) defer accIt.Release() - snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate, + snapRoot, err := generateTrieRoot(nil, accIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { storageIt, _ := snap.StorageIterator(accountHash, common.Hash{}) defer storageIt.Release() - hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -168,38 +153,33 @@ type testHelper struct { diskdb ethdb.Database triedb *trie.Database accTrie *trie.StateTrie - nodes *trienode.MergedNodeSet + nodes *trie.MergedNodeSet } -func newHelper(scheme string) *testHelper { +func newHelper() *testHelper { diskdb := rawdb.NewMemoryDatabase() - config := &trie.Config{} - if scheme == rawdb.PathScheme { - config.PathDB = &pathdb.Config{} // disable caching - } else { - config.HashDB = &hashdb.Config{} // disable caching - } - triedb := trie.NewDatabase(diskdb, config) - accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb) + triedb := trie.NewDatabase(diskdb) + accTrie, _ := trie.NewStateTrie(common.Hash{}, common.Hash{}, triedb) return &testHelper{ diskdb: diskdb, triedb: triedb, accTrie: accTrie, - nodes: trienode.NewMergedNodeSet(), + nodes: trie.NewMergedNodeSet(), } } -func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) { +func (t *testHelper) addTrieAccount(acckey string, acc *Account) { val, _ := rlp.EncodeToBytes(acc) - t.accTrie.MustUpdate([]byte(acckey), val) + t.accTrie.Update([]byte(acckey), val) } -func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) { +func (t *testHelper) addSnapAccount(acckey string, acc *Account) { + val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(acckey)) - rawdb.WriteAccountSnapshot(t.diskdb, key, types.SlimAccountRLP(*acc)) + rawdb.WriteAccountSnapshot(t.diskdb, key, val) } -func (t *testHelper) addAccount(acckey string, acc *types.StateAccount) { +func (t *testHelper) addAccount(acckey string, acc *Account) { t.addTrieAccount(acckey, acc) t.addSnapAccount(acckey, acc) } @@ -211,20 +191,19 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) } } -func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) common.Hash { - id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash) - stTrie, _ := trie.NewStateTrie(id, t.triedb) +func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) []byte { + stTrie, _ := trie.NewStateTrie(owner, common.Hash{}, t.triedb) for i, k := range keys { - stTrie.MustUpdate([]byte(k), []byte(vals[i])) + stTrie.Update([]byte(k), []byte(vals[i])) } if !commit { - return stTrie.Hash() + return stTrie.Hash().Bytes() } root, nodes, _ := stTrie.Commit(false) if nodes != nil { t.nodes.Merge(nodes) } - return root + return root.Bytes() } 
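The helper above keys everything by hashed paths: as hashData (top of this test file) shows, both account keys and storage slot keys enter the snapshot as their keccak-256 hashes, which is why disk iteration comes out in hash order. A self-contained equivalent for experimenting outside the test suite:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// hashKey mirrors the tests' hashData helper: snapshot entries are addressed
// by the keccak-256 hash of the raw account key or storage slot key.
func hashKey(input []byte) [32]byte {
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write(input)
	var out [32]byte
	hasher.Sum(out[:0])
	return out
}

func main() {
	fmt.Printf("acc-1 -> %x\n", hashKey([]byte("acc-1")))
}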
func (t *testHelper) Commit() common.Hash { @@ -232,8 +211,8 @@ func (t *testHelper) Commit() common.Hash { if nodes != nil { t.nodes.Merge(nodes) } - t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil) - t.triedb.Commit(root, false) + t.triedb.Update(t.nodes) + t.triedb.Commit(root, false, nil) return root } @@ -251,47 +230,40 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) { // - miss in the beginning // - miss in the middle // - miss in the end -// // - the contract(non-empty storage) has wrong storage slots // - wrong slots in the beginning // - wrong slots in the middle // - wrong slots in the end -// // - the contract(non-empty storage) has extra storage slots // - extra slots in the beginning // - extra slots in the middle // - extra slots in the end func TestGenerateExistentStateWithWrongStorage(t *testing.T) { - testGenerateExistentStateWithWrongStorage(t, rawdb.HashScheme) - testGenerateExistentStateWithWrongStorage(t, rawdb.PathScheme) -} - -func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) { - helper := newHelper(scheme) + helper := newHelper() // Account one, empty root but non-empty database - helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) // Account two, non empty root but empty database stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Miss slots { // Account three, non empty root but misses slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"}) // Account four, non empty root but misses slots in the middle helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"}) // Account five, non empty root but misses slots in the end helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"}) } @@ -299,22 +271,22 @@ func 
testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) { { // Account six, non empty root but wrong slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"}) // Account seven, non empty root but wrong slots in the middle helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"}) // Account eight, non empty root but wrong slots in the end helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"}) // Account 9, non empty root but rotated slots helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"}) } @@ -322,17 +294,17 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) { { // Account 10, non empty root but extra slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"}) // Account 11, non empty root but extra slots in the middle helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"}) // Account 12, non empty root but extra slots in the end helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, 
[]string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"}) } @@ -359,12 +331,7 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) { // - wrong accounts // - extra accounts func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { - testGenerateExistentStateWithWrongAccounts(t, rawdb.HashScheme) - testGenerateExistentStateWithWrongAccounts(t, rawdb.PathScheme) -} - -func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) { - helper := newHelper(scheme) + helper := newHelper() helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -377,25 +344,25 @@ func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) { // Missing accounts, only in the trie { - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning - helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle - helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Beginning + helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Middle + helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // End } // Wrong accounts { - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) } // Extra accounts, only in the snap { - helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning - helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle - helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: 
types.EmptyCodeHash.Bytes()}) // after the end + helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyRoot.Bytes()}) // before the beginning + helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle + helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyRoot.Bytes()}) // after the end } root, snap := helper.CommitAndGenerate() @@ -419,27 +386,20 @@ func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) { // Tests that snapshot generation errors out correctly in case of a missing trie // node in the account trie. func TestGenerateCorruptAccountTrie(t *testing.T) { - testGenerateCorruptAccountTrie(t, rawdb.HashScheme) - testGenerateCorruptAccountTrie(t, rawdb.PathScheme) -} - -func testGenerateCorruptAccountTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // without any storage slots to keep the test smaller. - helper := newHelper(scheme) + helper := newHelper() - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x7dd654835190324640832972b7c4c6eaa0c50541e36766d054ed57721f1dc7eb - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x515d3de35e143cd976ad476398d910aa7bf8a02e8fd7eb9e3baacddbbcbfcb41 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x7dd654835190324640832972b7c4c6eaa0c50541e36766d054ed57721f1dc7eb + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x515d3de35e143cd976ad476398d910aa7bf8a02e8fd7eb9e3baacddbbcbfcb41 root := helper.Commit() // Root: 0xfa04f652e8bd3938971bf7d71c3c688574af334ca8bc20e64b01ba610ae93cad - // Delete an account trie node and ensure the generator chokes - targetPath := []byte{0xc} - targetHash := common.HexToHash("0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e") - - rawdb.DeleteTrieNode(helper.diskdb, common.Hash{}, targetPath, targetHash, scheme) + // Delete an account trie leaf and ensure the generator chokes + helper.triedb.Commit(root, false, nil) + helper.diskdb.Delete(common.HexToHash("0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e").Bytes()) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) select { @@ -460,30 +420,20 @@ func testGenerateCorruptAccountTrie(t *testing.T, scheme string) { // trie node for a storage trie. It's similar to internal corruption but it is // handled differently inside the generator. 
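The corruption tests in this file all stop the generator through the same handshake visible above: the caller pushes a fresh channel into genAbort and waits for the generator to close it once it has quiesced (the tests do stop := make(chan struct{}); snap.genAbort <- stop; <-stop). A minimal standalone sketch of that channel-of-channels pattern:

package main

import "fmt"

func main() {
	genAbort := make(chan chan struct{})

	go func() { // the "generator"
		for {
			select {
			case abort := <-genAbort:
				fmt.Println("generator: persisting progress, then acking abort")
				close(abort) // ack: caller may proceed
				return
			default:
				// ... generate one batch of snapshot entries here ...
			}
		}
	}()

	stop := make(chan struct{})
	genAbort <- stop // request shutdown
	<-stop           // wait until the generator has quiesced
	fmt.Println("caller: generator stopped")
}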
func TestGenerateMissingStorageTrie(t *testing.T) { - testGenerateMissingStorageTrie(t, rawdb.HashScheme) - testGenerateMissingStorageTrie(t, rawdb.PathScheme) -} - -func testGenerateMissingStorageTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - var ( - acc1 = hashData([]byte("acc-1")) - acc3 = hashData([]byte("acc-3")) - helper = newHelper(scheme) - ) - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 - stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 + helper := newHelper() + stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e + stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 root := helper.Commit() - // Delete storage trie root of account one and three. - rawdb.DeleteTrieNode(helper.diskdb, acc1, nil, stRoot, scheme) - rawdb.DeleteTrieNode(helper.diskdb, acc3, nil, stRoot, scheme) + // Delete a storage trie root and ensure the generator chokes + helper.diskdb.Delete(stRoot) // We can only corrupt the disk database, so flush the tries out snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) select { @@ -503,29 +453,20 @@ func testGenerateMissingStorageTrie(t *testing.T, scheme string) { // Tests that snapshot generation errors out correctly in case of a missing trie // node in a storage trie. func TestGenerateCorruptStorageTrie(t *testing.T) { - testGenerateCorruptStorageTrie(t, rawdb.HashScheme) - testGenerateCorruptStorageTrie(t, rawdb.PathScheme) -} - -func testGenerateCorruptStorageTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. 
We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - helper := newHelper(scheme) + helper := newHelper() - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 - + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 root := helper.Commit() - // Delete a node in the storage trie. - targetPath := []byte{0x4} - targetHash := common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371") - rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-1")), targetPath, targetHash, scheme) - rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-3")), targetPath, targetHash, scheme) + // Delete a storage trie leaf and ensure the generator chokes + helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes()) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) select { @@ -544,12 +485,7 @@ func testGenerateCorruptStorageTrie(t *testing.T, scheme string) { // Tests that snapshot generation when an extra account with storage exists in the snap state. 
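The rawdb helpers these tests lean on (WriteAccountSnapshot, WriteStorageSnapshot, ReadStorageSnapshot) address entries with short prefixed keys, which is what lets generation wipe extra accounts and slots by key range. The one-byte prefixes in this sketch follow go-ethereum's rawdb schema and are an assumption about this fork, shown only to make the on-disk layout concrete:

package main

import "fmt"

// accountSnapshotKey and storageSnapshotKey sketch the key layout behind the
// rawdb snapshot helpers: "a" + accountHash for accounts, "o" + accountHash +
// storageHash for slots (prefixes assumed from go-ethereum's schema).
func accountSnapshotKey(accountHash [32]byte) []byte {
	return append([]byte("a"), accountHash[:]...)
}

func storageSnapshotKey(accountHash, storageHash [32]byte) []byte {
	key := append([]byte("o"), accountHash[:]...)
	return append(key, storageHash[:]...)
}

func main() {
	var acc, slot [32]byte
	acc[31], slot[31] = 1, 2
	fmt.Printf("account key: %x\nstorage key: %x\n", accountSnapshotKey(acc), storageSnapshotKey(acc, slot))
}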
func TestGenerateWithExtraAccounts(t *testing.T) { - testGenerateWithExtraAccounts(t, rawdb.HashScheme) - testGenerateWithExtraAccounts(t, rawdb.PathScheme) -} - -func testGenerateWithExtraAccounts(t *testing.T, scheme string) { - helper := newHelper(scheme) + helper := newHelper() { // Account one in the trie stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), @@ -557,18 +493,18 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e // Identical in the snap key := hashData([]byte("acc-1")) - rawdb.WriteAccountSnapshot(helper.diskdb, key, val) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1")) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2")) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3")) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-4")), []byte("val-4")) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-5")), []byte("val-5")) + rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-1")), []byte("val-1")) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-2")), []byte("val-2")) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-3")), []byte("val-3")) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-4")), []byte("val-4")) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-5")), []byte("val-5")) } { // Account two exists only in the snapshot @@ -577,18 +513,18 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) - rawdb.WriteAccountSnapshot(helper.diskdb, key, val) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1")) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2")) - rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3")) + rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-1")), []byte("b-val-1")) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-2")), []byte("b-val-2")) + rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-3")), []byte("b-val-3")) } root := helper.Commit() // To verify the test: If we now inspect the snap db, there should exist extraneous storage items - if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data 
== nil { + if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil { t.Fatalf("expected snap storage to exist") } snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) @@ -606,26 +542,21 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) { snap.genAbort <- stop <-stop // If we now inspect the snap db, there should exist no extraneous storage items - if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil { + if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil { t.Fatalf("expected slot to be removed, got %v", string(data)) } } func enableLogging() { - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) } // Tests that snapshot generation when an extra account with storage exists in the snap state. func TestGenerateWithManyExtraAccounts(t *testing.T) { - testGenerateWithManyExtraAccounts(t, rawdb.HashScheme) - testGenerateWithManyExtraAccounts(t, rawdb.PathScheme) -} - -func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) { if false { enableLogging() } - helper := newHelper(scheme) + helper := newHelper() { // Account one in the trie stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), @@ -633,9 +564,9 @@ func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) { []string{"val-1", "val-2", "val-3"}, true, ) - acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb + helper.accTrie.Update([]byte("acc-1"), val) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb // Identical in the snap key := hashData([]byte("acc-1")) @@ -647,7 +578,8 @@ func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) { { // 100 accounts exist only in snapshot for i := 0; i < 1000; i++ { - acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} + //acc := &Account{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(int64(i)), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(fmt.Sprintf("acc-%d", i))) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -678,20 +610,15 @@ func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) { // So in trie, we iterate 2 entries 0x03, 0x07. We create the 0x07 in the database and abort the procedure, because the trie is exhausted. // But in the database, we still have the stale storage slots 0x04, 0x05. They are not iterated yet, but the procedure is finished. 
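A toy rendering of the scenario just described: generation walks the trie as the source of truth, and any snapshot keys falling in the gap between consecutive trie keys, or trailing after the last one, are stale and must be deleted. Plain bytes stand in for hashes here; this illustrates the cleanup the test expects, not the fork's actual iterator code.

package main

import (
	"fmt"
	"sort"
)

func main() {
	trieKeys := []byte{0x03, 0x07}                                     // canonical state
	snapKeys := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09} // stale snapshot

	prev := byte(0x00)
	for _, tk := range trieKeys {
		// Delete snapshot keys in (prev, tk): they no longer exist in the trie.
		i := sort.Search(len(snapKeys), func(i int) bool { return snapKeys[i] > prev })
		for ; i < len(snapKeys) && snapKeys[i] < tk; i++ {
			fmt.Printf("delete stale 0x%02x\n", snapKeys[i])
		}
		prev = tk
	}
	// Everything after the last trie key is stale too.
	i := sort.Search(len(snapKeys), func(i int) bool { return snapKeys[i] > prev })
	for ; i < len(snapKeys); i++ {
		fmt.Printf("delete trailing stale 0x%02x\n", snapKeys[i])
	}
}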
func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { - testGenerateWithExtraBeforeAndAfter(t, rawdb.HashScheme) - testGenerateWithExtraBeforeAndAfter(t, rawdb.PathScheme) -} - -func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) { if false { enableLogging() } - helper := newHelper(scheme) + helper := newHelper() { - acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) - helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val) + helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val) + helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val) rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val) rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val) @@ -719,19 +646,14 @@ func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) { // TestGenerateWithMalformedSnapdata tests what happes if we have some junk // in the snapshot database, which cannot be parsed back to an account func TestGenerateWithMalformedSnapdata(t *testing.T) { - testGenerateWithMalformedSnapdata(t, rawdb.HashScheme) - testGenerateWithMalformedSnapdata(t, rawdb.PathScheme) -} - -func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) { if false { enableLogging() } - helper := newHelper(scheme) + helper := newHelper() { - acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) + helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val) junk := make([]byte, 100) copy(junk, []byte{0xde, 0xad}) @@ -760,18 +682,13 @@ func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) { } func TestGenerateFromEmptySnap(t *testing.T) { - testGenerateFromEmptySnap(t, rawdb.HashScheme) - testGenerateFromEmptySnap(t, rawdb.PathScheme) -} - -func testGenerateFromEmptySnap(t *testing.T, scheme string) { //enableLogging() - helper := newHelper(scheme) + helper := newHelper() // Add 1K accounts to the trie for i := 0; i < 400; i++ { stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount(fmt.Sprintf("acc-%d", i), - &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) } root, snap := helper.CommitAndGenerate() t.Logf("Root: %#x\n", root) // Root: 0x2609234ce43f5e471202c87e017ffb4dfecdb3163cfcbaa55de04baa59cad42d @@ -798,12 +715,7 @@ func testGenerateFromEmptySnap(t *testing.T, scheme string) { // This hits a case where the snap verification passes, but there are more elements in the trie // which we must also add. 
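The resume behaviour exercised next hinges on the generation-marker layout used earlier in this diff: nil means generation is done, an empty slice means start from scratch, 32 bytes name the in-progress account, and 64 bytes add the in-progress storage slot. A sketch of the split that generate() performs on dl.genMarker:

package main

import "fmt"

const hashLen = 32

// splitMarker mirrors the slicing above: the first 32 bytes are the account
// hash to resume at; any remainder is the storage slot hash within it.
func splitMarker(marker []byte) (account, storage []byte) {
	if len(marker) >= hashLen {
		account = marker[:hashLen]
	}
	if len(marker) > hashLen {
		storage = marker[hashLen:]
	}
	return account, storage
}

func main() {
	marker := make([]byte, 2*hashLen)
	marker[0], marker[hashLen] = 0xaa, 0xbb
	acc, st := splitMarker(marker)
	fmt.Printf("resume at account %x..., slot %x...\n", acc[:1], st[:1])
}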
func TestGenerateWithIncompleteStorage(t *testing.T) { - testGenerateWithIncompleteStorage(t, rawdb.HashScheme) - testGenerateWithIncompleteStorage(t, rawdb.PathScheme) -} - -func testGenerateWithIncompleteStorage(t *testing.T, scheme string) { - helper := newHelper(scheme) + helper := newHelper() stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"} stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"} // We add 8 accounts, each one is missing exactly one of the storage slots. This means @@ -812,7 +724,7 @@ func testGenerateWithIncompleteStorage(t *testing.T, scheme string) { for i := 0; i < 8; i++ { accKey := fmt.Sprintf("acc-%d", i) stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true) - helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()}) var moddedKeys []string var moddedVals []string for ii := 0; ii < 8; ii++ { @@ -901,19 +813,14 @@ func populateDangling(disk ethdb.KeyValueStore) { // // This test will populate some dangling storages to see if they can be cleaned up. func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { - testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.HashScheme) - testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.PathScheme) -} - -func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string) { - var helper = newHelper(scheme) + var helper = newHelper() - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + stRoot := helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) @@ -941,19 +848,14 @@ func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string // // This test will populate some dangling storages to see if they can be cleaned up. 
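For clarity on the term: a storage snapshot entry is "dangling" when it sits under an account hash that has no account snapshot entry, which is exactly what populateDangling fabricates. A toy detector over in-memory maps (an illustration, not the fork's wiper):

package main

import "fmt"

func main() {
	accounts := map[string]bool{"acc-1": true, "acc-2": true} // account snapshot entries
	storageOwners := []string{"acc-1", "acc-ghost"}           // owners of storage entries

	for _, owner := range storageOwners {
		if !accounts[owner] {
			fmt.Printf("dangling storage under %q: should be wiped during generation\n", owner)
		}
	}
}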
func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { - testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.HashScheme) - testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.PathScheme) -} - -func testGenerateBrokenSnapshotWithDanglingStorage(t *testing.T, scheme string) { - var helper = newHelper(scheme) + var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) populateDangling(helper.diskdb) diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go index b7cf84ec91..e214317935 100644 --- a/core/state/snapshot/iterator.go +++ b/core/state/snapshot/iterator.go @@ -31,9 +31,9 @@ import ( "fmt" "sort" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" ) // Iterator is an iterator to step over all the accounts or the specific diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go index 7fc374a15a..2486be257f 100644 --- a/core/state/snapshot/iterator_test.go +++ b/core/state/snapshot/iterator_test.go @@ -34,8 +34,8 @@ import ( "math/rand" "testing" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/rawdb" ) // TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index cc73974ef0..ddea7274ab 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -32,12 +32,13 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/trie" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) // journalGenerator is a disk layer entry containing the generator progress marker. @@ -56,12 +57,12 @@ type journalGenerator struct { // loadSnapshot loads a pre-existing state snapshot backed by a key-value // store. If loading the snapshot from disk is successful, this function also // returns a boolean indicating whether or not the snapshot is fully generated. 
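loadSnapshot below resumes from a persisted journalGenerator record; Wiping, Done, and Marker are visible in the code that follows, while the remaining fields in this sketch are taken from go-ethereum's version of the struct and are an assumption for this fork. The round trip shows how the progress marker survives a restart:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// journalGenerator sketches the progress record consumed by loadSnapshot.
// Field set assumed from go-ethereum; only Wiping, Done and Marker are
// confirmed by the code in this diff.
type journalGenerator struct {
	Wiping   bool
	Done     bool
	Marker   []byte
	Accounts uint64
	Slots    uint64
	Storage  uint64
}

func main() {
	in := journalGenerator{Done: false, Marker: []byte{0xaa}, Accounts: 42}
	blob, err := rlp.EncodeToBytes(in)
	if err != nil {
		panic(err)
	}
	var out journalGenerator
	if err := rlp.DecodeBytes(blob, &out); err != nil {
		panic(err)
	}
	fmt.Printf("resumed at marker %x after %d accounts\n", out.Marker, out.Accounts)
}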
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, noBuild bool) (snapshot, bool, error) { +func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash) (snapshot, bool, error) { // Retrieve the block number and hash of the snapshot, failing if no snapshot // is present in the database (or crashed mid-update). baseBlockHash := rawdb.ReadSnapshotBlockHash(diskdb) if baseBlockHash == (common.Hash{}) { - return nil, false, errors.New("missing or corrupted snapshot, no snapshot block hash") + return nil, false, fmt.Errorf("missing or corrupted snapshot, no snapshot block hash") } if baseBlockHash != blockHash { return nil, false, fmt.Errorf("block hash stored on disk (%#x) does not match last accepted (%#x)", baseBlockHash, blockHash) @@ -90,18 +91,18 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, snapshot := &diskLayer{ diskdb: diskdb, triedb: triedb, - cache: newMeteredSnapshotCache(cache * 1024 * 1024), + cache: fastcache.New(cache * 1024 * 1024), root: baseRoot, blockHash: baseBlockHash, created: time.Now(), } - var wiper chan struct{} - // Load the disk layer status from the generator if it's not complete + // Everything loaded correctly, resume any suspended operations if !generator.Done { // If the generator was still wiping, restart one from scratch (fine for // now as it's rare and the wiper deletes the stuff it touches anyway, so // restarting won't incur a lot of extra database hops. + var wiper chan struct{} if generator.Wiping { log.Info("Resuming previous snapshot wipe") wiper = WipeSnapshot(diskdb, false) @@ -111,11 +112,6 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, if snapshot.genMarker == nil { snapshot.genMarker = []byte{} } - } - - // Everything loaded correctly, resume any suspended operations - // if the background generation is allowed - if !generator.Done && !noBuild { snapshot.genPending = make(chan struct{}) snapshot.genAbort = make(chan chan struct{}) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index a82c49c7e0..d2f4bbdfab 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -32,15 +32,16 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/trie" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/trie" ) const ( @@ -124,7 +125,7 @@ type Snapshot interface { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. - Account(hash common.Hash) (*types.SlimAccount, error) + Account(hash common.Hash) (*Account, error) // AccountRLP directly retrieves the account RLP associated with a particular // hash in the snapshot slim data format. @@ -166,14 +167,6 @@ type snapshot interface { Stale() bool } -// Config includes the configurations for snapshots. 
-type Config struct { - CacheSize int // Megabytes permitted to use for read caches - NoBuild bool // Indicator that the snapshots generation is disallowed - AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously - SkipVerify bool // Indicator that all verification should be bypassed -} - // Tree is an Ethereum state snapshot tree. It consists of one persistent base // layer backed by a key-value store, on top of which arbitrarily many in-memory // diff layers are topped. The memory diffs can form a tree with branching, but @@ -184,9 +177,9 @@ type Config struct { // storage data to avoid expensive multi-level trie lookups; and to allow sorted, // cheap iteration of the account/storage tries for sync aid. type Tree struct { - config Config // Snapshots configurations diskdb ethdb.KeyValueStore // Persistent database to store the snapshot triedb *trie.Database // In-memory cache to access the trie through + cache int // Megabytes permitted to use for read caches // Collection of all known layers // blockHash -> snapshot blockLayers map[common.Hash]snapshot @@ -208,24 +201,24 @@ type Tree struct { // If the snapshot is missing or the disk layer is broken, the snapshot will be // reconstructed using both the existing data and the state trie. // The repair happens on a background thread. -func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, blockHash, root common.Hash) (*Tree, error) { +func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, async bool, rebuild bool, verify bool) (*Tree, error) { // Create a new, empty snapshot tree snap := &Tree{ - config: config, diskdb: diskdb, triedb: triedb, + cache: cache, blockLayers: make(map[common.Hash]snapshot), stateLayers: make(map[common.Hash]map[common.Hash]snapshot), - verified: config.SkipVerify, // if SkipVerify is true, all verification will be bypassed + verified: !verify, // if verify is false, all verification will be bypassed } // Attempt to load a previously persisted snapshot and rebuild one if failed - head, generated, err := loadSnapshot(diskdb, triedb, config.CacheSize, blockHash, root, config.NoBuild) + head, generated, err := loadSnapshot(diskdb, triedb, cache, blockHash, root) if err != nil { - log.Warn("Failed to load snapshot, regenerating", "err", err) - if !config.NoBuild { + if rebuild { + log.Warn("Failed to load snapshot, regenerating", "err", err) snap.Rebuild(blockHash, root) - if !config.AsyncBuild { + if !async { if err := snap.verifyIntegrity(snap.disklayer(), true); err != nil { return nil, err } @@ -246,8 +239,8 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, block } // Verify any synchronously generated or loaded snapshot from disk - if !config.AsyncBuild || generated { - if err := snap.verifyIntegrity(snap.disklayer(), !config.AsyncBuild && !generated); err != nil { + if !async || generated { + if err := snap.verifyIntegrity(snap.disklayer(), !async && !generated); err != nil { return nil, err } } @@ -386,13 +379,11 @@ func (t *Tree) verifyIntegrity(base *diskLayer, waitBuild bool) error { // Note: a blockHash is used instead of a state root so that the exact state // transition between the two states is well defined. This is intended to // prevent the following edge case -// -// A -// / \ -// B C -// | -// D -// +// A +// / \ +// B C +// | +// D // In this scenario, it's possible For (A, B) and (A, C, D) to be two // different paths to the resulting state. 
We use block hashes and parent // block hashes to ensure that the exact path through which we flatten @@ -577,10 +568,7 @@ func (dl *diskLayer) abortGeneration() bool { } // If the disk layer is running a snapshot generator, abort it - dl.lock.RLock() - shouldAbort := dl.genAbort != nil && dl.genStats == nil - dl.lock.RUnlock() - if shouldAbort { + if dl.genAbort != nil && dl.genStats == nil { abort := make(chan struct{}) dl.genAbort <- abort <-abort @@ -611,7 +599,6 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { // Mark the original base as stale as we're going to create a new wrapper base.lock.Lock() if base.stale { - base.lock.Unlock() return nil, false, ErrStaleParentLayer // we've committed into the same base from two children, boo } base.stale = true @@ -637,7 +624,7 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { // Ensure we don't delete too much data blindly (contract can be // huge). It's ok to flush, the root will go missing in case of a // crash and we'll detect and regenerate the snapshot. - if batch.ValueSize() > 64*1024*1024 { + if batch.ValueSize() > ethdb.IdealBatchSize { if err := batch.Write(); err != nil { log.Crit("Failed to write storage deletions", "err", err) } @@ -663,7 +650,7 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { // Ensure we don't write too much data blindly. It's ok to flush, the // root will go missing in case of a crash and we'll detect and regen // the snapshot. - if batch.ValueSize() > 64*1024*1024 { + if batch.ValueSize() > ethdb.IdealBatchSize { if err := batch.Write(); err != nil { log.Crit("Failed to write storage deletions", "err", err) } @@ -742,13 +729,6 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { return res, base.genMarker == nil, nil } -// Release releases resources -func (t *Tree) Release() { - if dl := t.disklayer(); dl != nil { - dl.Release() - } -} - // Rebuild wipes all available snapshot data from the persistent database and // discard all caches and diff layers. Afterwards, it starts a new snapshot // generator with the given root hash. @@ -781,7 +761,7 @@ func (t *Tree) Rebuild(blockHash, root common.Hash) { case *diffLayer: // If the layer is a simple diff, simply mark as stale layer.lock.Lock() - layer.stale.Store(true) + atomic.StoreUint32(&layer.stale, 1) layer.lock.Unlock() default: @@ -791,7 +771,7 @@ func (t *Tree) Rebuild(blockHash, root common.Hash) { // Start generating a new snapshot from scratch on a background thread. The // generator will run a wiper first if there's not one running right now. 
log.Info("Rebuilding state snapshot") - base := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, blockHash, root, wiper) + base := generateSnapshot(t.diskdb, t.triedb, t.cache, blockHash, root, wiper) t.blockLayers = map[common.Hash]snapshot{ blockHash: base, } @@ -853,14 +833,14 @@ func (t *Tree) verify(root common.Hash, force bool) error { } defer acctIt.Release() - got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}, force) if err != nil { return common.Hash{}, err } defer storageIt.Release() - hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -922,7 +902,7 @@ func (t *Tree) generating() (bool, error) { return layer.genMarker != nil, nil } -// DiskRoot is a external helper function to return the disk layer root. +// diskRoot is a external helper function to return the disk layer root. func (t *Tree) DiskRoot() common.Hash { t.lock.Lock() defer t.lock.Unlock() @@ -930,20 +910,51 @@ func (t *Tree) DiskRoot() common.Hash { return t.diskRoot() } -// Size returns the memory usage of the diff layers above the disk layer and the -// dirty nodes buffered in the disk layer. Currently, the implementation uses a -// special diff layer (the first) as an aggregator simulating a dirty buffer, so -// the second return will always be 0. However, this will be made consistent with -// the pathdb, which will require a second return. -func (t *Tree) Size() (diffs common.StorageSize, buf common.StorageSize) { - t.lock.RLock() - defer t.lock.RUnlock() +func (t *Tree) DiskAccountIterator(seek common.Hash) AccountIterator { + t.lock.Lock() + defer t.lock.Unlock() - var size common.StorageSize - for _, layer := range t.blockLayers { - if layer, ok := layer.(*diffLayer); ok { - size += common.StorageSize(layer.memory) - } + return t.disklayer().AccountIterator(seek) +} + +func (t *Tree) DiskStorageIterator(account common.Hash, seek common.Hash) StorageIterator { + t.lock.Lock() + defer t.lock.Unlock() + + it, _ := t.disklayer().StorageIterator(account, seek) + return it +} + +// NewDiskLayer creates a diskLayer for direct access to the contents of the on-disk +// snapshot. Does not perform any validation. +func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot { + return &diskLayer{ + diskdb: diskdb, + created: time.Now(), + + // state sync uses iterators to access data, so this cache is not used. + // initializing it out of caution. 
+		cache: fastcache.New(32 * 1024),
+	}
+}
+
+// NewTestTree creates a *Tree with a pre-populated diskLayer
+func NewTestTree(diskdb ethdb.KeyValueStore, blockHash, root common.Hash) *Tree {
+	base := &diskLayer{
+		diskdb:    diskdb,
+		root:      root,
+		blockHash: blockHash,
+		cache:     fastcache.New(128 * 256),
+		created:   time.Now(),
+	}
+	return &Tree{
+		blockLayers: map[common.Hash]snapshot{
+			blockHash: base,
+		},
+		stateLayers: map[common.Hash]map[common.Hash]snapshot{
+			root: {
+				blockHash: base,
+			},
		},
 	}
-	return size, 0
 }
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 7ca901c241..66eaa917dc 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -27,23 +27,21 @@
 import (
-	crand "crypto/rand"
 	"fmt"
 	"math/big"
 	"math/rand"
 	"testing"
 	"time"
 
-	"github.com/ava-labs/coreth/core/rawdb"
-	"github.com/ava-labs/coreth/core/types"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/tenderly/coreth/core/rawdb"
+	"github.com/tenderly/coreth/rlp"
 )
 
 // randomHash generates a random blob of data and returns it as a hash.
 func randomHash() common.Hash {
 	var hash common.Hash
-	if n, err := crand.Read(hash[:]); n != common.HashLength || err != nil {
+	if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil {
 		panic(err)
 	}
 	return hash
@@ -51,11 +49,12 @@ func randomHash() common.Hash {
 
 // randomAccount generates a random account and returns it RLP encoded.
 func randomAccount() []byte {
-	a := &types.StateAccount{
+	root := randomHash()
+	a := Account{
 		Balance:  big.NewInt(rand.Int63()),
 		Nonce:    rand.Uint64(),
-		Root:     randomHash(),
-		CodeHash: types.EmptyCodeHash[:],
+		Root:     root[:],
+		CodeHash: emptyCode[:],
 	}
 	data, _ := rlp.EncodeToBytes(a)
 	return data
@@ -120,7 +119,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	if err := snaps.Flatten(common.HexToHash("0x02")); err != nil {
 		t.Fatalf("failed to merge diff layer onto disk: %v", err)
 	}
-	// Since the base layer was modified, ensure that data retrievals on the external reference fail
+	// Since the base layer was modified, ensure that data retrieval on the external reference fails
 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
 		t.Errorf("stale reference returned account: %v (err: %v)", acc, err)
 	}
@@ -167,7 +166,7 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	if err := snaps.Flatten(common.HexToHash("0x02")); err != nil {
 		t.Fatalf("Failed to flatten diff layer onto disk: %v", err)
 	}
-	// Since the base layer was modified, ensure that data retrieval on the external reference fail
+	// Since the base layer was modified, ensure that data retrieval on the external reference fails
 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
 		t.Errorf("stale reference returned account: %v (err: %v)", acc, err)
 	}
@@ -186,10 +185,6 @@
 // be returned with junk data. This version of the test retains the bottom diff
 // layer to check the usual mode of operation where the accumulator is retained.
 func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
-	// Un-commenting this triggers the bloom set to be deterministic. The values below
-	// were used to trigger the flaw described in https://github.com/ethereum/go-ethereum/issues/27254.
- // bloomDestructHasherOffset, bloomAccountHasherOffset, bloomStorageHasherOffset = 14, 24, 5 - // Create an empty base layer and a snapshot tree out of it snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01")) // Commit three diffs on top and retrieve a reference to the bottommost @@ -217,7 +212,7 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { if err := snaps.Flatten(common.HexToHash("0x02")); err != nil { t.Fatal(err) } - // Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail + // Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { t.Errorf("stale reference returned account: %v (err: %v)", acc, err) } @@ -401,19 +396,17 @@ func TestPostFlattenBasicDataAccess(t *testing.T) { // different blocks inserted with an identical state root. // In this example, (B, C) and (D, E) share the identical state root, but were // inserted under different blocks. -// -// A -// / \ -// B C -// | | -// D E +// A +// / \ +// B C +// | | +// D E // // `t.Flatten(C)` should result in: // -// B C -// | | -// D E -// +// B C +// | | +// D E // With the branch D, E, hanging and relying on Discard to be called to // garbage collect the references. func TestTreeFlattenDoesNotDropPendingLayers(t *testing.T) { @@ -695,7 +688,7 @@ func TestReadStateDuringFlattening(t *testing.T) { snap := snaps.Snapshot(diffRootC) // Register the testing hook to access the state after flattening - var result = make(chan *types.SlimAccount) + var result = make(chan *Account) snaps.onFlatten = func() { // Spin up a thread to read the account from the pre-created // snapshot handler. It's expected to be blocked. 
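The diagrams in the comments above motivate keying snapshot layers by block hash rather than by state root: two different blocks can commit an identical state root, so a root-keyed index alone cannot name a unique layer or a unique flattening path. Below is a minimal, self-contained Go sketch of the resulting double index; the `layer` type and the two maps are illustrative stand-ins for the `Tree.blockLayers` and `Tree.stateLayers` fields seen in this diff, not the real API.

package main

import "fmt"

type hash string

// layer is a stand-in for a snapshot diff layer.
type layer struct {
	blockHash hash
	root      hash
	parent    *layer
}

func main() {
	blockLayers := make(map[hash]*layer)          // blockHash -> layer: always unambiguous
	stateLayers := make(map[hash]map[hash]*layer) // root -> blockHash -> layers sharing that root

	insert := func(l *layer) {
		blockLayers[l.blockHash] = l
		if stateLayers[l.root] == nil {
			stateLayers[l.root] = make(map[hash]*layer)
		}
		stateLayers[l.root][l.blockHash] = l
	}

	a := &layer{blockHash: "A", root: "r0"}
	b := &layer{blockHash: "B", root: "r1", parent: a}
	c := &layer{blockHash: "C", root: "r1", parent: a} // same root as B, different block

	insert(a)
	insert(b)
	insert(c)

	// Keyed by root alone, B and C would collide; keyed by block hash the
	// flattening path through the tree stays exact.
	fmt.Println(len(blockLayers))       // 3
	fmt.Println(len(stateLayers["r1"])) // 2
}

Flatten-style operations can then walk parent pointers from an exact block hash, while the root-keyed map only serves to enumerate all layers that happen to share a root.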
diff --git a/core/state/snapshot/wipe.go b/core/state/snapshot/wipe.go index 37963032e6..ad51de0c8f 100644 --- a/core/state/snapshot/wipe.go +++ b/core/state/snapshot/wipe.go @@ -30,10 +30,10 @@ import ( "bytes" "time" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" ) // WipeSnapshot starts a goroutine to iterate over the entire key-value database diff --git a/core/state/snapshot/wipe_test.go b/core/state/snapshot/wipe_test.go index 74afec5fce..0ee64a6df8 100644 --- a/core/state/snapshot/wipe_test.go +++ b/core/state/snapshot/wipe_test.go @@ -30,9 +30,9 @@ import ( "math/rand" "testing" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb/memorydb" ) // Tests that given a database with random data content, all parts of a snapshot diff --git a/core/state/state_test.go b/core/state/state_test.go index ad096e9cbf..1ba519229f 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -27,63 +27,18 @@ package state import ( - "bytes" - "encoding/json" - "math/big" - "testing" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" ) -type stateEnv struct { +type stateTest struct { db ethdb.Database state *StateDB } -func newStateEnv() *stateEnv { - db := rawdb.NewMemoryDatabase() - sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil) - return &stateEnv{db: db, state: sdb} -} - -func TestIterativeDump(t *testing.T) { +func newStateTest() *stateTest { db := rawdb.NewMemoryDatabase() - tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) - sdb, _ := New(types.EmptyRootHash, tdb, nil) - s := &stateEnv{db: db, state: sdb} - - // generate a few entries - obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01})) - obj1.AddBalance(big.NewInt(22)) - obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02})) - obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}) - obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02})) - obj3.SetBalance(big.NewInt(44)) - obj4 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x00})) - obj4.AddBalance(big.NewInt(1337)) - - // write some of them to the trie - s.state.updateStateObject(obj1) - s.state.updateStateObject(obj2) - root, _ := s.state.Commit(0, false, false) - s.state, _ = New(root, tdb, nil) - - b := &bytes.Buffer{} - s.state.IterativeDump(nil, json.NewEncoder(b)) - // check that DumpToCollector contains the state objects that are in trie - got := b.String() - want := `{"root":"0x0ffca661efa3b7504ac015083994c94fd7d0d24db60354c717c936afcced762a"} -{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000001","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"} 
-{"balance":"1337","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000000","key":"0x5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"} -{"balance":"0","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3","code":"0x03030303030303","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000102","key":"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"} -{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000002","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"} -` - if got != want { - t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want) - } + sdb, _ := New(common.Hash{}, NewDatabase(db), nil) + return &stateTest{db: db, state: sdb} } diff --git a/core/state/statedb.go b/core/state/statedb.go index 6d49444cfb..fb2b950d8d 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -28,29 +28,21 @@ package state import ( + "errors" "fmt" "math/big" "sort" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/predicate" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/trienode" - "github.com/ava-labs/coreth/trie/triestate" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" -) - -const ( - // storageDeleteLimit denotes the highest permissible memory allocation - // employed for contract storage deletion. - storageDeleteLimit = 512 * 1024 * 1024 + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) type revision struct { @@ -58,71 +50,66 @@ type revision struct { journalIndex int } +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") +) + +type proofList [][]byte + +func (n *proofList) Put(key []byte, value []byte) error { + *n = append(*n, value) + return nil +} + +func (n *proofList) Delete(key []byte) error { + panic("not supported") +} + // StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: -// // * Contracts // * Accounts -// -// Once the state is committed, tries cached in stateDB (including account -// trie, storage tries) will no longer be functional. A new state instance -// must be created with new root and updated database for accessing post- -// commit states. 
type StateDB struct { db Database prefetcher *triePrefetcher trie Trie hasher crypto.KeccakState - snap snapshot.Snapshot // Nil if snapshot is not available // originalRoot is the pre-state root, before any changes were made. // It will be updated when the Commit is called. originalRoot common.Hash - // These maps hold the state changes (including the corresponding - // original value) that occurred in this **block**. - accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding - storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format - accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding - storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format + snap snapshot.Snapshot + snapDestructs map[common.Hash]struct{} + snapAccounts map[common.Hash][]byte + snapStorage map[common.Hash]map[common.Hash][]byte - // This map holds 'live' objects, which will get modified while processing - // a state transition. - stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution - stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value + // This map holds 'live' objects, which will get modified while processing a state transition. + stateObjects map[common.Address]*stateObject + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution // DB error. // State objects are used by the consensus core and VM which are // unable to deal with database-level errors. Any error that occurs - // during a database read is memoized here and will eventually be - // returned by StateDB.Commit. Notably, this error is also shared - // by all cached state objects in case the database failure occurs - // when accessing state of accounts. + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. dbErr error // The refund counter, also used by state transitioning. refund uint64 - // The tx context and all occurred logs in the scope of transaction. thash common.Hash txIndex int logs map[common.Hash][]*types.Log logSize uint - // Preimages occurred seen by VM in the scope of block. preimages map[common.Hash][]byte // Per-transaction access list accessList *accessList - // Ordered storage slots to be used in predicate verification as set in the tx access list. - // Only set in PrepareAccessList, and un-modified through execution. - predicateStorageSlots map[common.Address][][]byte - - // Transient storage - transientStorage transientStorage // Journal of state modifications. This is the backbone of // Snapshot and RevertToSnapshot. @@ -142,15 +129,11 @@ type StateDB struct { SnapshotAccountReads time.Duration SnapshotStorageReads time.Duration SnapshotCommits time.Duration - TrieDBCommits time.Duration AccountUpdated int StorageUpdated int AccountDeleted int StorageDeleted int - - // Testing hooks - onCommit func(states *triestate.Set) // Hook invoked when commit is performed } // New creates a new state from a given trie. 
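The struct above carries three snapshot bookkeeping maps: destruct markers, slim account data, and storage slots. The destruct marker is deliberately sticky; if an account self-destructs and is then re-created within the same block, the snapshot layer must see both events, which is why later hunks only ever add to snapDestructs and never clear it on a subsequent update. A minimal sketch of that invariant, assuming simplified types; recordDestruct and recordUpdate are hypothetical helpers written for illustration, not methods from this diff.

package main

import "fmt"

type addrHash string

// snapState mirrors the three maps the reverted StateDB carries:
// destruct markers, slim account RLP, and storage slots.
type snapState struct {
	destructs map[addrHash]struct{}
	accounts  map[addrHash][]byte
	storage   map[addrHash]map[addrHash][]byte
}

func newSnapState() *snapState {
	return &snapState{
		destructs: make(map[addrHash]struct{}),
		accounts:  make(map[addrHash][]byte),
		storage:   make(map[addrHash]map[addrHash][]byte),
	}
}

// recordDestruct marks an account as destructed and drops any data buffered
// for it in this block; the marker itself stays set so the snapshot layer
// still learns about the deletion.
func (s *snapState) recordDestruct(a addrHash) {
	s.destructs[a] = struct{}{}
	delete(s.accounts, a)
	delete(s.storage, a)
}

// recordUpdate buffers fresh account data. Note it does NOT clear the
// destruct marker: a destruct followed by a re-creation in the same block
// must surface both events to the snapshot.
func (s *snapState) recordUpdate(a addrHash, slim []byte) {
	s.accounts[a] = slim
}

func main() {
	s := newSnapState()
	s.recordUpdate("acct1", []byte{0x01})
	s.recordDestruct("acct1")             // self-destruct mid-block
	s.recordUpdate("acct1", []byte{0x02}) // resurrected later in the same block

	_, destructed := s.destructs["acct1"]
	fmt.Println(destructed, s.accounts["acct1"]) // true [2]
}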
@@ -172,30 +155,26 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St return nil, err } sdb := &StateDB{ - db: db, - trie: tr, - originalRoot: root, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject), - stateObjectsPending: make(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount), - logs: make(map[common.Hash][]*types.Log), - preimages: make(map[common.Hash][]byte), - journal: newJournal(), - predicateStorageSlots: make(map[common.Address][][]byte), - accessList: newAccessList(), - transientStorage: newTransientStorage(), - hasher: crypto.NewKeccakState(), + db: db, + trie: tr, + originalRoot: root, + stateObjects: make(map[common.Address]*stateObject), + stateObjectsPending: make(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), + logs: make(map[common.Hash][]*types.Log), + preimages: make(map[common.Hash][]byte), + journal: newJournal(), + accessList: newAccessList(), + hasher: crypto.NewKeccakState(), } if snap != nil { if snap.Root() != root { return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex()) } sdb.snap = snap + sdb.snapDestructs = make(map[common.Hash]struct{}) + sdb.snapAccounts = make(map[common.Hash][]byte) + sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) } return sdb, nil } @@ -203,13 +182,13 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. -func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) { +func (s *StateDB) StartPrefetcher(namespace string) { if s.prefetcher != nil { s.prefetcher.close() s.prefetcher = nil } if s.snap != nil { - s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, maxConcurrency) + s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) } } @@ -229,21 +208,11 @@ func (s *StateDB) setError(err error) { } } -// Error returns the memorized database failure occurred earlier. func (s *StateDB) Error() error { return s.dbErr } -// AddLog adds a log with the specified parameters to the statedb -// Note: blockNumber is a required argument because StateDB does not -// know the current block number. -func (s *StateDB) AddLog(addr common.Address, topics []common.Hash, data []byte, blockNumber uint64) { - log := &types.Log{ - Address: addr, - Topics: topics, - Data: data, - BlockNumber: blockNumber, - } +func (s *StateDB) AddLog(log *types.Log) { s.journal.append(addLogChange{txhash: s.thash}) log.TxHash = s.thash @@ -253,12 +222,9 @@ func (s *StateDB) AddLog(addr common.Address, topics []common.Hash, data []byte, s.logSize++ } -// GetLogs returns the logs matching the specified transaction hash, and annotates -// them with the given blockNumber and blockHash. 
-func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash) []*types.Log { +func (s *StateDB) GetLogs(hash common.Hash, blockHash common.Hash) []*types.Log { logs := s.logs[hash] for _, l := range logs { - l.BlockNumber = blockNumber l.BlockHash = blockHash } return logs @@ -272,20 +238,6 @@ func (s *StateDB) Logs() []*types.Log { return logs } -// GetLogData returns the underlying topics and data from each log included in the StateDB -// Test helper function. -func (s *StateDB) GetLogData() ([][]common.Hash, [][]byte) { - var logData [][]byte - var topics [][]common.Hash - for _, lgs := range s.logs { - for _, log := range lgs { - topics = append(topics, log.Topics) - logData = append(logData, common.CopyBytes(log.Data)) - } - } - return topics, logData -} - // AddPreimage records a SHA3 preimage seen by the VM. func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) { if _, ok := s.preimages[hash]; !ok { @@ -308,7 +260,7 @@ func (s *StateDB) AddRefund(gas uint64) { } // SubRefund removes gas from the refund counter. -// This method will set the refund counter to 0 if the gas is greater than the current refund. +// This method will panic if the refund counter goes below zero func (s *StateDB) SubRefund(gas uint64) { s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { @@ -320,7 +272,7 @@ func (s *StateDB) SubRefund(gas uint64) { } // Exist reports whether the given account address exists in the state. -// Notably this also returns true for self-destructed accounts. +// Notably this also returns true for suicided accounts. func (s *StateDB) Exist(addr common.Address) bool { return s.getStateObject(addr) != nil } @@ -350,7 +302,6 @@ func (s *StateDB) GetBalanceMultiCoin(addr common.Address, coinID common.Hash) * return new(big.Int).Set(common.Big0) } -// GetNonce retrieves the nonce from the given address or 0 if object not found func (s *StateDB) GetNonce(addr common.Address) uint64 { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -360,16 +311,6 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { return 0 } -// GetStorageRoot retrieves the storage root from the given address or empty -// if object not found. -func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { - stateObject := s.getStateObject(addr) - if stateObject != nil { - return stateObject.Root() - } - return common.Hash{} -} - // TxIndex returns the current transaction index set by Prepare. func (s *StateDB) TxIndex() int { return s.txIndex @@ -378,7 +319,7 @@ func (s *StateDB) TxIndex() int { func (s *StateDB) GetCode(addr common.Address) []byte { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.Code() + return stateObject.Code(s.db) } return nil } @@ -386,7 +327,7 @@ func (s *StateDB) GetCode(addr common.Address) []byte { func (s *StateDB) GetCodeSize(addr common.Address) int { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.CodeSize() + return stateObject.CodeSize(s.db) } return 0 } @@ -404,16 +345,39 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { NormalizeStateKey(&hash) - return stateObject.GetState(hash) + return stateObject.GetState(s.db, hash) } return common.Hash{} } +// GetProof returns the Merkle proof for a given account. 
+func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) {
+	return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes()))
+}
+
+// GetProofByHash returns the Merkle proof for a given account.
+func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) {
+	var proof proofList
+	err := s.trie.Prove(addrHash[:], 0, &proof)
+	return proof, err
+}
+
+// GetStorageProof returns the Merkle proof for a given storage slot.
+func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
+	var proof proofList
+	trie := s.StorageTrie(a)
+	if trie == nil {
+		return proof, errors.New("storage trie for requested address does not exist")
+	}
+	err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+	return proof, err
+}
+
 // GetCommittedState retrieves a value from the given account's committed storage trie.
 func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
-		return stateObject.GetCommittedState(hash)
+		return stateObject.GetCommittedState(s.db, hash)
 	}
 	return common.Hash{}
 }
@@ -423,7 +387,7 @@ func (s *StateDB) GetCommittedStateAP1(addr common.Address, hash common.Hash) co
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
 		NormalizeStateKey(&hash)
-		return stateObject.GetCommittedState(hash)
+		return stateObject.GetCommittedState(s.db, hash)
 	}
 	return common.Hash{}
 }
@@ -433,10 +397,22 @@ func (s *StateDB) Database() Database {
 	return s.db
 }
 
-func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
+// StorageTrie returns the storage trie of an account.
+// The return value is a copy and is nil for non-existent accounts.
+func (s *StateDB) StorageTrie(addr common.Address) Trie {
+	stateObject := s.getStateObject(addr)
+	if stateObject == nil {
+		return nil
+	}
+	cpy := stateObject.deepCopy(s)
+	cpy.updateTrie(s.db)
+	return cpy.getTrie(s.db)
+}
+
+func (s *StateDB) HasSuicided(addr common.Address) bool {
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
-		return stateObject.selfDestructed
+		return stateObject.suicided
 	}
 	return false
 }
@@ -509,86 +485,38 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
 	stateObject := s.GetOrNewStateObject(addr)
 	if stateObject != nil {
 		NormalizeStateKey(&key)
-		stateObject.SetState(key, value)
+		stateObject.SetState(s.db, key, value)
 	}
 }
 
 // SetStorage replaces the entire storage for the specified account with given
-// storage. This function should only be used for debugging and the mutations
-// must be discarded afterwards.
+// storage. This function should only be used for debugging.
 func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
-	// SetStorage needs to wipe existing storage. We achieve this by pretending
-	// that the account self-destructed earlier in this block, by flagging
-	// it in stateObjectsDestruct. The effect of doing so is that storage lookups
-	// will not hit disk, since it is assumed that the disk-data is belonging
-	// to a previous incarnation of the object.
-	//
-	// TODO(rjl493456442) this function should only be supported by 'unwritable'
-	// state and all mutations made should all be discarded afterwards.
- if _, ok := s.stateObjectsDestruct[addr]; !ok { - s.stateObjectsDestruct[addr] = nil - } stateObject := s.GetOrNewStateObject(addr) - for k, v := range storage { - stateObject.SetState(k, v) + if stateObject != nil { + stateObject.SetStorage(storage) } } -// SelfDestruct marks the given account as selfdestructed. +// Suicide marks the given account as suicided. // This clears the account balance. // // The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after SelfDestruct. -func (s *StateDB) SelfDestruct(addr common.Address) { +// getStateObject will return a non-nil account after Suicide. +func (s *StateDB) Suicide(addr common.Address) bool { stateObject := s.getStateObject(addr) if stateObject == nil { - return + return false } - s.journal.append(selfDestructChange{ + s.journal.append(suicideChange{ account: &addr, - prev: stateObject.selfDestructed, + prev: stateObject.suicided, prevbalance: new(big.Int).Set(stateObject.Balance()), }) - stateObject.markSelfdestructed() + stateObject.markSuicided() stateObject.data.Balance = new(big.Int) -} - -func (s *StateDB) Selfdestruct6780(addr common.Address) { - stateObject := s.getStateObject(addr) - if stateObject == nil { - return - } - - if stateObject.created { - s.SelfDestruct(addr) - } -} - -// SetTransientState sets transient storage for a given account. It -// adds the change to the journal so that it can be rolled back -// to its previous value if there is a revert. -func (s *StateDB) SetTransientState(addr common.Address, key, value common.Hash) { - prev := s.GetTransientState(addr, key) - if prev == value { - return - } - s.journal.append(transientStorageChange{ - account: &addr, - key: key, - prevalue: prev, - }) - s.setTransientState(addr, key, value) -} - -// setTransientState is a lower level setter for transient storage. It -// is called during a revert to prevent modifications to the journal. -func (s *StateDB) setTransientState(addr common.Address, key, value common.Hash) { - s.transientStorage.Set(addr, key, value) -} -// GetTransientState gets transient storage for a given account. -func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common.Hash { - return s.transientStorage.Get(addr, key) + return true } // @@ -603,27 +531,16 @@ func (s *StateDB) updateStateObject(obj *stateObject) { } // Encode the account and update the account trie addr := obj.Address() - if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { + if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } - if obj.dirtyCode { - s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) - } - // Cache the data until commit. Note, this update mechanism is not symmetric - // to the deletion, because whereas it is enough to track account updates - // at commit time, deletions need tracking at transaction boundary level to - // ensure we capture state clearing. - s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - - // Track the original value of mutated account, nil means it was not present. - // Skip if it has been tracked (because updateStateObject may be called - // multiple times in a block). 
-	if _, ok := s.accountsOrigin[obj.address]; !ok {
-		if obj.origin == nil {
-			s.accountsOrigin[obj.address] = nil
-		} else {
-			s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin)
-		}
+
+	// If state snapshotting is active, cache the data until commit. Note, this
+	// update mechanism is not symmetric to the deletion, because whereas it is
+	// enough to track account updates at commit time, deletions need tracking
+	// at transaction boundary level to ensure we capture state clearing.
+	if s.snap != nil {
+		s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash, obj.data.IsMultiCoin)
 	}
 }
@@ -635,7 +552,7 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
 	}
 	// Delete the account from the trie
 	addr := obj.Address()
-	if err := s.trie.DeleteAccount(addr); err != nil {
+	if err := s.trie.TryDeleteAccount(addr[:]); err != nil {
 		s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
 	}
 }
@@ -679,10 +596,10 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 			Root:     common.BytesToHash(acc.Root),
 		}
 		if len(data.CodeHash) == 0 {
-			data.CodeHash = types.EmptyCodeHash.Bytes()
+			data.CodeHash = emptyCodeHash
 		}
 		if data.Root == (common.Hash{}) {
-			data.Root = types.EmptyRootHash
+			data.Root = emptyRoot
 		}
 	}
 }
@@ -690,7 +607,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 	if data == nil {
 		start := time.Now()
 		var err error
-		data, err = s.trie.GetAccount(addr)
+		data, err = s.trie.TryGetAccount(addr.Bytes())
 		if metrics.EnabledExpensive {
 			s.AccountReads += time.Since(start)
 		}
@@ -703,7 +620,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 		}
 	}
 	// Insert into the live set
-	obj := newObject(s, addr, data)
+	obj := newObject(s, addr, *data)
 	s.setStateObject(obj)
 	return obj
 }
@@ -725,36 +642,19 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
 // the given address, it is overwritten and returned as the second return value.
 func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
 	prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
-	newobj = newObject(s, addr, nil)
+
+	var prevdestruct bool
+	if s.snap != nil && prev != nil {
+		_, prevdestruct = s.snapDestructs[prev.addrHash]
+		if !prevdestruct {
+			s.snapDestructs[prev.addrHash] = struct{}{}
+		}
+	}
+	newobj = newObject(s, addr, types.StateAccount{})
 	if prev == nil {
 		s.journal.append(createObjectChange{account: &addr})
 	} else {
-		// The original account should be marked as destructed and all cached
-		// account and storage data should be cleared as well. Note, it must
-		// be done here, otherwise the destruction event of "original account"
-		// will be lost.
-		_, prevdestruct := s.stateObjectsDestruct[prev.address]
-		if !prevdestruct {
-			s.stateObjectsDestruct[prev.address] = prev.origin
-		}
-		// There may be some cached account/storage data already since IntermediateRoot
-		// will be called for each transaction before byzantium fork which will always
-		// cache the latest account/storage data.
- prevAccount, ok := s.accountsOrigin[prev.address] - s.journal.append(resetObjectChange{ - account: &addr, - prev: prev, - prevdestruct: prevdestruct, - prevAccount: s.accounts[prev.addrHash], - prevStorage: s.storages[prev.addrHash], - prevAccountOriginExist: ok, - prevAccountOrigin: prevAccount, - prevStorageOrigin: s.storagesOrigin[prev.address], - }) - delete(s.accounts, prev.addrHash) - delete(s.storages, prev.addrHash) - delete(s.accountsOrigin, prev.address) - delete(s.storagesOrigin, prev.address) + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } s.setStateObject(newobj) if prev != nil && !prev.deleted { @@ -780,17 +680,33 @@ func (s *StateDB) CreateAccount(addr common.Address) { } } -// copyPredicateStorageSlots creates a deep copy of the provided predicateStorageSlots map. -func copyPredicateStorageSlots(predicateStorageSlots map[common.Address][][]byte) map[common.Address][][]byte { - res := make(map[common.Address][][]byte, len(predicateStorageSlots)) - for address, predicates := range predicateStorageSlots { - copiedPredicates := make([][]byte, len(predicates)) - for i, predicateBytes := range predicates { - copiedPredicates[i] = common.CopyBytes(predicateBytes) +func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { + so := db.getStateObject(addr) + if so == nil { + return nil + } + it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil)) + + for it.Next() { + key := common.BytesToHash(db.trie.GetKey(it.Key)) + if value, dirty := so.dirtyStorage[key]; dirty { + if !cb(key, value) { + return nil + } + continue + } + + if len(it.Value) > 0 { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return err + } + if !cb(key, common.BytesToHash(content)) { + return nil + } } - res[address] = copiedPredicates } - return res + return nil } // Copy creates a deep, independent copy of the state. @@ -798,29 +714,18 @@ func copyPredicateStorageSlots(predicateStorageSlots map[common.Address][][]byte func (s *StateDB) Copy() *StateDB { // Copy all the basic fields, initialize the memory ones state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - originalRoot: s.originalRoot, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), - stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)), - refund: s.refund, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), - hasher: crypto.NewKeccakState(), - - // In order for the block producer to be able to use and make additions - // to the snapshot tree, we need to copy that as well. Otherwise, any - // block mined by ourselves will cause gaps in the tree, and force the - // miner to operate trie-backed only. 
-		snap: s.snap,
+		db:                  s.db,
+		trie:                s.db.CopyTrie(s.trie),
+		originalRoot:        s.originalRoot,
+		stateObjects:        make(map[common.Address]*stateObject, len(s.journal.dirties)),
+		stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
+		stateObjectsDirty:   make(map[common.Address]struct{}, len(s.journal.dirties)),
+		refund:              s.refund,
+		logs:                make(map[common.Hash][]*types.Log, len(s.logs)),
+		logSize:             s.logSize,
+		preimages:           make(map[common.Hash][]byte, len(s.preimages)),
+		journal:             newJournal(),
+		hasher:              crypto.NewKeccakState(),
 	}
 	// Copy the dirty states, logs, and preimages
 	for addr := range s.journal.dirties {
@@ -830,7 +735,7 @@ func (s *StateDB) Copy() *StateDB {
 		// nil
 		if object, exist := s.stateObjects[addr]; exist {
 			// Even though the original object is dirty, we are not copying the journal,
-			// so we need to make sure that any side-effect the journal would have caused
+			// so we need to make sure that any side effect the journal would have caused
 			// during a commit (or similar op) is already applied to the copy.
 			state.stateObjects[addr] = object.deepCopy(state)
 
 			state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
 		}
 	}
-	// Above, we don't copy the actual journal. This means that if the copy
-	// is copied, the loop above will be a no-op, since the copy's journal
-	// is empty. Thus, here we iterate over stateObjects, to enable copies
-	// of copies.
+	// Above, we don't copy the actual journal. This means that if the copy is copied, the
+	// loop above will be a no-op, since the copy's journal is empty.
+	// Thus, here we iterate over stateObjects, to enable copies of copies.
 	for addr := range s.stateObjectsPending {
 		if _, exist := state.stateObjects[addr]; !exist {
 			state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state)
@@ -854,18 +758,6 @@ func (s *StateDB) Copy() *StateDB {
 		}
 		state.stateObjectsDirty[addr] = struct{}{}
 	}
-	// Deep copy the destruction markers.
-	for addr, value := range s.stateObjectsDestruct {
-		state.stateObjectsDestruct[addr] = value
-	}
-	// Deep copy the state changes made in the scope of block
-	// along with their original values.
-	state.accounts = copySet(s.accounts)
-	state.storages = copy2DSet(s.storages)
-	state.accountsOrigin = copySet(state.accountsOrigin)
-	state.storagesOrigin = copy2DSet(state.storagesOrigin)
-
-	// Deep copy the logs occurred in the scope of block
 	for hash, logs := range s.logs {
 		cpy := make([]*types.Log, len(logs))
 		for i, l := range logs {
@@ -874,19 +766,15 @@ func (s *StateDB) Copy() *StateDB {
 		}
 		state.logs[hash] = cpy
 	}
-	// Deep copy the preimages occurred in the scope of block
 	for hash, preimage := range s.preimages {
 		state.preimages[hash] = preimage
 	}
-	// Do we need to copy the access list and transient storage?
-	// In practice: No. At the start of a transaction, these two lists are empty.
-	// In practice, we only ever copy state _between_ transactions/blocks, never
-	// in the middle of a transaction. However, it doesn't cost us much to copy
-	// empty lists, so we do it anyway to not blow up if we ever decide copy them
-	// in the middle of a transaction.
+	// Do we need to copy the access list? In practice: No. At the start of a
+	// transaction, the access list is empty. In practice, we only ever copy state
+	// _between_ transactions/blocks, never in the middle of a transaction.
+	// However, it doesn't cost us much to copy an empty list, so we do it anyway
+	// to not blow up if we ever decide to copy it in the middle of a transaction.
 	state.accessList = s.accessList.Copy()
-	state.transientStorage = s.transientStorage.Copy()
-	state.predicateStorageSlots = copyPredicateStorageSlots(s.predicateStorageSlots)
 
 	// If there's a prefetcher running, make an inactive copy of it that can
 	// only access data but does not actively preload (since the user will not
@@ -894,6 +782,30 @@ func (s *StateDB) Copy() *StateDB {
 	if s.prefetcher != nil {
 		state.prefetcher = s.prefetcher.copy()
 	}
+	if s.snap != nil {
+		// In order for the miner to be able to use and make additions
+		// to the snapshot tree, we need to copy that as well.
+		// Otherwise, any block mined by ourselves will cause gaps in the tree,
+		// and force the miner to operate trie-backed only.
+		state.snap = s.snap
+		// deep copy needed
+		state.snapDestructs = make(map[common.Hash]struct{})
+		for k, v := range s.snapDestructs {
+			state.snapDestructs[k] = v
+		}
+		state.snapAccounts = make(map[common.Hash][]byte)
+		for k, v := range s.snapAccounts {
+			state.snapAccounts[k] = v
+		}
+		state.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
+		for k, v := range s.snapStorage {
+			temp := make(map[common.Hash][]byte)
+			for kk, vv := range v {
+				temp[kk] = vv
+			}
+			state.snapStorage[k] = temp
+		}
+	}
 	return state
 }
@@ -942,26 +854,21 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 			// Thus, we can safely ignore it here
 			continue
 		}
-		if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
+		if obj.suicided || (deleteEmptyObjects && obj.empty()) {
 			obj.deleted = true
-			// We need to maintain account deletions explicitly (will remain
-			// set indefinitely). Note only the first occurred self-destruct
-			// event is tracked.
-			if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
-				s.stateObjectsDestruct[obj.address] = obj.origin
-			}
+			// If state snapshotting is active, also mark the destruction there.
 			// Note, we can't do this only at the end of a block because multiple
 			// transactions within the same block might self destruct and then
 			// resurrect an account; but the snapshotter needs both events.
-			delete(s.accounts, obj.addrHash)      // Clear out any previously updated account data (may be recreated via a resurrect)
-			delete(s.storages, obj.addrHash)      // Clear out any previously updated storage data (may be recreated via a resurrect)
-			delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
-			delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
+			if s.snap != nil {
+				s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
+				delete(s.snapAccounts, obj.addrHash)       // Clear out any previously updated account data (may be recreated via a resurrect)
+				delete(s.snapStorage, obj.addrHash)        // Clear out any previously updated storage data (may be recreated via a resurrect)
+			}
 		} else {
 			obj.finalise(true) // Prefetch slots in the background
 		}
-		obj.created = false
 		s.stateObjectsPending[addr] = struct{}{}
 		s.stateObjectsDirty[addr] = struct{}{}
 
@@ -971,7 +878,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 		addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
 	}
 	if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
-		s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
+		s.prefetcher.prefetch(common.Hash{}, s.originalRoot, addressesToPrefetch)
 	}
 	// Invalidate journal because reverting across transactions is not allowed.
 	s.clearJournalAndRefund()
@@ -1005,7 +912,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	// to pull useful data from disk.
 	for addr := range s.stateObjectsPending {
 		if obj := s.stateObjects[addr]; !obj.deleted {
-			obj.updateRoot()
+			obj.updateRoot(s.db)
 		}
 	}
 	// Now we're about to start to write changes to the trie. The trie is so far
@@ -1040,10 +947,9 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	return s.trie.Hash()
 }
 
-// SetTxContext sets the current transaction hash and index which are
-// used when the EVM emits new state logs. It should be invoked before
-// transaction execution.
-func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
+// Prepare sets the current transaction hash and index which are
+// used when the EVM emits new state logs.
+func (s *StateDB) Prepare(thash common.Hash, ti int) {
 	s.thash = thash
 	s.txIndex = ti
 }
@@ -1056,231 +962,19 @@ func (s *StateDB) clearJournalAndRefund() {
 	s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
 }
 
-// fastDeleteStorage is the function that efficiently deletes the storage trie
-// of a specific account. It leverages the associated state snapshot for fast
-// storage iteration and constructs trie node deletion markers by creating
-// stack trie with iterated slots.
-func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { - iter, _ := s.snap.StorageIterator(addrHash, common.Hash{}) - defer iter.Release() - - var ( - size common.StorageSize - nodes = trienode.NewNodeSet(addrHash) - slots = make(map[common.Hash][]byte) - ) - options := trie.NewStackTrieOptions() - options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { - nodes.AddNode(path, trienode.NewDeleted()) - size += common.StorageSize(len(path)) - }) - stack := trie.NewStackTrie(options) - for iter.Next() { - if size > storageDeleteLimit { - return true, size, nil, nil, nil - } - slot := common.CopyBytes(iter.Slot()) - if err := iter.Error(); err != nil { // error might occur after Slot function - return false, 0, nil, nil, err - } - size += common.StorageSize(common.HashLength + len(slot)) - slots[iter.Hash()] = slot - - if err := stack.Update(iter.Hash().Bytes(), slot); err != nil { - return false, 0, nil, nil, err - } - } - if err := iter.Error(); err != nil { // error might occur during iteration - return false, 0, nil, nil, err - } - if stack.Hash() != root { - return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash()) - } - return false, size, slots, nodes, nil -} - -// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage," -// employed when the associated state snapshot is not available. It iterates the -// storage slots along with all internal trie nodes via trie directly. -func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { - tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) - if err != nil { - return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) - } - it, err := tr.NodeIterator(nil) - if err != nil { - return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) - } - var ( - size common.StorageSize - nodes = trienode.NewNodeSet(addrHash) - slots = make(map[common.Hash][]byte) - ) - for it.Next(true) { - if size > storageDeleteLimit { - return true, size, nil, nil, nil - } - if it.Leaf() { - slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob()) - size += common.StorageSize(common.HashLength + len(it.LeafBlob())) - continue - } - if it.Hash() == (common.Hash{}) { - continue - } - size += common.StorageSize(len(it.Path())) - nodes.AddNode(it.Path(), trienode.NewDeleted()) - } - if err := it.Error(); err != nil { - return false, 0, nil, nil, err - } - return false, size, slots, nodes, nil -} - -// deleteStorage is designed to delete the storage trie of a designated account. -// It could potentially be terminated if the storage size is excessively large, -// potentially leading to an out-of-memory panic. The function will make an attempt -// to utilize an efficient strategy if the associated state snapshot is reachable; -// otherwise, it will resort to a less-efficient approach. -func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { - var ( - start = time.Now() - err error - aborted bool - size common.StorageSize - slots map[common.Hash][]byte - nodes *trienode.NodeSet - ) - // The fast approach can be failed if the snapshot is not fully - // generated, or it's internally corrupted. 
Fallback to the slow - // one just in case. - if s.snap != nil { - aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root) - } - if s.snap == nil || err != nil { - aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root) - } - if err != nil { - return false, nil, nil, err - } - if metrics.EnabledExpensive { - if aborted { - slotDeletionSkip.Inc(1) - } - n := int64(len(slots)) - - slotDeletionMaxCount.UpdateIfGt(int64(len(slots))) - slotDeletionMaxSize.UpdateIfGt(int64(size)) - - slotDeletionTimer.UpdateSince(start) - slotDeletionCount.Mark(n) - slotDeletionSize.Mark(int64(size)) - } - return aborted, slots, nodes, nil -} - -// handleDestruction processes all destruction markers and deletes the account -// and associated storage slots if necessary. There are four possible situations -// here: -// -// - the account was not existent and be marked as destructed -// -// - the account was not existent and be marked as destructed, -// however, it's resurrected later in the same block. -// -// - the account was existent and be marked as destructed -// -// - the account was existent and be marked as destructed, -// however it's resurrected later in the same block. -// -// In case (a), nothing needs be deleted, nil to nil transition can be ignored. -// -// In case (b), nothing needs be deleted, nil is used as the original value for -// newly created account and storages -// -// In case (c), **original** account along with its storages should be deleted, -// with their values be tracked as original value. -// -// In case (d), **original** account along with its storages should be deleted, -// with their values be tracked as original value. -func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) { - // Short circuit if geth is running with hash mode. This procedure can consume - // considerable time and storage deletion isn't supported in hash mode, thus - // preemptively avoiding unnecessary expenses. - incomplete := make(map[common.Address]struct{}) - if s.db.TrieDB().Scheme() == rawdb.HashScheme { - return incomplete, nil - } - for addr, prev := range s.stateObjectsDestruct { - // The original account was non-existing, and it's marked as destructed - // in the scope of block. It can be case (a) or (b). - // - for (a), skip it without doing anything. - // - for (b), track account's original value as nil. It may overwrite - // the data cached in s.accountsOrigin set by 'updateStateObject'. - addrHash := crypto.Keccak256Hash(addr[:]) - if prev == nil { - if _, ok := s.accounts[addrHash]; ok { - s.accountsOrigin[addr] = nil // case (b) - } - continue - } - // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'. - s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d) - - // Short circuit if the storage was empty. - if prev.Root == types.EmptyRootHash { - continue - } - // Remove storage slots belong to the account. - aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) - if err != nil { - return nil, fmt.Errorf("failed to delete storage, err: %w", err) - } - // The storage is too huge to handle, skip it but mark as incomplete. - // For case (d), the account is resurrected might with a few slots - // created. In this case, wipe the entire storage state diff because - // of aborted deletion. 
-		if aborted {
-			incomplete[addr] = struct{}{}
-			delete(s.storagesOrigin, addr)
-			continue
-		}
-		if s.storagesOrigin[addr] == nil {
-			s.storagesOrigin[addr] = slots
-		} else {
-			// It can overwrite the data in s.storagesOrigin[addrHash] set by
-			// 'object.updateTrie'.
-			for key, val := range slots {
-				s.storagesOrigin[addr][key] = val
-			}
-		}
-		if err := nodes.Merge(set); err != nil {
-			return nil, err
-		}
-	}
-	return incomplete, nil
-}
-
 // Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
-	return s.commit(block, deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
+func (s *StateDB) Commit(deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) {
+	return s.commit(deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot)
 }
 
 // CommitWithSnap writes the state to the underlying in-memory trie database and
 // generates a snapshot layer for the newly committed state.
-func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
-	return s.commit(block, deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
+func (s *StateDB) CommitWithSnap(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
+	return s.commit(deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot)
 }
 
-// Once the state is committed, tries cached in stateDB (including account
-// trie, storage tries) will no longer be functional. A new state instance
-// must be created with new root and updated database for accessing post-
-// commit states.
-//
-// The associated block number of the state transition is also provided
-// for more chain context.
-func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
-	// Short circuit in case any database failure occurred earlier.
+// commit writes the state to the underlying in-memory trie database.
+func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) {
 	if s.dbErr != nil {
 		return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
 	}
@@ -1289,46 +983,35 @@
// Commit objects to the trie, measuring the elapsed time var ( - accountTrieNodesUpdated int - accountTrieNodesDeleted int - storageTrieNodesUpdated int - storageTrieNodesDeleted int - nodes = trienode.NewMergedNodeSet() - codeWriter = s.db.DiskDB().NewBatch() + accountTrieNodes int + storageTrieNodes int + nodes = trie.NewMergedNodeSet() ) - // Handle all state deletions first - incomplete, err := s.handleDestruction(nodes) - if err != nil { - return common.Hash{}, err - } - // Handle all state updates afterwards + codeWriter := s.db.TrieDB().DiskDB().NewBatch() for addr := range s.stateObjectsDirty { - obj := s.stateObjects[addr] - if obj.deleted { - continue - } - // Write any contract code associated with the state object - if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) - obj.dirtyCode = false - } - // Write any storage changes in the state object to its storage trie - set, err := obj.commit() - if err != nil { - return common.Hash{}, err - } - // Merge the dirty nodes of storage trie into global set. It is possible - // that the account was destructed and then resurrected in the same block. - // In this case, the node set is shared by both accounts. - if set != nil { - if err := nodes.Merge(set); err != nil { + if obj := s.stateObjects[addr]; !obj.deleted { + // Write any contract code associated with the state object + if obj.code != nil && obj.dirtyCode { + rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) + obj.dirtyCode = false + } + // Write any storage changes in the state object to its storage trie + set, err := obj.CommitTrie(s.db) + if err != nil { return common.Hash{}, err } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted + // Merge the dirty nodes of storage trie into global set + if set != nil { + if err := nodes.Merge(set); err != nil { + return common.Hash{}, err + } + storageTrieNodes += set.Len() + } } } + if len(s.stateObjectsDirty) > 0 { + s.stateObjectsDirty = make(map[common.Address]struct{}) + } if codeWriter.ValueSize() > 0 { if err := codeWriter.Write(); err != nil { log.Crit("Failed to commit dirty codes", "error", err) @@ -1348,7 +1031,7 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() + accountTrieNodes = set.Len() } if metrics.EnabledExpensive { s.AccountCommits += time.Since(start) @@ -1357,105 +1040,64 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. 
storageUpdatedMeter.Mark(int64(s.StorageUpdated)) accountDeletedMeter.Mark(int64(s.AccountDeleted)) storageDeletedMeter.Mark(int64(s.StorageDeleted)) - accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) - accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) - storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) - storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) + accountTrieCommittedMeter.Mark(int64(accountTrieNodes)) + storageTriesCommittedMeter.Mark(int64(storageTrieNodes)) s.AccountUpdated, s.AccountDeleted = 0, 0 s.StorageUpdated, s.StorageDeleted = 0, 0 } // If snapshotting is enabled, update the snapshot tree with this new version if snaps != nil { - start := time.Now() if s.snap == nil { log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", parentHash, blockHash)) } - if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil { - log.Warn("Failed to update snapshot tree", "to", root, "err", err) - } if metrics.EnabledExpensive { - s.SnapshotCommits += time.Since(start) + defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) } - s.snap = nil - } - if root == (common.Hash{}) { - root = types.EmptyRootHash - } - origin := s.originalRoot - if origin == (common.Hash{}) { - origin = types.EmptyRootHash - } - if root != origin { - start := time.Now() - set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete) - if referenceRoot { - if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, block, nodes, set); err != nil { - return common.Hash{}, err - } - } else { - if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { - return common.Hash{}, err - } + if err := snaps.Update(blockHash, root, parentHash, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil { + log.Warn("Failed to update snapshot tree", "to", root, "err", err) } - s.originalRoot = root - if metrics.EnabledExpensive { - s.TrieDBCommits += time.Since(start) + s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil + } + if referenceRoot { + if err := s.db.TrieDB().UpdateAndReferenceRoot(nodes, root); err != nil { + return common.Hash{}, err } - if s.onCommit != nil { - s.onCommit(set) + } else { + if err := s.db.TrieDB().Update(nodes); err != nil { + return common.Hash{}, err } } - // Clear all internal flags at the end of commit operation. - s.accounts = make(map[common.Hash][]byte) - s.storages = make(map[common.Hash]map[common.Hash][]byte) - s.accountsOrigin = make(map[common.Address][]byte) - s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - s.stateObjectsDirty = make(map[common.Address]struct{}) - s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) - return root, nil + s.originalRoot = root + return root, err } -// Prepare handles the preparatory steps for executing a state transition with. -// This method must be invoked before state transition. 
+// PrepareAccessList handles the preparatory steps for executing a state transition with +// regards to both EIP-2929 and EIP-2930: // -// Berlin fork (aka ApricotPhase2): // - Add sender to access list (2929) // - Add destination to access list (2929) // - Add precompiles to access list (2929) // - Add the contents of the optional tx access list (2930) // -// Potential EIPs: -// - Reset access list (Berlin/ApricotPhase2) -// - Add coinbase to access list (EIP-3651/Durango) -// - Reset transient storage (EIP-1153) -func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - if rules.IsApricotPhase2 { - // Clear out any leftover from previous executions - al := newAccessList() - s.accessList = al - - al.AddAddress(sender) - if dst != nil { - al.AddAddress(*dst) - // If it's a create-tx, the destination will be added inside evm.create +// This method should only be called if Berlin/ApricotPhase2/2929+2930 is applicable at the current number. +func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + // Clear out any leftover from previous executions + s.accessList = newAccessList() + + s.AddAddressToAccessList(sender) + if dst != nil { + s.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + s.AddAddressToAccessList(addr) + } + for _, el := range list { + s.AddAddressToAccessList(el.Address) + for _, key := range el.StorageKeys { + s.AddSlotToAccessList(el.Address, key) } - for _, addr := range precompiles { - al.AddAddress(addr) - } - for _, el := range list { - al.AddAddress(el.Address) - for _, key := range el.StorageKeys { - al.AddSlot(el.Address, key) - } - } - if rules.IsDurango { // EIP-3651: warm coinbase - al.AddAddress(coinbase) - } - - s.predicateStorageSlots = predicate.PreparePredicateStorageSlots(rules, list) } - // Reset transient storage at the beginning of transaction execution - s.transientStorage = newTransientStorage() } // AddAddressToAccessList adds the given address to the access list @@ -1492,69 +1134,3 @@ func (s *StateDB) AddressInAccessList(addr common.Address) bool { func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { return s.accessList.Contains(addr, slot) } - -// GetTxHash returns the current tx hash on the StateDB set by SetTxContext. -func (s *StateDB) GetTxHash() common.Hash { - return s.thash -} - -// GetPredicateStorageSlots returns the storage slots associated with the address, index pair. -// A list of access tuples can be included within transaction types post EIP-2930. The address -// is declared directly on the access tuple and the index is the i'th occurrence of an access -// tuple with the specified address. -// -// Ex. 
AccessList[[AddrA, Predicate1], [AddrB, Predicate2], [AddrA, Predicate3]] -// In this case, the caller could retrieve predicates 1-3 with the following calls: -// GetPredicateStorageSlots(AddrA, 0) -> Predicate1 -// GetPredicateStorageSlots(AddrB, 0) -> Predicate2 -// GetPredicateStorageSlots(AddrA, 1) -> Predicate3 -func (s *StateDB) GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) { - predicates, exists := s.predicateStorageSlots[address] - if !exists { - return nil, false - } - if index >= len(predicates) { - return nil, false - } - return predicates[index], true -} - -// convertAccountSet converts a provided account set from address keyed to hash keyed. -func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { - ret := make(map[common.Hash]struct{}, len(set)) - for addr := range set { - obj, exist := s.stateObjects[addr] - if !exist { - ret[crypto.Keccak256Hash(addr[:])] = struct{}{} - } else { - ret[obj.addrHash] = struct{}{} - } - } - return ret -} - -// SetPredicateStorageSlots sets the predicate storage slots for the given address -func (s *StateDB) SetPredicateStorageSlots(address common.Address, predicates [][]byte) { - s.predicateStorageSlots[address] = predicates -} - -// copySet returns a deep-copied set. -func copySet[k comparable](set map[k][]byte) map[k][]byte { - copied := make(map[k][]byte, len(set)) - for key, val := range set { - copied[key] = common.CopyBytes(val) - } - return copied -} - -// copy2DSet returns a two-dimensional deep-copied set. -func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte { - copied := make(map[k]map[common.Hash][]byte, len(set)) - for addr, subset := range set { - copied[addr] = make(map[common.Hash][]byte, len(subset)) - for key, val := range subset { - copied[addr][key] = common.CopyBytes(val) - } - } - return copied -} diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 19f1a5198f..0ad72a3e35 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -29,7 +29,6 @@ package state import ( "bytes" "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -40,28 +39,19 @@ import ( "testing" "testing/quick" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/hashdb" - "github.com/ava-labs/coreth/trie/triedb/pathdb" - "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" - "github.com/holiman/uint256" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" ) // Tests that updating a state trie does not leak any database writes prior to // actually committing the state. 
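Distilled, the two-step persistence pattern the tests below drive looks like this (a minimal sketch, assuming it lives in package state like the tests themselves; error handling elided):

```go
// Sketch of the commit flow: StateDB.Commit flushes dirty objects into
// the in-memory trie database; TrieDB().Commit then persists the nodes.
func exampleCommitFlow() {
	db := rawdb.NewMemoryDatabase()
	st, _ := New(common.Hash{}, NewDatabase(db), nil)

	st.SetBalance(common.HexToAddress("0x01"), big.NewInt(42))

	// deleteEmptyObjects=false, referenceRoot=false; passing
	// referenceRoot=true routes through UpdateAndReferenceRoot instead.
	root, _ := st.Commit(false, false)

	// Persist the trie nodes for the committed root to disk.
	_ = st.Database().TrieDB().Commit(root, false, nil)
}
```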
func TestUpdateLeaks(t *testing.T) { // Create an empty state database - var ( - db = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabase(db, nil) - ) - state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil) + db := rawdb.NewMemoryDatabase() + state, _ := New(common.Hash{}, NewDatabase(db), nil) // Update it with some accounts for i := byte(0); i < 255; i++ { @@ -77,7 +67,7 @@ func TestUpdateLeaks(t *testing.T) { } root := state.IntermediateRoot(false) - if err := tdb.Commit(root, false); err != nil { + if err := state.Database().TrieDB().Commit(root, false, nil); err != nil { t.Errorf("can not commit trie %v to persistent database", root.Hex()) } @@ -95,10 +85,8 @@ func TestIntermediateLeaks(t *testing.T) { // Create two state databases, one transitioning to the final state, the other final from the beginning transDb := rawdb.NewMemoryDatabase() finalDb := rawdb.NewMemoryDatabase() - transNdb := trie.NewDatabase(transDb, nil) - finalNdb := trie.NewDatabase(finalDb, nil) - transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil) - finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil) + transState, _ := New(common.Hash{}, NewDatabase(transDb), nil) + finalState, _ := New(common.Hash{}, NewDatabase(finalDb), nil) modify := func(state *StateDB, addr common.Address, i, tweak byte) { state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak))) @@ -126,19 +114,19 @@ func TestIntermediateLeaks(t *testing.T) { } // Commit and cross check the databases. - transRoot, err := transState.Commit(0, false, false) + transRoot, err := transState.Commit(false, false) if err != nil { t.Fatalf("failed to commit transition state: %v", err) } - if err = transNdb.Commit(transRoot, false); err != nil { + if err = transState.Database().TrieDB().Commit(transRoot, false, nil); err != nil { t.Errorf("can not commit trie %v to persistent database", transRoot.Hex()) } - finalRoot, err := finalState.Commit(0, false, false) + finalRoot, err := finalState.Commit(false, false) if err != nil { t.Fatalf("failed to commit final state: %v", err) } - if err = finalNdb.Commit(finalRoot, false); err != nil { + if err = finalState.Database().TrieDB().Commit(finalRoot, false, nil); err != nil { t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex()) } @@ -173,7 +161,7 @@ func TestIntermediateLeaks(t *testing.T) { // https://github.com/ethereum/go-ethereum/pull/15549. 
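Looping back to the access-list hunk in statedb.go: the restored PrepareAccessList is meant to be invoked once per transaction, before execution begins. A hedged sketch of a call site; the message accessors and the ActivePrecompiles helper are assumptions standing in for whatever the caller has at hand, not part of this diff:

```go
// Hypothetical transaction-processing call site; msg and rules come
// from the caller, and the accessor names are illustrative.
func warmAccessList(statedb *state.StateDB, rules params.Rules, msg types.Message) {
	if rules.IsApricotPhase2 { // EIP-2929/2930 rules active
		statedb.PrepareAccessList(
			msg.From(),                  // sender is always warm (2929)
			msg.To(),                    // nil on contract creation
			vm.ActivePrecompiles(rules), // precompiles are always warm (2929)
			msg.AccessList(),            // declared tuples (2930)
		)
	}
}
```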
func TestCopy(t *testing.T) { // Create a random state test to copy and modify "independently" - orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) for i := byte(0); i < 255; i++ { obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i})) @@ -321,9 +309,9 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { }, }, { - name: "SelfDestruct", + name: "Suicide", fn: func(a testAction, s *StateDB) { - s.SelfDestruct(addr) + s.Suicide(addr) }, }, { @@ -339,7 +327,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { fn: func(a testAction, s *StateDB) { data := make([]byte, 2) binary.BigEndian.PutUint16(data, uint16(a.args[0])) - s.AddLog(addr, nil, data, 0) + s.AddLog(&types.Log{Address: addr, Data: data}) }, args: make([]int64, 1), }, @@ -366,16 +354,6 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { }, args: make([]int64, 1), }, - { - name: "SetTransientState", - fn: func(a testAction, s *StateDB) { - var key, val common.Hash - binary.BigEndian.PutUint16(key[:], uint16(a.args[0])) - binary.BigEndian.PutUint16(val[:], uint16(a.args[1])) - s.SetTransientState(addr, key, val) - }, - args: make([]int64, 2), - }, } action := actions[r.Intn(len(actions))] var nameargs []string @@ -433,15 +411,13 @@ func (test *snapshotTest) String() string { func (test *snapshotTest) run() bool { // Run all actions and create snapshots. var ( - state, _ = New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) snapshotRevs = make([]int, len(test.snapshots)) sindex = 0 - checkstates = make([]*StateDB, len(test.snapshots)) ) for i, action := range test.actions { if len(test.snapshots) > sindex && i == test.snapshots[sindex] { snapshotRevs[sindex] = state.Snapshot() - checkstates[sindex] = state.Copy() sindex++ } action.fn(action, state) @@ -449,8 +425,12 @@ func (test *snapshotTest) run() bool { // Revert all snapshots in reverse order. Each revert must yield a state // that is equivalent to fresh state with all actions up the snapshot applied. for sindex--; sindex >= 0; sindex-- { + checkstate, _ := New(common.Hash{}, state.Database(), nil) + for _, action := range test.actions[:test.snapshots[sindex]] { + action.fn(action, checkstate) + } state.RevertToSnapshot(snapshotRevs[sindex]) - if err := test.checkEqual(state, checkstates[sindex]); err != nil { + if err := test.checkEqual(state, checkstate); err != nil { test.err = fmt.Errorf("state mismatch after revert to snapshot %d\n%v", sindex, err) return false } @@ -458,43 +438,6 @@ func (test *snapshotTest) run() bool { return true } -func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.Hash) bool) error { - so := s.getStateObject(addr) - if so == nil { - return nil - } - tr, err := so.getTrie() - if err != nil { - return err - } - trieIt, err := tr.NodeIterator(nil) - if err != nil { - return err - } - it := trie.NewIterator(trieIt) - - for it.Next() { - key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage[key]; dirty { - if !cb(key, value) { - return nil - } - continue - } - - if len(it.Value) > 0 { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return err - } - if !cb(key, common.BytesToHash(content)) { - return nil - } - } - } - return nil -} - // checkEqual checks that methods of state and checkstate return the same values. 
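The harness above now replays actions into a fresh state instead of keeping copies; the journal contract it verifies is, in miniature:

```go
// Sketch (assumes package state): reverting to a snapshot undoes every
// mutation journaled after the matching Snapshot call.
func exampleSnapshotRevert(st *StateDB, addr common.Address) {
	id := st.Snapshot()
	st.AddBalance(addr, big.NewInt(1))
	st.RevertToSnapshot(id) // the balance change above is rolled back
}
```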
func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { for _, addr := range test.addrs { @@ -508,7 +451,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { } // Check basic accessor methods. checkeq("Exist", state.Exist(addr), checkstate.Exist(addr)) - checkeq("HasSelfdestructed", state.HasSelfDestructed(addr), checkstate.HasSelfDestructed(addr)) + checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr)) checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr)) checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr)) checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr)) @@ -516,10 +459,10 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr)) // Check storage. if obj := state.getStateObject(addr); obj != nil { - forEachStorage(state, addr, func(key, value common.Hash) bool { + state.ForEachStorage(addr, func(key, value common.Hash) bool { return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) }) - forEachStorage(checkstate, addr, func(key, value common.Hash) bool { + checkstate.ForEachStorage(addr, func(key, value common.Hash) bool { return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) }) } @@ -532,17 +475,17 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d", state.GetRefund(), checkstate.GetRefund()) } - if !reflect.DeepEqual(state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) { + if !reflect.DeepEqual(state.GetLogs(common.Hash{}, common.Hash{}), checkstate.GetLogs(common.Hash{}, common.Hash{})) { return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v", - state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) + state.GetLogs(common.Hash{}, common.Hash{}), checkstate.GetLogs(common.Hash{}, common.Hash{})) } return nil } func TestTouchDelete(t *testing.T) { - s := newStateEnv() + s := newStateTest() s.state.GetOrNewStateObject(common.Address{}) - root, _ := s.state.Commit(0, false, false) + root, _ := s.state.Commit(false, false) s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap) snapshot := s.state.Snapshot() @@ -560,7 +503,7 @@ func TestTouchDelete(t *testing.T) { // TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy. // See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512 func TestCopyOfCopy(t *testing.T) { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) addr := common.HexToAddress("aaaa") state.SetBalance(addr, big.NewInt(42)) @@ -577,8 +520,7 @@ func TestCopyOfCopy(t *testing.T) { // // See https://github.com/ethereum/go-ethereum/issues/20106. 
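The next two tests lean on the dirty-versus-committed split, which in sketch form is:

```go
// Sketch (assumes package state): GetState reads the pending (dirty)
// value, GetCommittedState the value as of the last commit.
func exampleDirtyVsCommitted(st *StateDB, addr common.Address, skey, sval common.Hash) {
	st.SetState(addr, skey, sval)
	_ = st.GetState(addr, skey)          // sval, served from the dirty set
	_ = st.GetCommittedState(addr, skey) // still common.Hash{} for a fresh slot pre-commit
}
```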
func TestCopyCommitCopy(t *testing.T) { - tdb := NewDatabase(rawdb.NewMemoryDatabase()) - state, _ := New(types.EmptyRootHash, tdb, nil) + state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -615,6 +557,20 @@ func TestCopyCommitCopy(t *testing.T) { if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } + + copyOne.Commit(false, false) + if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42) + } + if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) { + t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello")) + } + if val := copyOne.GetState(addr, skey); val != sval { + t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) + } + if val := copyOne.GetCommittedState(addr, skey); val != sval { + t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) + } // Copy the copy and check the balance once more copyTwo := copyOne.Copy() if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { @@ -626,23 +582,8 @@ func TestCopyCommitCopy(t *testing.T) { if val := copyTwo.GetState(addr, skey); val != sval { t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval) } - if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval) - } - // Commit state, ensure states can be loaded from disk - root, _ := state.Commit(0, false, false) - state, _ = New(root, tdb, nil) - if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42) - } - if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) { - t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello")) - } - if val := state.GetState(addr, skey); val != sval { - t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) - } - if val := state.GetCommittedState(addr, skey); val != sval { - t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval) + if val := copyTwo.GetCommittedState(addr, skey); val != sval { + t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) } } @@ -651,7 +592,7 @@ func TestCopyCommitCopy(t *testing.T) { // // See https://github.com/ethereum/go-ethereum/issues/20106. 
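The post-commit checks added above pin down the same behavior from one more level of copying; the core of it is simply:

```go
// Sketch (assumes package state): committing a copy folds its dirty
// slots into its own committed view, so both reads then agree.
func exampleCommitOnCopy(st *StateDB, addr common.Address, skey common.Hash) {
	cpy := st.Copy()
	cpy.Commit(false, false)
	_ = cpy.GetState(addr, skey)          // pending value
	_ = cpy.GetCommittedState(addr, skey) // equal to it after the commit
}
```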
func TestCopyCopyCommitCopy(t *testing.T) { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -702,6 +643,19 @@ func TestCopyCopyCommitCopy(t *testing.T) { if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } + copyTwo.Commit(false, false) + if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42) + } + if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) { + t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello")) + } + if val := copyTwo.GetState(addr, skey); val != sval { + t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) + } + if val := copyTwo.GetCommittedState(addr, skey); val != sval { + t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) + } // Copy the copy-copy and check the balance once more copyThree := copyTwo.Copy() if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { @@ -713,56 +667,11 @@ func TestCopyCopyCommitCopy(t *testing.T) { if val := copyThree.GetState(addr, skey); val != sval { t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval) } - if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) { + if val := copyThree.GetCommittedState(addr, skey); val != sval { t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval) } } -// TestCommitCopy tests the copy from a committed state is not functional. -func TestCommitCopy(t *testing.T) { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) - - // Create an account and check if the retrieved balance is correct - addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") - skey := common.HexToHash("aaa") - sval := common.HexToHash("bbb") - - state.SetBalance(addr, big.NewInt(42)) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata - state.SetState(addr, skey, sval) // Change the storage trie - - if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42) - } - if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) { - t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello")) - } - if val := state.GetState(addr, skey); val != sval { - t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval) - } - if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{}) - } - // Copy the committed state database, the copied one is not functional. 
- state.Commit(0, true, false) - copied := state.Copy() - if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 { - t.Fatalf("unexpected balance: have %v", balance) - } - if code := copied.GetCode(addr); code != nil { - t.Fatalf("unexpected code: have %x", code) - } - if val := copied.GetState(addr, skey); val != (common.Hash{}) { - t.Fatalf("unexpected storage slot: have %x", val) - } - if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("unexpected storage slot: have %x", val) - } - if !errors.Is(copied.Error(), trie.ErrCommitted) { - t.Fatalf("unexpected state error, %v", copied.Error()) - } -} - // TestDeleteCreateRevert tests a weird state transition corner case that we hit // while changing the internals of StateDB. The workflow is that a contract is // self-destructed, then in a follow-up transaction (but same block) it's created @@ -773,16 +682,16 @@ func TestCommitCopy(t *testing.T) { // first, but the journal wiped the entire state object on create-revert. func TestDeleteCreateRevert(t *testing.T) { // Create an initial state with a single contract - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) addr := common.BytesToAddress([]byte("so")) state.SetBalance(addr, big.NewInt(1)) - root, _ := state.Commit(0, false, false) + root, _ := state.Commit(false, false) state, _ = NewWithSnapshot(root, state.db, state.snap) // Simulate self-destructing in one transaction, then create-reverting in another - state.SelfDestruct(addr) + state.Suicide(addr) state.Finalise(true) id := state.Snapshot() @@ -790,7 +699,7 @@ func TestDeleteCreateRevert(t *testing.T) { state.RevertToSnapshot(id) // Commit the entire state and make sure we don't crash and have the correct state - root, _ = state.Commit(0, true, false) + root, _ = state.Commit(true, false) state, _ = NewWithSnapshot(root, state.db, state.snap) if state.getStateObject(addr) != nil { @@ -802,30 +711,11 @@ func TestDeleteCreateRevert(t *testing.T) { // the Commit operation fails with an error // If we are missing trie nodes, we should not continue writing to the trie func TestMissingTrieNodes(t *testing.T) { - testMissingTrieNodes(t, rawdb.HashScheme) - testMissingTrieNodes(t, rawdb.PathScheme) -} - -func testMissingTrieNodes(t *testing.T, scheme string) { // Create an initial state with a few accounts - var ( - triedb *trie.Database - memDb = rawdb.NewMemoryDatabase() - ) - if scheme == rawdb.PathScheme { - triedb = trie.NewDatabase(memDb, &trie.Config{PathDB: &pathdb.Config{ - CleanCacheSize: 0, - DirtyCacheSize: 0, - }}) // disable caching - } else { - triedb = trie.NewDatabase(memDb, &trie.Config{HashDB: &hashdb.Config{ - CleanCacheSize: 0, - }}) // disable caching - } - db := NewDatabaseWithNodeDB(memDb, triedb) - + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) var root common.Hash - state, _ := New(types.EmptyRootHash, db, nil) + state, _ := New(common.Hash{}, db, nil) addr := common.BytesToAddress([]byte("so")) { state.SetBalance(addr, big.NewInt(1)) @@ -833,10 +723,10 @@ func testMissingTrieNodes(t *testing.T, scheme string) { a2 := common.BytesToAddress([]byte("another")) state.SetBalance(a2, big.NewInt(100)) state.SetCode(a2, []byte{1, 2, 4}) - root, _ = state.Commit(0, false, false) + root, _ = state.Commit(false, false) t.Logf("root: %x", root) // force-flush - triedb.Commit(root, false) + state.Database().TrieDB().Cap(0) } // Create a new state on the 
old root state, _ = New(root, db, nil) @@ -857,7 +747,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) { } // Modify the state state.SetBalance(addr, big.NewInt(2)) - root, err := state.Commit(0, false, false) + root, err := state.Commit(false, false) if err == nil { t.Fatalf("expected error, got root :%x", root) } @@ -874,7 +764,7 @@ func TestStateDBAccessList(t *testing.T) { memDb := rawdb.NewMemoryDatabase() db := NewDatabase(memDb) - state, _ := New(types.EmptyRootHash, db, nil) + state, _ := New(common.Hash{}, db, nil) state.accessList = newAccessList() verifyAddrs := func(astrings ...string) { @@ -1038,12 +928,12 @@ func TestStateDBAccessList(t *testing.T) { } func TestMultiCoinOperations(t *testing.T) { - s := newStateEnv() + s := newStateTest() addr := common.Address{1} assetID := common.Hash{2} s.state.GetOrNewStateObject(addr) - root, _ := s.state.Commit(0, false, false) + root, _ := s.state.Commit(false, false) s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap) s.state.AddBalance(addr, new(big.Int)) @@ -1100,14 +990,14 @@ func TestMultiCoinSnapshot(t *testing.T) { assertBalances(10, 0, 0) // Commit and get the new root - root, _ = stateDB.Commit(0, false, false) + root, _ = stateDB.Commit(false, false) assertBalances(10, 0, 0) // Create a new state from the latest root, add a multicoin balance, and // commit it to the tree. stateDB, _ = New(root, sdb, snapTree) stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(10)) - root, _ = stateDB.Commit(0, false, false) + root, _ = stateDB.Commit(false, false) assertBalances(10, 10, 0) // Add more layers than the cap and ensure the balances and layers are correct @@ -1115,7 +1005,7 @@ func TestMultiCoinSnapshot(t *testing.T) { stateDB, _ = New(root, sdb, snapTree) stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1)) stateDB.AddBalanceMultiCoin(addr, assetID2, big.NewInt(2)) - root, _ = stateDB.Commit(0, false, false) + root, _ = stateDB.Commit(false, false) } assertBalances(10, 266, 512) @@ -1124,8 +1014,7 @@ func TestMultiCoinSnapshot(t *testing.T) { stateDB, _ = New(root, sdb, snapTree) stateDB.AddBalance(addr, big.NewInt(1)) stateDB.AddBalanceMultiCoin(addr, assetID1, big.NewInt(1)) - root, _ = stateDB.Commit(0, false, false) - stateDB, _ = New(root, sdb, snapTree) + _, _ = stateDB.Commit(false, false) assertBalances(11, 267, 512) } @@ -1146,23 +1035,17 @@ func TestGenerateMultiCoinAccounts(t *testing.T) { t.Fatal(err) } stateDB.SetBalanceMultiCoin(addr, assetID, assetBalance) - root, err := stateDB.Commit(0, false, false) + root, err := stateDB.Commit(false, false) if err != nil { t.Fatal(err) } triedb := database.TrieDB() - if err := triedb.Commit(root, true); err != nil { + if err := triedb.Commit(root, true, nil); err != nil { t.Fatal(err) } // Build snapshot from scratch - snapConfig := snapshot.Config{ - CacheSize: 16, - AsyncBuild: false, - NoBuild: false, - SkipVerify: true, - } - snaps, err := snapshot.New(snapConfig, diskdb, triedb, common.Hash{}, root) + snaps, err := snapshot.New(diskdb, triedb, 16, common.Hash{}, root, false, true, false) if err != nil { t.Error("Unexpected error while rebuilding snapshot:", err) } @@ -1196,9 +1079,8 @@ func TestFlushOrderDataLoss(t *testing.T) { // Create a state trie with many accounts and slots var ( memdb = rawdb.NewMemoryDatabase() - triedb = trie.NewDatabase(memdb, nil) - statedb = NewDatabaseWithNodeDB(memdb, triedb) - state, _ = New(types.EmptyRootHash, statedb, nil) + statedb = NewDatabase(memdb) + state, _ = New(common.Hash{}, statedb, nil) ) for a 
:= byte(0); a < 10; a++ { state.CreateAccount(common.Address{a}) @@ -1206,15 +1088,15 @@ func TestFlushOrderDataLoss(t *testing.T) { state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s}) } } - root, err := state.Commit(0, false, false) + root, err := state.Commit(false, false) if err != nil { t.Fatalf("failed to commit state trie: %v", err) } - triedb.Reference(root, common.Hash{}) - if err := triedb.Cap(1024); err != nil { + statedb.TrieDB().Reference(root, common.Hash{}) + if err := statedb.TrieDB().Cap(1024); err != nil { t.Fatalf("failed to cap trie dirty cache: %v", err) } - if err := triedb.Commit(root, false); err != nil { + if err := statedb.TrieDB().Commit(root, false, nil); err != nil { t.Fatalf("failed to commit state trie: %v", err) } // Reopen the state trie from flushed disk and verify it @@ -1230,125 +1112,3 @@ func TestFlushOrderDataLoss(t *testing.T) { } } } - -func TestStateDBTransientStorage(t *testing.T) { - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) - state, _ := New(types.EmptyRootHash, db, nil) - - key := common.Hash{0x01} - value := common.Hash{0x02} - addr := common.Address{} - - state.SetTransientState(addr, key, value) - if exp, got := 1, state.journal.length(); exp != got { - t.Fatalf("journal length mismatch: have %d, want %d", got, exp) - } - // the retrieved value should equal what was set - if got := state.GetTransientState(addr, key); got != value { - t.Fatalf("transient storage mismatch: have %x, want %x", got, value) - } - - // revert the transient state being set and then check that the - // value is now the empty hash - state.journal.revert(state, 0) - if got, exp := state.GetTransientState(addr, key), (common.Hash{}); exp != got { - t.Fatalf("transient storage mismatch: have %x, want %x", got, exp) - } - - // set transient state and then copy the statedb and ensure that - // the transient state is copied - state.SetTransientState(addr, key, value) - cpy := state.Copy() - if got := cpy.GetTransientState(addr, key); got != value { - t.Fatalf("transient storage mismatch: have %x, want %x", got, value) - } -} - -func TestResetObject(t *testing.T) { - var ( - disk = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabase(disk, nil) - db = NewDatabaseWithNodeDB(disk, tdb) - snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash) - state, _ = New(types.EmptyRootHash, db, snaps) - addr = common.HexToAddress("0x1") - slotA = common.HexToHash("0x1") - slotB = common.HexToHash("0x2") - ) - // Initialize account with balance and storage in first transaction. 
- state.SetBalance(addr, big.NewInt(1)) - state.SetState(addr, slotA, common.BytesToHash([]byte{0x1})) - state.IntermediateRoot(true) - - // Reset account and mutate balance and storages - state.CreateAccount(addr) - state.SetBalance(addr, big.NewInt(2)) - state.SetState(addr, slotB, common.BytesToHash([]byte{0x2})) - root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false) - - // Ensure the original account is wiped properly - snap := snaps.Snapshot(root) - slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes())) - if len(slot) != 0 { - t.Fatalf("Unexpected storage slot") - } - slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes())) - if !bytes.Equal(slot, []byte{0x2}) { - t.Fatalf("Unexpected storage slot value %v", slot) - } -} - -func TestDeleteStorage(t *testing.T) { - var ( - disk = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabase(disk, nil) - db = NewDatabaseWithNodeDB(disk, tdb) - snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash) - state, _ = New(types.EmptyRootHash, db, snaps) - addr = common.HexToAddress("0x1") - ) - // Initialize account and populate storage - state.SetBalance(addr, big.NewInt(1)) - state.CreateAccount(addr) - for i := 0; i < 1000; i++ { - slot := common.Hash(uint256.NewInt(uint64(i)).Bytes32()) - value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32()) - state.SetState(addr, slot, value) - } - root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false) - // Init phase done, create two states, one with snap and one without - fastState, _ := New(root, db, snaps) - slowState, _ := New(root, db, nil) - - obj := fastState.GetOrNewStateObject(addr) - storageRoot := obj.data.Root - - _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) - if err != nil { - t.Fatal(err) - } - - _, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) - if err != nil { - t.Fatal(err) - } - check := func(set *trienode.NodeSet) string { - var a []string - set.ForEachWithOrder(func(path string, n *trienode.Node) { - if n.Hash != (common.Hash{}) { - t.Fatal("delete should have empty hashes") - } - if len(n.Blob) != 0 { - t.Fatal("delete should have have empty blobs") - } - a = append(a, fmt.Sprintf("%x", path)) - }) - return strings.Join(a, ",") - } - slowRes := check(slowNodes) - fastRes := check(fastNodes) - if slowRes != fastRes { - t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes) - } -} diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 5b01083f59..18e987fb11 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -28,16 +28,16 @@ package state import ( "sync" - "time" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/metrics" ) -// triePrefetchMetricsPrefix is the prefix under which to publish the metrics. -const triePrefetchMetricsPrefix = "trie/prefetch/" +var ( + // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. + triePrefetchMetricsPrefix = "trie/prefetch/" +) // triePrefetcher is an active prefetcher, which receives accounts or storage // items and does trie-loading of them. 
The goal is to get as much useful content
@@ -47,94 +47,60 @@ const triePrefetchMetricsPrefix = "trie/prefetch/"
 type triePrefetcher struct {
 	db       Database               // Database to fetch trie nodes through
 	root     common.Hash            // Root hash of the account trie for metrics
-	fetches  map[string]Trie        // Partially or fully fetched tries. Only populated for inactive copies.
+	fetches  map[string]Trie        // Partially or fully fetched tries
 	fetchers map[string]*subfetcher // Subfetchers for each trie
-	maxConcurrency int
-	workers        *utils.BoundedWorkers
-
-	subfetcherWorkersMeter metrics.Meter
-	subfetcherWaitTimer    metrics.Counter
-	subfetcherCopiesMeter  metrics.Meter
-
+	deliveryMissMeter metrics.Meter
 	accountLoadMeter  metrics.Meter
 	accountDupMeter   metrics.Meter
 	accountSkipMeter  metrics.Meter
 	accountWasteMeter metrics.Meter
-
-	storageFetchersMeter    metrics.Meter
-	storageLoadMeter        metrics.Meter
-	storageLargestLoadMeter metrics.Meter
-	storageDupMeter         metrics.Meter
-	storageSkipMeter        metrics.Meter
-	storageWasteMeter       metrics.Meter
+	storageLoadMeter  metrics.Meter
+	storageDupMeter   metrics.Meter
+	storageSkipMeter  metrics.Meter
+	storageWasteMeter metrics.Meter
 }
-func newTriePrefetcher(db Database, root common.Hash, namespace string, maxConcurrency int) *triePrefetcher {
+func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher {
 	prefix := triePrefetchMetricsPrefix + namespace
-	return &triePrefetcher{
+	p := &triePrefetcher{
 		db:       db,
 		root:     root,
 		fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
-		maxConcurrency: maxConcurrency,
-		workers:        utils.NewBoundedWorkers(maxConcurrency), // Scale up as needed to [maxConcurrency]
-
-		subfetcherWorkersMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/workers", nil),
-		subfetcherWaitTimer:    metrics.GetOrRegisterCounter(prefix+"/subfetcher/wait", nil),
-		subfetcherCopiesMeter:  metrics.GetOrRegisterMeter(prefix+"/subfetcher/copies", nil),
-
+		deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil),
 		accountLoadMeter:  metrics.GetOrRegisterMeter(prefix+"/account/load", nil),
 		accountDupMeter:   metrics.GetOrRegisterMeter(prefix+"/account/dup", nil),
 		accountSkipMeter:  metrics.GetOrRegisterMeter(prefix+"/account/skip", nil),
 		accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil),
-
-		storageFetchersMeter:    metrics.GetOrRegisterMeter(prefix+"/storage/fetchers", nil),
-		storageLoadMeter:        metrics.GetOrRegisterMeter(prefix+"/storage/load", nil),
-		storageLargestLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/lload", nil),
-		storageDupMeter:         metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil),
-		storageSkipMeter:        metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil),
-		storageWasteMeter:       metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
+		storageLoadMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/load", nil),
+		storageDupMeter:   metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil),
+		storageSkipMeter:  metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil),
+		storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
 	}
+	return p
 }
 // close iterates over all the subfetchers, aborts any that were left spinning
 // and reports the stats to the metrics subsystem.
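Taken together with close below, the intended single-use lifecycle is roughly (a sketch, assuming package state):

```go
// Sketch: schedule keys early, collect the warmed trie later, then
// close to flush metrics. The prefetcher cannot be reused afterwards.
func examplePrefetcherLifecycle(db Database, root common.Hash, skey common.Hash) {
	p := newTriePrefetcher(db, root, "example")
	p.prefetch(common.Hash{}, root, [][]byte{skey.Bytes()})
	// ... transaction execution proceeds concurrently ...
	if t := p.trie(common.Hash{}, root); t != nil {
		_ = t // warmed copy; a nil return is counted as a delivery miss
	}
	p.close() // aborts subfetchers and reports stats
}
```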
func (p *triePrefetcher) close() { - // If the prefetcher is an inactive one, bail out - if p.fetches != nil { - return - } - - // Collect stats from all fetchers - var ( - storageFetchers int64 - largestLoad int64 - ) for _, fetcher := range p.fetchers { - fetcher.abort() // safe to call multiple times (should be a no-op on happy path) + fetcher.abort() // safe to do multiple times if metrics.Enabled { - p.subfetcherCopiesMeter.Mark(int64(fetcher.copies())) - if fetcher.root == p.root { p.accountLoadMeter.Mark(int64(len(fetcher.seen))) p.accountDupMeter.Mark(int64(fetcher.dups)) - p.accountSkipMeter.Mark(int64(fetcher.skips())) + p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) for _, key := range fetcher.used { delete(fetcher.seen, string(key)) } p.accountWasteMeter.Mark(int64(len(fetcher.seen))) } else { - storageFetchers++ - oseen := int64(len(fetcher.seen)) - if oseen > largestLoad { - largestLoad = oseen - } - p.storageLoadMeter.Mark(oseen) + p.storageLoadMeter.Mark(int64(len(fetcher.seen))) p.storageDupMeter.Mark(int64(fetcher.dups)) - p.storageSkipMeter.Mark(int64(fetcher.skips())) + p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) for _, key := range fetcher.used { delete(fetcher.seen, string(key)) @@ -143,20 +109,6 @@ func (p *triePrefetcher) close() { } } } - if metrics.Enabled { - p.storageFetchersMeter.Mark(storageFetchers) - p.storageLargestLoadMeter.Mark(largestLoad) - } - - // Stop all workers once fetchers are aborted (otherwise - // could stop while waiting) - // - // Record number of workers that were spawned during this run - workersUsed := int64(p.workers.Wait()) - if metrics.Enabled { - p.subfetcherWorkersMeter.Mark(workersUsed) - } - // Clear out all fetchers (will crash on a second call, deliberate) p.fetchers = nil } @@ -169,30 +121,21 @@ func (p *triePrefetcher) copy() *triePrefetcher { copy := &triePrefetcher{ db: p.db, root: p.root, - fetches: make(map[string]Trie), // Active prefetchers use the fetchers map - - subfetcherWorkersMeter: p.subfetcherWorkersMeter, - subfetcherWaitTimer: p.subfetcherWaitTimer, - subfetcherCopiesMeter: p.subfetcherCopiesMeter, + fetches: make(map[string]Trie), // Active prefetchers use the fetches map + deliveryMissMeter: p.deliveryMissMeter, accountLoadMeter: p.accountLoadMeter, accountDupMeter: p.accountDupMeter, accountSkipMeter: p.accountSkipMeter, accountWasteMeter: p.accountWasteMeter, - - storageFetchersMeter: p.storageFetchersMeter, - storageLoadMeter: p.storageLoadMeter, - storageLargestLoadMeter: p.storageLargestLoadMeter, - storageDupMeter: p.storageDupMeter, - storageSkipMeter: p.storageSkipMeter, - storageWasteMeter: p.storageWasteMeter, + storageLoadMeter: p.storageLoadMeter, + storageDupMeter: p.storageDupMeter, + storageSkipMeter: p.storageSkipMeter, + storageWasteMeter: p.storageWasteMeter, } // If the prefetcher is already a copy, duplicate the data if p.fetches != nil { for root, fetch := range p.fetches { - if fetch == nil { - continue - } copy.fetches[root] = p.db.CopyTrie(fetch) } return copy @@ -205,17 +148,16 @@ func (p *triePrefetcher) copy() *triePrefetcher { } // prefetch schedules a batch of trie items to prefetch. 
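One wrinkle worth spelling out before the prefetch hunk below: copies made via copy are inactive, so scheduling on them is deliberately a no-op. In sketch form (assumes package state):

```go
// Sketch: an active prefetcher owns live subfetchers (p.fetchers); a
// copy serves static tries from p.fetches and ignores further prefetch
// calls, since fetches != nil makes prefetch bail out early.
func examplePrefetcherCopy(p *triePrefetcher, root common.Hash) {
	cpy := p.copy()
	cpy.prefetch(common.Hash{}, root, nil) // no-op on an inactive copy
	_ = cpy.trie(common.Hash{}, root)      // copy of a captured trie, or nil
}
```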
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) { +func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) { // If the prefetcher is an inactive one, bail out if p.fetches != nil { return } - // Active fetcher, schedule the retrievals id := p.trieID(owner, root) fetcher := p.fetchers[id] if fetcher == nil { - fetcher = newSubfetcher(p, owner, root, addr) + fetcher = newSubfetcher(p.db, owner, root) p.fetchers[id] = fetcher } fetcher.schedule(keys) @@ -229,27 +171,24 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { if p.fetches != nil { trie := p.fetches[id] if trie == nil { + p.deliveryMissMeter.Mark(1) return nil } return p.db.CopyTrie(trie) } - // Otherwise the prefetcher is active, bail if no trie was prefetched for this root fetcher := p.fetchers[id] if fetcher == nil { + p.deliveryMissMeter.Mark(1) return nil } + // Interrupt the prefetcher if it's by any chance still running and return + // a copy of any pre-loaded trie. + fetcher.abort() // safe to do multiple times - // Wait for the fetcher to finish and shutdown orchestrator, if it exists - start := time.Now() - fetcher.wait() - if metrics.Enabled { - p.subfetcherWaitTimer.Inc(time.Since(start).Milliseconds()) - } - - // Return a copy of one of the prefetched tries trie := fetcher.peek() if trie == nil { + p.deliveryMissMeter.Mark(1) return nil } return trie @@ -265,10 +204,7 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte // trieID returns an unique trie identifier consists the trie owner and root hash. func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { - trieID := make([]byte, common.HashLength*2) - copy(trieID, owner.Bytes()) - copy(trieID[common.HashLength:], root.Bytes()) - return string(trieID) + return string(append(owner.Bytes(), root.Bytes()...)) } // subfetcher is a trie fetcher goroutine responsible for pulling entries for a @@ -276,15 +212,18 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { // main prefetcher is paused and either all requested items are processed or if // the trie being worked on is retrieved from the prefetcher. type subfetcher struct { - p *triePrefetcher + db Database // Database to load trie nodes through + owner common.Hash // Owner of the trie, usually account hash + root common.Hash // Root hash of the trie to prefetch + trie Trie // Trie being populated with nodes - db Database // Database to load trie nodes through - state common.Hash // Root hash of the state to prefetch - owner common.Hash // Owner of the trie, usually account hash - root common.Hash // Root hash of the trie to prefetch - addr common.Address // Address of the account that the trie belongs to + tasks [][]byte // Items queued up for retrieval + lock sync.Mutex // Lock protecting the task queue - to *trieOrchestrator // Orchestrate concurrent fetching of a single trie + wake chan struct{} // Wake channel if a new task is scheduled + stop chan struct{} // Channel to interrupt processing + term chan struct{} // Channel to signal interruption + copy chan chan Trie // Channel to request a copy of the current trie seen map[string]struct{} // Tracks the entries already loaded dups int // Number of duplicate preload tasks @@ -293,348 +232,137 @@ type subfetcher struct { // newSubfetcher creates a goroutine to prefetch state items belonging to a // particular root hash. 
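And the per-trie worker underneath, sketched with the restored constructor signature (assumes package state; the zero owner hash selects the account trie, anything else opens a storage trie):

```go
// Sketch: a subfetcher loads keys in the background; peek returns a
// deep copy of whatever has been loaded so far.
func exampleSubfetcher(db Database, root common.Hash, key common.Hash) {
	sf := newSubfetcher(db, common.Hash{}, root)
	sf.schedule([][]byte{key.Bytes()})
	_ = sf.peek() // trie in its current, possibly partial, state
	sf.abort()    // safe to call repeatedly, but not concurrently
}
```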
-func newSubfetcher(p *triePrefetcher, owner common.Hash, root common.Hash, addr common.Address) *subfetcher { +func newSubfetcher(db Database, owner common.Hash, root common.Hash) *subfetcher { sf := &subfetcher{ - p: p, - db: p.db, - state: p.root, + db: db, owner: owner, root: root, - addr: addr, + wake: make(chan struct{}, 1), + stop: make(chan struct{}), + term: make(chan struct{}), + copy: make(chan chan Trie), seen: make(map[string]struct{}), } - sf.to = newTrieOrchestrator(sf) - if sf.to != nil { - go sf.to.processTasks() - } - // We return [sf] here to ensure we don't try to re-create if - // we aren't able to setup a [newTrieOrchestrator] the first time. + go sf.loop() return sf } // schedule adds a batch of trie keys to the queue to prefetch. -// This should never block, so an array is used instead of a channel. -// -// This is not thread-safe. func (sf *subfetcher) schedule(keys [][]byte) { // Append the tasks to the current queue - tasks := make([][]byte, 0, len(keys)) - for _, key := range keys { - // Check if keys already seen - sk := string(key) - if _, ok := sf.seen[sk]; ok { - sf.dups++ - continue - } - sf.seen[sk] = struct{}{} - tasks = append(tasks, key) - } + sf.lock.Lock() + sf.tasks = append(sf.tasks, keys...) + sf.lock.Unlock() - // After counting keys, exit if they can't be prefetched - if sf.to == nil { - return + // Notify the prefetcher, it's fine if it's already terminated + select { + case sf.wake <- struct{}{}: + default: } - - // Add tasks to queue for prefetching - sf.to.enqueueTasks(tasks) } // peek tries to retrieve a deep copy of the fetcher's trie in whatever form it // is currently. func (sf *subfetcher) peek() Trie { - if sf.to == nil { - return nil - } - return sf.to.copyBase() -} + ch := make(chan Trie) + select { + case sf.copy <- ch: + // Subfetcher still alive, return copy from it + return <-ch -// wait must only be called if [triePrefetcher] has not been closed. If this happens, -// workers will not finish. -func (sf *subfetcher) wait() { - if sf.to == nil { - // Unable to open trie - return + case <-sf.term: + // Subfetcher already terminated, return a copy directly + if sf.trie == nil { + return nil + } + return sf.db.CopyTrie(sf.trie) } - sf.to.wait() } +// abort interrupts the subfetcher immediately. It is safe to call abort multiple +// times but it is not thread safe. func (sf *subfetcher) abort() { - if sf.to == nil { - // Unable to open trie - return - } - sf.to.abort() -} - -func (sf *subfetcher) skips() int { - if sf.to == nil { - // Unable to open trie - return 0 - } - return sf.to.skipCount() -} - -func (sf *subfetcher) copies() int { - if sf.to == nil { - // Unable to open trie - return 0 + select { + case <-sf.stop: + default: + close(sf.stop) } - return sf.to.copies + <-sf.term } -// trieOrchestrator is not thread-safe. -type trieOrchestrator struct { - sf *subfetcher - - // base is an unmodified Trie we keep for - // creating copies for each worker goroutine. - // - // We care more about quick copies than good copies - // because most (if not all) of the nodes that will be populated - // in the copy will come from the underlying triedb cache. Ones - // that don't come from this cache probably had to be fetched - // from disk anyways. 
- base Trie - baseLock sync.Mutex - - tasksAllowed bool - skips int // number of tasks skipped - pendingTasks [][]byte - taskLock sync.Mutex - - processingTasks sync.WaitGroup - - wake chan struct{} - stop chan struct{} - stopOnce sync.Once - loopTerm chan struct{} - - copies int - copyChan chan Trie - copySpawner chan struct{} -} +// loop waits for new tasks to be scheduled and keeps loading them until it runs +// out of tasks or its underlying trie is retrieved for committing. +func (sf *subfetcher) loop() { + // No matter how the loop stops, signal anyone waiting that it's terminated + defer close(sf.term) -func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator { // Start by opening the trie and stop processing if it fails - var ( - base Trie - err error - ) if sf.owner == (common.Hash{}) { - base, err = sf.db.OpenTrie(sf.root) + trie, err := sf.db.OpenTrie(sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return nil + return } + sf.trie = trie } else { - // The trie argument can be nil as verkle doesn't support prefetching - // yet. TODO FIX IT(rjl493456442), otherwise code will panic here. - base, err = sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil) + trie, err := sf.db.OpenStorageTrie(sf.owner, sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return nil + return } + sf.trie = trie } - // Instantiate trieOrchestrator - to := &trieOrchestrator{ - sf: sf, - base: base, - - tasksAllowed: true, - wake: make(chan struct{}, 1), - stop: make(chan struct{}), - loopTerm: make(chan struct{}), - - copyChan: make(chan Trie, sf.p.maxConcurrency), - copySpawner: make(chan struct{}, sf.p.maxConcurrency), - } - - // Create initial trie copy - to.copies++ - to.copySpawner <- struct{}{} - to.copyChan <- to.copyBase() - return to -} - -func (to *trieOrchestrator) copyBase() Trie { - to.baseLock.Lock() - defer to.baseLock.Unlock() - - return to.sf.db.CopyTrie(to.base) -} - -func (to *trieOrchestrator) skipCount() int { - to.taskLock.Lock() - defer to.taskLock.Unlock() - - return to.skips -} - -func (to *trieOrchestrator) enqueueTasks(tasks [][]byte) { - to.taskLock.Lock() - defer to.taskLock.Unlock() - - if len(tasks) == 0 { - return - } - - // Add tasks to [pendingTasks] - if !to.tasksAllowed { - to.skips += len(tasks) - return - } - to.processingTasks.Add(len(tasks)) - to.pendingTasks = append(to.pendingTasks, tasks...) 
- - // Wake up processor - select { - case to.wake <- struct{}{}: - default: - } -} - -func (to *trieOrchestrator) handleStop(remaining int) { - to.taskLock.Lock() - to.skips += remaining - to.taskLock.Unlock() - to.processingTasks.Add(-remaining) -} - -func (to *trieOrchestrator) processTasks() { - defer close(to.loopTerm) - + // Trie opened successfully, keep prefetching items for { - // Determine if we should process or exit select { - case <-to.wake: - case <-to.stop: - return - } - - // Get current tasks - to.taskLock.Lock() - tasks := to.pendingTasks - to.pendingTasks = nil - to.taskLock.Unlock() - - // Enqueue more work as soon as trie copies are available - lt := len(tasks) - for i := 0; i < lt; i++ { - // Try to stop as soon as possible, if channel is closed - remaining := lt - i - select { - case <-to.stop: - to.handleStop(remaining) - return - default: - } - - // Try to create to get an active copy first (select is non-deterministic, - // so we may end up creating a new copy when we don't need to) - var t Trie - select { - case t = <-to.copyChan: - default: - // Wait for an available copy or create one, if we weren't - // able to get a previously created copy + case <-sf.wake: + // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock + sf.lock.Lock() + tasks := sf.tasks + sf.tasks = nil + sf.lock.Unlock() + + // Prefetch any tasks until the loop is interrupted + for i, task := range tasks { select { - case <-to.stop: - to.handleStop(remaining) + case <-sf.stop: + // If termination is requested, add any leftover back and return + sf.lock.Lock() + sf.tasks = append(sf.tasks, tasks[i:]...) + sf.lock.Unlock() return - case t = <-to.copyChan: - case to.copySpawner <- struct{}{}: - to.copies++ - t = to.copyBase() - } - } - // Enqueue work, unless stopped. - fTask := tasks[i] - f := func() { - // Perform task - var err error - if len(fTask) == common.AddressLength { - _, err = t.GetAccount(common.BytesToAddress(fTask)) - } else { - _, err = t.GetStorage(to.sf.addr, fTask) + case ch := <-sf.copy: + // Somebody wants a copy of the current trie, grant them + ch <- sf.db.CopyTrie(sf.trie) + + default: + // No termination request yet, prefetch the next entry + if _, ok := sf.seen[string(task)]; ok { + sf.dups++ + } else { + var err error + if len(task) == len(common.Address{}) { + _, err = sf.trie.TryGetAccount(task) + } else { + _, err = sf.trie.TryGet(task) + } + if err != nil { + log.Error("Trie prefetcher failed fetching", "root", sf.root, "err", err) + } + sf.seen[string(task)] = struct{}{} + } } - if err != nil { - log.Error("Trie prefetcher failed fetching", "root", to.sf.root, "err", err) - } - to.processingTasks.Done() - - // Return copy when we are done with it, so someone else can use it - // - // channel is buffered and will not block - to.copyChan <- t } - // Enqueue task for processing (may spawn new goroutine - // if not at [maxConcurrency]) - // - // If workers are stopped before calling [Execute], this function may - // panic. 
- to.sf.p.workers.Execute(f) - } - } -} - -func (to *trieOrchestrator) stopAcceptingTasks() { - to.taskLock.Lock() - defer to.taskLock.Unlock() + case ch := <-sf.copy: + // Somebody wants a copy of the current trie, grant them + ch <- sf.db.CopyTrie(sf.trie) - if !to.tasksAllowed { - return + case <-sf.stop: + // Termination is requested, abort and leave remaining tasks + return + } } - to.tasksAllowed = false - - // We don't clear [to.pendingTasks] here because - // it will be faster to prefetch them even though we - // are still waiting. -} - -// wait stops accepting new tasks and waits for ongoing tasks to complete. If -// wait is called, it is not necessary to call [abort]. -// -// It is safe to call wait multiple times. -func (to *trieOrchestrator) wait() { - // Prevent more tasks from being enqueued - to.stopAcceptingTasks() - - // Wait for processing tasks to complete - to.processingTasks.Wait() - - // Stop orchestrator loop - to.stopOnce.Do(func() { - close(to.stop) - }) - <-to.loopTerm -} - -// abort stops any ongoing tasks and shuts down the orchestrator loop. If abort -// is called, it is not necessary to call [wait]. -// -// It is safe to call abort multiple times. -func (to *trieOrchestrator) abort() { - // Prevent more tasks from being enqueued - to.stopAcceptingTasks() - - // Stop orchestrator loop - to.stopOnce.Do(func() { - close(to.stop) - }) - <-to.loopTerm - - // Capture any dangling pending tasks (processTasks - // may exit before enqueing all pendingTasks) - to.taskLock.Lock() - pendingCount := len(to.pendingTasks) - to.skips += pendingCount - to.pendingTasks = nil - to.taskLock.Unlock() - to.processingTasks.Add(-pendingCount) - - // Wait for processing tasks to complete - to.processingTasks.Wait() } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index 285a7b16da..fcb58a0e44 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -31,15 +31,12 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/rawdb" ) -const maxConcurrency = 4 - func filledStateDB() *StateDB { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -58,21 +55,21 @@ func filledStateDB() *StateDB { func TestCopyAndClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) time.Sleep(1 * time.Second) a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) b := prefetcher.trie(common.Hash{}, db.originalRoot) cpy := prefetcher.copy() - cpy.prefetch(common.Hash{}, db.originalRoot, 
common.Address{}, [][]byte{skey.Bytes()}) - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) c := cpy.trie(common.Hash{}, db.originalRoot) prefetcher.close() cpy2 := cpy.copy() - cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) d := cpy2.trie(common.Hash{}, db.originalRoot) cpy.close() cpy2.close() @@ -83,9 +80,9 @@ func TestCopyAndClose(t *testing.T) { func TestUseAfterClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) a := prefetcher.trie(common.Hash{}, db.originalRoot) prefetcher.close() b := prefetcher.trie(common.Hash{}, db.originalRoot) @@ -99,9 +96,9 @@ func TestUseAfterClose(t *testing.T) { func TestCopyClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) cpy := prefetcher.copy() a := prefetcher.trie(common.Hash{}, db.originalRoot) b := cpy.trie(common.Hash{}, db.originalRoot) diff --git a/core/state_manager.go b/core/state_manager.go index 8fc7de11c3..b66d8181de 100644 --- a/core/state_manager.go +++ b/core/state_manager.go @@ -31,9 +31,9 @@ import ( "math/rand" "time" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" ) func init() { @@ -65,9 +65,9 @@ type TrieWriter interface { } type TrieDB interface { - Dereference(root common.Hash) error - Commit(root common.Hash, report bool) error - Size() (common.StorageSize, common.StorageSize, common.StorageSize) + Dereference(root common.Hash) + Commit(root common.Hash, report bool, callback func(common.Hash)) error + Size() (common.StorageSize, common.StorageSize) Cap(limit common.StorageSize) error } @@ -103,11 +103,12 @@ func (np *noPruningTrieWriter) InsertTrie(block *types.Block) error { func (np *noPruningTrieWriter) AcceptTrie(block *types.Block) error { // We don't need to call [Dereference] on the block root at the end of this // function because it is removed from the [TrieDB.Dirties] map in [Commit]. 
- return np.TrieDB.Commit(block.Root(), false) + return np.TrieDB.Commit(block.Root(), false, nil) } func (np *noPruningTrieWriter) RejectTrie(block *types.Block) error { - return np.TrieDB.Dereference(block.Root()) + np.TrieDB.Dereference(block.Root()) + return nil } func (np *noPruningTrieWriter) Shutdown() error { return nil } @@ -120,13 +121,13 @@ type cappedMemoryTrieWriter struct { imageCap common.StorageSize commitInterval uint64 - tipBuffer *BoundedBuffer[common.Hash] + tipBuffer *BoundedBuffer } func (cm *cappedMemoryTrieWriter) InsertTrie(block *types.Block) error { // The use of [Cap] in [InsertTrie] prevents exceeding the configured memory // limit (and OOM) in case there is a large backlog of processing (unaccepted) blocks. - _, nodes, imgs := cm.TrieDB.Size() // all memory is contained within the nodes return for hashdb + nodes, imgs := cm.TrieDB.Size() if nodes <= cm.memoryCap && imgs <= cm.imageCap { return nil } @@ -145,14 +146,12 @@ func (cm *cappedMemoryTrieWriter) AcceptTrie(block *types.Block) error { // // Note: It is safe to dereference roots that have been committed to disk // (they are no-ops). - if err := cm.tipBuffer.Insert(root); err != nil { - return err - } + cm.tipBuffer.Insert(root) // Commit this root if we have reached the [commitInterval]. modCommitInterval := block.NumberU64() % cm.commitInterval if modCommitInterval == 0 { - if err := cm.TrieDB.Commit(root, true); err != nil { + if err := cm.TrieDB.Commit(root, true, nil); err != nil { return fmt.Errorf("failed to commit trie for block %s: %w", block.Hash().Hex(), err) } return nil @@ -174,7 +173,7 @@ func (cm *cappedMemoryTrieWriter) AcceptTrie(block *types.Block) error { return nil } targetMemory := cm.targetCommitSize + cm.flushStepSize*common.StorageSize(distanceFromCommit) - _, nodes, _ := cm.TrieDB.Size() + nodes, _ := cm.TrieDB.Size() if nodes <= targetMemory { return nil } @@ -193,12 +192,12 @@ func (cm *cappedMemoryTrieWriter) RejectTrie(block *types.Block) error { func (cm *cappedMemoryTrieWriter) Shutdown() error { // If [tipBuffer] entry is empty, no need to do any cleanup on // shutdown. - last, exists := cm.tipBuffer.Last() - if !exists { + last := cm.tipBuffer.Last() + if last == (common.Hash{}) { return nil } // Attempt to commit last item added to [dereferenceQueue] on shutdown to avoid // re-processing the state on the next startup. 
- return cm.TrieDB.Commit(last, true) + return cm.TrieDB.Commit(last, true, nil) } diff --git a/core/state_manager_test.go b/core/state_manager_test.go index 2fb47add49..92d722fdd5 100644 --- a/core/state_manager_test.go +++ b/core/state_manager_test.go @@ -7,7 +7,7 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" @@ -18,16 +18,15 @@ type MockTrieDB struct { LastCommit common.Hash } -func (t *MockTrieDB) Dereference(root common.Hash) error { +func (t *MockTrieDB) Dereference(root common.Hash) { t.LastDereference = root - return nil } -func (t *MockTrieDB) Commit(root common.Hash, report bool) error { +func (t *MockTrieDB) Commit(root common.Hash, report bool, callback func(common.Hash)) error { t.LastCommit = root return nil } -func (t *MockTrieDB) Size() (common.StorageSize, common.StorageSize, common.StorageSize) { - return 0, 0, 0 +func (t *MockTrieDB) Size() (common.StorageSize, common.StorageSize) { + return 0, 0 } func (t *MockTrieDB) Cap(limit common.StorageSize) error { return nil @@ -45,7 +44,7 @@ func TestCappedMemoryTrieWriter(t *testing.T) { Root: common.BigToHash(bigI), Number: bigI, }, - nil, nil, nil, nil, + nil, nil, nil, nil, nil, true, ) assert.NoError(w.InsertTrie(block)) @@ -84,7 +83,7 @@ func TestNoPruningTrieWriter(t *testing.T) { Root: common.BigToHash(bigI), Number: bigI, }, - nil, nil, nil, nil, + nil, nil, nil, nil, nil, true, ) assert.NoError(w.InsertTrie(block)) diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go new file mode 100644 index 0000000000..a97f3fdf7b --- /dev/null +++ b/core/state_prefetcher.go @@ -0,0 +1,105 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "math/big" + "sync/atomic" + + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" +) + +// statePrefetcher is a basic Prefetcher, which blindly executes a block on top +// of an arbitrary state with the goal of prefetching potentially useful state +// data from disk before the main block processor starts executing.
+type statePrefetcher struct { + config *params.ChainConfig // Chain configuration options + bc *BlockChain // Canonical block chain + engine consensus.Engine // Consensus engine used for block rewards +} + +// newStatePrefetcher initialises a new statePrefetcher. +func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *statePrefetcher { + return &statePrefetcher{ + config: config, + bc: bc, + engine: engine, + } +} + +// Prefetch processes the state changes according to the Ethereum rules by running +// the transaction messages using the statedb, but any changes are discarded. The +// only goal is to pre-cache transaction signatures and state trie nodes. +func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) { + var ( + header = block.Header() + gaspool = new(GasPool).AddGas(block.GasLimit()) + blockContext = NewEVMBlockContext(header, p.bc, nil) + evm = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) + signer = types.MakeSigner(p.config, header.Number, new(big.Int).SetUint64(header.Time)) + ) + // Iterate over and process the individual transactions + byzantium := p.config.IsByzantium(block.Number()) + for i, tx := range block.Transactions() { + // If block precaching was interrupted, abort + if interrupt != nil && atomic.LoadUint32(interrupt) == 1 { + return + } + // Convert the transaction into an executable message and pre-cache its sender + msg, err := tx.AsMessage(signer, header.BaseFee) + if err != nil { + return // Also invalid block, bail out + } + statedb.Prepare(tx.Hash(), i) + if err := precacheTransaction(msg, p.config, gaspool, statedb, header, evm); err != nil { + return // Ugh, something went horribly wrong, bail out + } + // If we're pre-byzantium, pre-load trie nodes for the intermediate root + if !byzantium { + statedb.IntermediateRoot(true) + } + } + // If we're post-byzantium, pre-load trie nodes for the final root hash + if byzantium { + statedb.IntermediateRoot(true) + } +} + +// precacheTransaction attempts to apply a transaction to the given state database +// and uses the input parameters for its environment. The goal is not to execute +// the transaction successfully, but rather to warm up touched data slots. +func precacheTransaction(msg types.Message, config *params.ChainConfig, gaspool *GasPool, statedb *state.StateDB, header *types.Header, evm *vm.EVM) error { + // Update the evm with the new transaction context.
+ evm.Reset(NewEVMTxContext(msg), statedb) + // Add addresses to access list if applicable + _, err := ApplyMessage(evm, msg, gaspool) + return err +} diff --git a/core/state_processor.go b/core/state_processor.go index 4304744e3d..7afb5411ca 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -27,20 +27,17 @@ package core import ( - "encoding/json" "fmt" "math/big" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile/contract" - "github.com/ava-labs/coreth/precompile/modules" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/misc" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -78,31 +75,26 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state blockNumber = block.Number() allLogs []*types.Log gp = new(GasPool).AddGas(block.GasLimit()) + timestamp = new(big.Int).SetUint64(header.Time) ) - // Configure any upgrades that should go into effect during this block. - err := ApplyUpgrades(p.config, &parent.Time, block, statedb) - if err != nil { - log.Error("failed to configure precompiles processing block", "hash", block.Hash(), "number", block.NumberU64(), "timestamp", block.Time(), "err", err) - return nil, nil, 0, err - } + // Configure any stateful precompiles that should go into effect during this block. 
+ p.config.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time), block, statedb) - var ( - context = NewEVMBlockContext(header, p.bc, nil) - vmenv = vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg) - signer = types.MakeSigner(p.config, header.Number, header.Time) - ) - if beaconRoot := block.BeaconRoot(); beaconRoot != nil { - ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) + // Mutate the block and state according to any hard-fork specs + if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { + misc.ApplyDAOHardFork(statedb) } + blockContext := NewEVMBlockContext(header, p.bc, nil) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) // Iterate over and process the individual transactions for i, tx := range block.Transactions() { - msg, err := TransactionToMessage(tx, signer, header.BaseFee) + msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number, timestamp), header.BaseFee) if err != nil { return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } - statedb.SetTxContext(tx.Hash(), i) - receipt, err := applyTransaction(msg, p.config, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) + statedb.Prepare(tx.Hash(), i) + receipt, err := applyTransaction(msg, p.config, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) if err != nil { return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } @@ -117,7 +109,7 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state return receipts, allLogs, *usedGas, nil } -func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { +func applyTransaction(msg types.Message, config *params.ChainConfig, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb) @@ -148,18 +140,13 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta receipt.TxHash = tx.Hash() receipt.GasUsed = result.UsedGas - if tx.Type() == types.BlobTxType { - receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) - receipt.BlobGasPrice = evm.Context.BlobBaseFee - } - // If the transaction created a contract, store the creation address in the receipt. - if msg.To == nil { + if msg.To() == nil { receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } // Set the receipt logs and create the bloom filter. - receipt.Logs = statedb.GetLogs(tx.Hash(), blockNumber.Uint64(), blockHash) + receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.BlockHash = blockHash receipt.BlockNumber = blockNumber @@ -171,92 +158,13 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
-func ApplyTransaction(config *params.ChainConfig, bc ChainContext, blockContext vm.BlockContext, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { - msg, err := TransactionToMessage(tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) +func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { + msg, err := tx.AsMessage(types.MakeSigner(config, header.Number, new(big.Int).SetUint64(header.Time)), header.BaseFee) if err != nil { return nil, err } // Create a new context to be used in the EVM environment - txContext := NewEVMTxContext(msg) - vmenv := vm.NewEVM(blockContext, txContext, statedb, config, cfg) - return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) -} - -// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root -// contract. This method is exported to be used in tests. -func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) { - // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with - // the new root - msg := &Message{ - From: params.SystemAddress, - GasLimit: 30_000_000, - GasPrice: common.Big0, - GasFeeCap: common.Big0, - GasTipCap: common.Big0, - To: ¶ms.BeaconRootsStorageAddress, - Data: beaconRoot[:], - } - vmenv.Reset(NewEVMTxContext(msg), statedb) - statedb.AddAddressToAccessList(params.BeaconRootsStorageAddress) - _, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.Big0) - statedb.Finalise(true) -} - -// ApplyPrecompileActivations checks if any of the precompiles specified by the chain config are enabled or disabled by the block -// transition from [parentTimestamp] to the timestamp set in [blockContext]. If this is the case, it calls [Configure] -// to apply the necessary state transitions for the upgrade. -// This function is called within genesis setup to configure the starting state for precompiles enabled at genesis. -// In block processing and building, ApplyUpgrades is called instead which also applies state upgrades. -func ApplyPrecompileActivations(c *params.ChainConfig, parentTimestamp *uint64, blockContext contract.ConfigurationBlockContext, statedb *state.StateDB) error { - blockTimestamp := blockContext.Timestamp() - // Note: RegisteredModules returns precompiles sorted by module addresses. - // This ensures that the order we call Configure for each precompile is consistent. - // This ensures even if precompiles read/write state other than their own they will observe - // an identical global state in a deterministic order when they are configured. - for _, module := range modules.RegisteredModules() { - for _, activatingConfig := range c.GetActivatingPrecompileConfigs(module.Address, parentTimestamp, blockTimestamp, c.PrecompileUpgrades) { - // If this transition activates the upgrade, configure the stateful precompile. - // (or deconfigure it if it is being disabled.) - if activatingConfig.IsDisabled() { - log.Info("Disabling precompile", "name", module.ConfigKey) - statedb.SelfDestruct(module.Address) - // Calling Finalise here effectively commits Suicide call and wipes the contract state. - // This enables re-configuration of the same contract state in the same block. 
- // Without an immediate Finalise call after the Suicide, a reconfigured precompiled state can be wiped out - // since Suicide will be committed after the reconfiguration. - statedb.Finalise(true) - } else { - var printIntf interface{} - marshalled, err := json.Marshal(activatingConfig) - if err == nil { - printIntf = string(marshalled) - } else { - printIntf = activatingConfig - } - - log.Info("Activating new precompile", "name", module.ConfigKey, "config", printIntf) - // Set the nonce of the precompile's address (as is done when a contract is created) to ensure - // that it is marked as non-empty and will not be cleaned up when the statedb is finalized. - statedb.SetNonce(module.Address, 1) - // Set the code of the precompile's address to a non-zero length byte slice to ensure that the precompile - // can be called from within Solidity contracts. Solidity adds a check before invoking a contract to ensure - // that it does not attempt to invoke a non-existent contract. - statedb.SetCode(module.Address, []byte{0x1}) - if err := module.Configure(c, activatingConfig, statedb, blockContext); err != nil { - return fmt.Errorf("could not configure precompile, name: %s, reason: %w", module.ConfigKey, err) - } - } - } - } - return nil -} - -// ApplyUpgrades checks if any of the precompile or state upgrades specified by the chain config are activated by the block -// transition from [parentTimestamp] to the timestamp set in [header]. If this is the case, it calls [Configure] -// to apply the necessary state transitions for the upgrade. -// This function is called: -// - in block processing to update the state when processing a block. -// - in the miner to apply the state upgrades when producing a block. -func ApplyUpgrades(c *params.ChainConfig, parentTimestamp *uint64, blockContext contract.ConfigurationBlockContext, statedb *state.StateDB) error { - return ApplyPrecompileActivations(c, parentTimestamp, blockContext, statedb) + blockContext := NewEVMBlockContext(header, bc, author) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) + return applyTransaction(msg, config, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } diff --git a/core/state_processor_test.go b/core/state_processor_test.go index f6dcb789a6..e553cf64bc 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -27,42 +27,33 @@ package core import ( - "crypto/ecdsa" "math/big" "testing" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/holiman/uint256" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/trie" "golang.org/x/crypto/sha3" ) -func u64(val uint64) *uint64 { return &val } - // TestStateProcessorErrors tests the output from the 'core' errors // as defined in core/error.go. 
These errors are generated when the // blockchain imports bad blocks, meaning blocks which have valid headers but // contain invalid transactions func TestStateProcessorErrors(t *testing.T) { - cpcfg := *params.TestChainConfig - config := &cpcfg - config.CancunTime = u64(0) - var ( - signer = types.LatestSigner(config) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + config = params.TestChainConfig + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") ) - var makeTx = func(key *ecdsa.PrivateKey, nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key) + var makeTx = func(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey) return tx } var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { @@ -73,37 +64,9 @@ func TestStateProcessorErrors(t *testing.T) { Gas: gasLimit, To: &to, Value: big.NewInt(0), - }), signer, key1) + }), signer, testKey) return tx } - var mkDynamicCreationTx = func(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - Value: big.NewInt(0), - Data: data, - }), signer, key1) - return tx - } - var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int, hashes []common.Hash) *types.Transaction { - tx, err := types.SignTx(types.NewTx(&types.BlobTx{ - Nonce: nonce, - GasTipCap: uint256.MustFromBig(gasTipCap), - GasFeeCap: uint256.MustFromBig(gasFeeCap), - Gas: gasLimit, - To: to, - BlobHashes: hashes, - BlobFeeCap: uint256.MustFromBig(blobGasFeeCap), - Value: new(uint256.Int), - }), signer, key1) - if err != nil { - t.Fatal(err) - } - return tx - } - { // Tests against a 'recent' chain definition var ( db = rawdb.NewMemoryDatabase() @@ -111,18 +74,17 @@ func TestStateProcessorErrors(t *testing.T) { Config: config, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ - Balance: big.NewInt(4000000000000000000), // 4 ether + Balance: big.NewInt(2000000000000000000), // 2 ether Nonce: 0, }, }, - GasLimit: params.CortinaGasLimit, + GasLimit: params.ApricotPhase1GasLimit, } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) - tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} + genesis = gspec.MustCommit(db) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) ) - defer blockchain.Stop() - bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes()) + bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) tooBigNumber := new(big.Int).Set(bigNumber) tooBigNumber.Add(tooBigNumber, common.Big1) for i, tt := range []struct { @@ -131,34 +93,34 @@ func TestStateProcessorErrors(t *testing.T) { }{ { // ErrNonceTooLow txs: []*types.Transaction{ - makeTx(key1, 0, 
common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), }, want: "could not apply tx 1 [0x734d821c990099c6ae42d78072aadd3931c35328cf03ef4cf5b2a4ac9c398522]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", }, { // ErrNonceTooHigh txs: []*types.Transaction{ - makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), + makeTx(100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x0df36254cfbef8ed6961b38fc68aecc777177166144c8a56bc8919e23a559bf4]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0", }, { // ErrGasLimitReached txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), 15000001, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), 8000001, big.NewInt(225000000000), nil), }, - want: "could not apply tx 0 [0x1354370681d2ab68247073d889736f8be4a8d87e35956f0c02658d3670803a66]: gas limit reached", + want: "could not apply tx 0 [0xfbe38b817aaa760c2766b56c019fcdba506560a28fd41c69ae96bdaa4569e317]: gas limit reached", }, { // ErrInsufficientFundsForTransfer txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(4000000000000000000), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(2000000000000000000), params.TxGas, big.NewInt(225000000000), nil), }, - want: "could not apply tx 0 [0x1632f2bffcce84a5c91dd8ab2016128fccdbcfbe0485d2c67457e1c793c72a4b]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 4004725000000000000", + want: "could not apply tx 0 [0xae1601ef55b676ebb824ee7e16a0d14af725b7f9cf5ec79e21f14833c26b5b35]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 2004725000000000000", }, { // ErrInsufficientFunds txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), }, - want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 18900000000000000000000", + want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 18900000000000000000000", }, // ErrGasUintOverflow // One missing 'core' error is ErrGasUintOverflow: "gas uint64 overflow", @@ -166,21 +128,21 @@ func TestStateProcessorErrors(t *testing.T) { // multiplication len(data) +gas_per_byte overflows uint64. 
Not testable at the moment { // ErrIntrinsicGas txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x2fc3e3b5cc26917d413e26983fe189475f47d4f0757e32aaa5561fcb9c9dc432]: intrinsic gas too low: have 20000, want 21000", }, { // ErrGasLimitReached txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas*762, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas*381, big.NewInt(225000000000), nil), }, - want: "could not apply tx 0 [0x76c07cc2b32007eb1a9c3fa066d579a3d77ec4ecb79bbc266624a601d7b08e46]: gas limit reached", + want: "could not apply tx 0 [0x9ee548e001369418ae53aaa11b5d823f081cc7fa9c9a7ee71a978ae17a2aece0]: gas limit reached", }, { // ErrFeeCapTooLow txs: []*types.Transaction{ mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(0), big.NewInt(0)), }, - want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0, baseFee: 225000000000", + want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0 baseFee: 225000000000", }, { // ErrTipVeryHigh txs: []*types.Transaction{ @@ -207,36 +169,18 @@ func TestStateProcessorErrors(t *testing.T) { // This test is designed to have the effective cost be covered by the balance, but // the extended requirement on FeeCap*gas < balance to fail txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(200000000000000)), + mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(100000000000000)), }, - want: "could not apply tx 0 [0xa3840aa3cad37eec8607b9f4846813d4a80e70b462a793fa21f64138156f849b]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 4200000000000000000", + want: "could not apply tx 0 [0x3388378ed60640e75d2edf728d5528a305f599997abc4f23ec46b351b6197499]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 2100000000000000000", }, { // Another ErrInsufficientFunds, this one to ensure that feecap/tip of max u256 is allowed txs: []*types.Transaction{ mkDynamicTx(0, common.Address{}, params.TxGas, bigNumber, bigNumber), }, - want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", - }, - { // ErrMaxInitCodeSizeExceeded - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), tooBigInitCode[:]), - }, - want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152", - }, - { // ErrIntrinsicGas: Not enough gas to cover init code - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), make([]byte, 320)), - }, - 
want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300", - }, - { // ErrBlobFeeCapTooLow - txs: []*types.Transaction{ - mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), big.NewInt(0), []common.Hash{(common.Hash{1})}), - }, - want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1, baseFee: 225000000000", + want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", }, } { - block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -256,6 +200,7 @@ func TestStateProcessorErrors(t *testing.T) { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), + EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -263,8 +208,8 @@ func TestStateProcessorErrors(t *testing.T) { PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), }, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ @@ -274,7 +219,8 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.ApricotPhase1GasLimit, } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) + genesis = gspec.MustCommit(db) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) ) defer blockchain.Stop() for i, tt := range []struct { @@ -288,7 +234,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported", }, } { - block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -312,9 +258,10 @@ func TestStateProcessorErrors(t *testing.T) { Code: common.FromHex("0xB0B0FACE"), }, }, - GasLimit: params.CortinaGasLimit, + GasLimit: params.ApricotPhase1GasLimit, } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) + genesis = gspec.MustCommit(db) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) ) defer blockchain.Stop() for i, tt := range []struct { @@ -328,7 +275,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 
0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1", }, } { - block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -345,11 +292,10 @@ func TestStateProcessorErrors(t *testing.T) { // valid to be considered for import: // - valid pow (fake), ancestry, difficulty, gaslimit etc func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig) *types.Block { - fakeChainReader := newChainMaker(nil, config, engine) header := &types.Header{ ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), - Difficulty: engine.CalcDifficulty(fakeChainReader, parent.Time()+10, &types.Header{ + Difficulty: engine.CalcDifficulty(&fakeChainReader{config}, parent.Time()+10, &types.Header{ Number: parent.Number(), Time: parent.Time(), Difficulty: parent.Difficulty(), @@ -360,10 +306,10 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr Time: parent.Time() + 10, UncleHash: types.EmptyUncleHash, } - if config.IsApricotPhase3(header.Time) { + if config.IsApricotPhase3(new(big.Int).SetUint64(header.Time)) { header.Extra, header.BaseFee, _ = dummy.CalcBaseFee(config, parent.Header(), header.Time) } - if config.IsApricotPhase4(header.Time) { + if config.IsApricotPhase4(new(big.Int).SetUint64(header.Time)) { header.BlockGasCost = big.NewInt(0) header.ExtDataGasUsed = big.NewInt(0) } @@ -373,7 +319,6 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr hasher := sha3.NewLegacyKeccak256() hasher.Write(header.Number.Bytes()) var cumulativeGas uint64 - var nBlobs int for _, tx := range txs { txh := tx.Hash() hasher.Write(txh[:]) @@ -382,22 +327,8 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr receipt.GasUsed = tx.Gas() receipts = append(receipts, receipt) cumulativeGas += tx.Gas() - nBlobs += len(tx.BlobHashes()) } header.Root = common.BytesToHash(hasher.Sum(nil)) - if config.IsCancun(header.Number, header.Time) { - var pExcess, pUsed = uint64(0), uint64(0) - if parent.ExcessBlobGas() != nil { - pExcess = *parent.ExcessBlobGas() - pUsed = *parent.BlobGasUsed() - } - excess := eip4844.CalcExcessBlobGas(pExcess, pUsed) - used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) - header.ExcessBlobGas = &excess - header.BlobGasUsed = &used - - header.ParentBeaconRoot = new(common.Hash) - } // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) + return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil), nil, true) } diff --git a/core/state_transition.go b/core/state_transition.go index 3bb37897e7..820ae2dab0 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -32,22 +32,71 @@ import ( "math" "math/big" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" - cmath "github.com/ethereum/go-ethereum/common/math" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" + 
"github.com/tenderly/coreth/vmerrs" ) +var emptyCodeHash = crypto.Keccak256Hash(nil) + +/* +The State Transitioning Model + +A state transition is a change made when a transaction is applied to the current world state +The state transitioning model does all the necessary work to work out a valid new state root. + +1) Nonce handling +2) Pre pay gas +3) Create a new state object if the recipient is \0*32 +4) Value transfer +== If contract creation == + 4a) Attempt to run transaction data + 4b) If valid, use result as code for the new state object +== end == +5) Run Script section +6) Derive new state root +*/ +type StateTransition struct { + gp *GasPool + msg Message + gas uint64 + gasPrice *big.Int + gasFeeCap *big.Int + gasTipCap *big.Int + initialGas uint64 + value *big.Int + data []byte + state vm.StateDB + evm *vm.EVM +} + +// Message represents a message sent to a contract. +type Message interface { + From() common.Address + To() *common.Address + + GasPrice() *big.Int + GasFeeCap() *big.Int + GasTipCap() *big.Int + Gas() uint64 + Value() *big.Int + + Nonce() uint64 + IsFake() bool + Data() []byte + AccessList() types.AccessList +} + // ExecutionResult includes all output after executing given evm // message no matter the execution itself is successful or not. type ExecutionResult struct { - UsedGas uint64 // Total used gas, not including the refunded gas - RefundedGas uint64 // Total gas refunded after execution - Err error // Any error encountered during the execution(listed in core/vm/errors.go) - ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode) + UsedGas uint64 // Total used gas but include the refunded gas + Err error // Any error encountered during the execution(listed in core/vm/errors.go) + ReturnData []byte // Returned data from evm(function result or data supplied with revert opcode) } // Unwrap returns the internal evm error which allows us for further @@ -78,17 +127,16 @@ func (result *ExecutionResult) Revert() []byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. 
-func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, rules params.Rules) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 - if isContractCreation && rules.IsHomestead { + if isContractCreation && isHomestead { gas = params.TxGasContractCreation } else { gas = params.TxGas } - dataLen := uint64(len(data)) // Bump the required gas by the amount of transactional data - if dataLen > 0 { + if len(data) > 0 { // Zero and non-zero bytes are priced differently var nz uint64 for _, byt := range data { @@ -98,7 +146,7 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } // Make sure we don't exceed uint64 for all data combinations nonZeroGas := params.TxDataNonZeroGasFrontier - if rules.IsIstanbul { + if isEIP2028 { nonZeroGas = params.TxDataNonZeroGasEIP2028 } if (math.MaxUint64-gas)/nonZeroGas < nz { @@ -106,127 +154,32 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } gas += nz * nonZeroGas - z := dataLen - nz + z := uint64(len(data)) - nz if (math.MaxUint64-gas)/params.TxDataZeroGas < z { return 0, ErrGasUintOverflow } gas += z * params.TxDataZeroGas - - if isContractCreation && rules.IsDurango { - lenWords := toWordSize(dataLen) - if (math.MaxUint64-gas)/params.InitCodeWordGas < lenWords { - return 0, ErrGasUintOverflow - } - gas += lenWords * params.InitCodeWordGas - } } if accessList != nil { - accessListGas, err := accessListGas(rules, accessList) - if err != nil { - return 0, err - } - totalGas, overflow := cmath.SafeAdd(gas, accessListGas) - if overflow { - return 0, ErrGasUintOverflow - } - gas = totalGas - } - - return gas, nil -} - -func accessListGas(rules params.Rules, accessList types.AccessList) (uint64, error) { - var gas uint64 - if !rules.PredicatersExist() { gas += uint64(len(accessList)) * params.TxAccessListAddressGas gas += uint64(accessList.StorageKeys()) * params.TxAccessListStorageKeyGas - return gas, nil - } - - for _, accessTuple := range accessList { - address := accessTuple.Address - predicaterContract, ok := rules.Predicaters[address] - if !ok { - // Previous access list gas calculation does not use safemath because an overflow would not be possible with - // the size of access lists that could be included in a block and standard access list gas costs. - // Therefore, we only check for overflow when adding to [totalGas], which could include the sum of values - // returned by a predicate. - accessTupleGas := params.TxAccessListAddressGas + uint64(len(accessTuple.StorageKeys))*params.TxAccessListStorageKeyGas - totalGas, overflow := cmath.SafeAdd(gas, accessTupleGas) - if overflow { - return 0, ErrGasUintOverflow - } - gas = totalGas - } else { - predicateGas, err := predicaterContract.PredicateGas(utils.HashSliceToBytes(accessTuple.StorageKeys)) - if err != nil { - return 0, err - } - totalGas, overflow := cmath.SafeAdd(gas, predicateGas) - if overflow { - return 0, ErrGasUintOverflow - } - gas = totalGas - } } - return gas, nil } -// toWordSize returns the ceiled word size required for init code payment calculation. -func toWordSize(size uint64) uint64 { - if size > math.MaxUint64-31 { - return math.MaxUint64/32 + 1 - } - - return (size + 31) / 32 -} - -// A Message contains the data derived from a single transaction that is relevant to state -// processing. 
-type Message struct { - To *common.Address - From common.Address - Nonce uint64 - Value *big.Int - GasLimit uint64 - GasPrice *big.Int - GasFeeCap *big.Int - GasTipCap *big.Int - Data []byte - AccessList types.AccessList - BlobGasFeeCap *big.Int - BlobHashes []common.Hash - - // When SkipAccountChecks is true, the message nonce is not checked against the - // account nonce in state. It also disables checking that the sender is an EOA. - // This field will be set to true for operations like RPC eth_call. - SkipAccountChecks bool -} - -// TransactionToMessage converts a transaction into a Message. -func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.Int) (*Message, error) { - msg := &Message{ - Nonce: tx.Nonce(), - GasLimit: tx.Gas(), - GasPrice: new(big.Int).Set(tx.GasPrice()), - GasFeeCap: new(big.Int).Set(tx.GasFeeCap()), - GasTipCap: new(big.Int).Set(tx.GasTipCap()), - To: tx.To(), - Value: tx.Value(), - Data: tx.Data(), - AccessList: tx.AccessList(), - SkipAccountChecks: false, - BlobHashes: tx.BlobHashes(), - BlobGasFeeCap: tx.BlobGasFeeCap(), - } - // If baseFee provided, set gasPrice to effectiveGasPrice. - if baseFee != nil { - msg.GasPrice = cmath.BigMin(msg.GasPrice.Add(msg.GasTipCap, baseFee), msg.GasFeeCap) +// NewStateTransition initialises and returns a new state transition object. +func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition { + return &StateTransition{ + gp: gp, + evm: evm, + msg: msg, + gasPrice: msg.GasPrice(), + gasFeeCap: msg.GasFeeCap(), + gasTipCap: msg.GasTipCap(), + value: msg.Value(), + data: msg.Data(), + state: evm.StateDB, } - var err error - msg.From, err = types.Sender(s, tx) - return msg, err } // ApplyMessage computes the new state by applying the given message @@ -236,169 +189,86 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In // the gas used (which includes gas refunds) and an error if it failed. An error always // indicates a core error meaning that the message would always fail for that particular // state and would never be accepted within a block. -func ApplyMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) { +func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) (*ExecutionResult, error) { return NewStateTransition(evm, msg, gp).TransitionDb() } -// StateTransition represents a state transition. -// -// == The State Transitioning Model -// -// A state transition is a change made when a transaction is applied to the current world -// state. The state transitioning model does all the necessary work to work out a valid new -// state root. -// -// 1. Nonce handling -// 2. Pre pay gas -// 3. Create a new state object if the recipient is nil -// 4. Value transfer -// -// == If contract creation == -// -// 4a. Attempt to run transaction data -// 4b. If valid, use result as code for the new state object -// -// == end == -// -// 5. Run Script section -// 6. Derive new state root -type StateTransition struct { - gp *GasPool - msg *Message - gasRemaining uint64 - initialGas uint64 - state vm.StateDB - evm *vm.EVM -} - -// NewStateTransition initialises and returns a new state transition object. -func NewStateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTransition { - return &StateTransition{ - gp: gp, - evm: evm, - msg: msg, - state: evm.StateDB, - } -} - // to returns the recipient of the message. 
func (st *StateTransition) to() common.Address { - if st.msg == nil || st.msg.To == nil /* contract creation */ { + if st.msg == nil || st.msg.To() == nil /* contract creation */ { return common.Address{} } - return *st.msg.To + return *st.msg.To() } func (st *StateTransition) buyGas() error { - mgval := new(big.Int).SetUint64(st.msg.GasLimit) - mgval = mgval.Mul(mgval, st.msg.GasPrice) - balanceCheck := new(big.Int).Set(mgval) - if st.msg.GasFeeCap != nil { - balanceCheck.SetUint64(st.msg.GasLimit) - balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) - balanceCheck.Add(balanceCheck, st.msg.Value) - } - if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { - if blobGas := st.blobGasUsed(); blobGas > 0 { - // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap - blobBalanceCheck := new(big.Int).SetUint64(blobGas) - blobBalanceCheck.Mul(blobBalanceCheck, st.msg.BlobGasFeeCap) - balanceCheck.Add(balanceCheck, blobBalanceCheck) - // Pay for blobGasUsed * actual blob fee - blobFee := new(big.Int).SetUint64(blobGas) - blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee) - mgval.Add(mgval, blobFee) - } + mgval := new(big.Int).SetUint64(st.msg.Gas()) + mgval = mgval.Mul(mgval, st.gasPrice) + balanceCheck := mgval + if st.gasFeeCap != nil { + balanceCheck = new(big.Int).SetUint64(st.msg.Gas()) + balanceCheck.Mul(balanceCheck, st.gasFeeCap) + balanceCheck.Add(balanceCheck, st.value) } - if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 { - return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want) + if have, want := st.state.GetBalance(st.msg.From()), balanceCheck; have.Cmp(want) < 0 { + return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From().Hex(), have, want) } - if err := st.gp.SubGas(st.msg.GasLimit); err != nil { + if err := st.gp.SubGas(st.msg.Gas()); err != nil { return err } - st.gasRemaining += st.msg.GasLimit + st.gas += st.msg.Gas() - st.initialGas = st.msg.GasLimit - st.state.SubBalance(st.msg.From, mgval) + st.initialGas = st.msg.Gas() + st.state.SubBalance(st.msg.From(), mgval) return nil } func (st *StateTransition) preCheck() error { // Only check transactions that are not fake - msg := st.msg - if !msg.SkipAccountChecks { + if !st.msg.IsFake() { // Make sure this transaction's nonce is correct. 
- stNonce := st.state.GetNonce(msg.From) - if msgNonce := msg.Nonce; stNonce < msgNonce { + stNonce := st.state.GetNonce(st.msg.From()) + if msgNonce := st.msg.Nonce(); stNonce < msgNonce { return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, - msg.From.Hex(), msgNonce, stNonce) + st.msg.From().Hex(), msgNonce, stNonce) } else if stNonce > msgNonce { return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, - msg.From.Hex(), msgNonce, stNonce) + st.msg.From().Hex(), msgNonce, stNonce) } else if stNonce+1 < stNonce { return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax, - msg.From.Hex(), stNonce) + st.msg.From().Hex(), stNonce) } // Make sure the sender is an EOA - codeHash := st.state.GetCodeHash(msg.From) - if codeHash != (common.Hash{}) && codeHash != types.EmptyCodeHash { + if codeHash := st.state.GetCodeHash(st.msg.From()); codeHash != emptyCodeHash && codeHash != (common.Hash{}) { return fmt.Errorf("%w: address %v, codehash: %s", ErrSenderNoEOA, - msg.From.Hex(), codeHash) + st.msg.From().Hex(), codeHash) } // Make sure the sender is not prohibited - if vm.IsProhibited(msg.From) { - return fmt.Errorf("%w: address %v", vmerrs.ErrAddrProhibited, msg.From) + if vm.IsProhibited(st.msg.From()) { + return fmt.Errorf("%w: address %v", vmerrs.ErrAddrProhibited, st.msg.From()) } } // Make sure that transaction gasFeeCap is greater than the baseFee (post london) if st.evm.ChainConfig().IsApricotPhase3(st.evm.Context.Time) { // Skip the checks if gas fields are zero and baseFee was explicitly disabled (eth_call) - skipCheck := st.evm.Config.NoBaseFee && msg.GasFeeCap.BitLen() == 0 && msg.GasTipCap.BitLen() == 0 - if !skipCheck { - if l := msg.GasFeeCap.BitLen(); l > 256 { + if !st.evm.Config.NoBaseFee || st.gasFeeCap.BitLen() > 0 || st.gasTipCap.BitLen() > 0 { + if l := st.gasFeeCap.BitLen(); l > 256 { return fmt.Errorf("%w: address %v, maxFeePerGas bit length: %d", ErrFeeCapVeryHigh, - msg.From.Hex(), l) + st.msg.From().Hex(), l) } - if l := msg.GasTipCap.BitLen(); l > 256 { + if l := st.gasTipCap.BitLen(); l > 256 { return fmt.Errorf("%w: address %v, maxPriorityFeePerGas bit length: %d", ErrTipVeryHigh, - msg.From.Hex(), l) + st.msg.From().Hex(), l) } - if msg.GasFeeCap.Cmp(msg.GasTipCap) < 0 { + if st.gasFeeCap.Cmp(st.gasTipCap) < 0 { return fmt.Errorf("%w: address %v, maxPriorityFeePerGas: %s, maxFeePerGas: %s", ErrTipAboveFeeCap, - msg.From.Hex(), msg.GasTipCap, msg.GasFeeCap) + st.msg.From().Hex(), st.gasTipCap, st.gasFeeCap) } // This will panic if baseFee is nil, but basefee presence is verified // as part of header validation. 
- if msg.GasFeeCap.Cmp(st.evm.Context.BaseFee) < 0 { - return fmt.Errorf("%w: address %v, maxFeePerGas: %s, baseFee: %s", ErrFeeCapTooLow, - msg.From.Hex(), msg.GasFeeCap, st.evm.Context.BaseFee) - } - } - } - // Check the blob version validity - if msg.BlobHashes != nil { - if len(msg.BlobHashes) == 0 { - return errors.New("blob transaction missing blob hashes") - } - for i, hash := range msg.BlobHashes { - if hash[0] != params.BlobTxHashVersion { - return fmt.Errorf("blob %d hash version mismatch (have %d, supported %d)", - i, hash[0], params.BlobTxHashVersion) - } - } - } - // Check that the user is paying at least the current blob fee - if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { - if st.blobGasUsed() > 0 { - // Skip the checks if gas fields are zero and blobBaseFee was explicitly disabled (eth_call) - skipCheck := st.evm.Config.NoBaseFee && msg.BlobGasFeeCap.BitLen() == 0 - if !skipCheck { - // This will panic if blobBaseFee is nil, but blobBaseFee presence - // is verified as part of header validation. - if msg.BlobGasFeeCap.Cmp(st.evm.Context.BlobBaseFee) < 0 { - return fmt.Errorf("%w: address %v blobGasFeeCap: %v, blobBaseFee: %v", ErrBlobFeeCapTooLow, - msg.From.Hex(), msg.BlobGasFeeCap, st.evm.Context.BlobBaseFee) - } + if st.gasFeeCap.Cmp(st.evm.Context.BaseFee) < 0 { + return fmt.Errorf("%w: address %v, maxFeePerGas: %s baseFee: %s", ErrFeeCapTooLow, + st.msg.From().Hex(), st.gasFeeCap, st.evm.Context.BaseFee) } } } @@ -408,10 +278,13 @@ func (st *StateTransition) preCheck() error { // TransitionDb will transition the state by applying the current message and // returning the evm execution result with the following fields. // -// - used gas: total gas used (including gas being refunded) -// - returndata: the returned data from evm -// - concrete execution error: various EVM errors which abort the execution, e.g. -// ErrOutOfGas, ErrExecutionReverted +// - used gas: +// total gas used (including gas being refunded) +// - returndata: +// the returned data from evm +// - concrete execution error: +// various **EVM** errors which abort the execution, +// e.g. ErrOutOfGas, ErrExecutionReverted // // However, if any consensus issue is encountered, return the error directly with // nil evm execution result.
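A reader skimming the (+) side of buyGas further up may find the arithmetic easier in isolation. A minimal, self-contained sketch of that balance check, with illustrative stand-ins for the message fields (this is not the exact coreth code):

package main

import (
	"fmt"
	"math/big"
)

// requiredBalance mirrors the (+) side of buyGas: the amount actually deducted
// up front is gasLimit * gasPrice, but for EIP-1559-style (ApricotPhase3)
// transactions the balance check instead covers the worst case
// gasLimit * gasFeeCap + value.
func requiredBalance(gasLimit uint64, gasPrice, gasFeeCap, value *big.Int) *big.Int {
	mgval := new(big.Int).SetUint64(gasLimit)
	mgval.Mul(mgval, gasPrice)
	if gasFeeCap == nil {
		return mgval // legacy transaction: check exactly what is deducted
	}
	check := new(big.Int).SetUint64(gasLimit)
	check.Mul(check, gasFeeCap)
	return check.Add(check, value)
}

func main() {
	// 21000 gas at an effective price of 25, fee cap 50, and a 1000 wei transfer:
	// the sender must hold 21000*50 + 1000, even though only 21000*25 is deducted.
	fmt.Println(requiredBalance(21000, big.NewInt(25), big.NewInt(50), big.NewInt(1000)))
}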
@@ -431,96 +304,88 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { return nil, err } - if tracer := st.evm.Config.Tracer; tracer != nil { - tracer.CaptureTxStart(st.initialGas) + if st.evm.Config.Debug { + st.evm.Config.Tracer.CaptureTxStart(st.initialGas) defer func() { - tracer.CaptureTxEnd(st.gasRemaining) + st.evm.Config.Tracer.CaptureTxEnd(st.gas) }() } var ( msg = st.msg - sender = vm.AccountRef(msg.From) - rules = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Time) - contractCreation = msg.To == nil + sender = vm.AccountRef(msg.From()) + rules = st.evm.ChainConfig().AvalancheRules(st.evm.Context.BlockNumber, st.evm.Context.Time) + contractCreation = msg.To() == nil ) // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(msg.Data, msg.AccessList, contractCreation, rules) + gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, rules.IsHomestead, rules.IsIstanbul) if err != nil { return nil, err } - if st.gasRemaining < gas { - return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gasRemaining, gas) + if st.gas < gas { + return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas) } - st.gasRemaining -= gas + st.gas -= gas // Check clause 6 - if msg.Value.Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From, msg.Value) { - return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From.Hex()) + if msg.Value().Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From(), msg.Value()) { + return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From().Hex()) } - // Check whether the init code size has been exceeded. - if rules.IsDurango && contractCreation && len(msg.Data) > params.MaxInitCodeSize { - return nil, fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(msg.Data), params.MaxInitCodeSize) + // Set up the initial access list. 
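The comment that closes this hunk introduces the ApricotPhase2 (Berlin-equivalent) warm-up of EIP-2929/EIP-2930. The annotated mirror below restates the (+) call that immediately follows, adding only descriptive comments; no new API is assumed:

// Everything registered here starts out "warm", so its first access inside
// the EVM is charged the reduced warm cost instead of the cold surcharge.
st.state.PrepareAccessList(
	msg.From(),                  // the sender is always warm
	msg.To(),                    // the destination (nil for contract creation)
	vm.ActivePrecompiles(rules), // precompiles active under the current rule set
	msg.AccessList(),            // addresses and storage keys declared via EIP-2930
)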
+ if rules.IsApricotPhase2 { + st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList()) } - - // Execute the preparatory steps for state transition which includes: - // - prepare accessList(post-berlin/ApricotPhase2) - // - reset transient storage(eip 1153) - st.state.Prepare(rules, msg.From, st.evm.Context.Coinbase, msg.To, vm.ActivePrecompiles(rules), msg.AccessList) - var ( ret []byte vmerr error // vm errors do not affect consensus and are therefore not assigned to err ) if contractCreation { - ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, msg.Value) + ret, _, st.gas, vmerr = st.evm.Create(sender, st.data, st.gas, st.value) } else { // Increment the nonce for the next transaction - st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) - ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, msg.Value) + st.state.SetNonce(msg.From(), st.state.GetNonce(sender.Address())+1) + ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value) } - gasRefund := st.refundGas(rules.IsApricotPhase1) - st.state.AddBalance(st.evm.Context.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), msg.GasPrice)) + if errors.Is(vmerr, vmerrs.ErrToAddrProhibitedSoft) { // Only invalidate soft error here + return &ExecutionResult{ + UsedGas: st.gasUsed(), + Err: vmerr, + ReturnData: ret, + }, vmerr + } + st.refundGas(rules.IsApricotPhase1) + st.state.AddBalance(st.evm.Context.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) return &ExecutionResult{ - UsedGas: st.gasUsed(), - RefundedGas: gasRefund, - Err: vmerr, - ReturnData: ret, + UsedGas: st.gasUsed(), + Err: vmerr, + ReturnData: ret, }, nil } -func (st *StateTransition) refundGas(apricotPhase1 bool) uint64 { - var refund uint64 +func (st *StateTransition) refundGas(apricotPhase1 bool) { // Inspired by: https://gist.github.com/holiman/460f952716a74eeb9ab358bb1836d821#gistcomment-3642048 if !apricotPhase1 { // Apply refund counter, capped to half of the used gas. - refund = st.gasUsed() / 2 + refund := st.gasUsed() / 2 if refund > st.state.GetRefund() { refund = st.state.GetRefund() } - st.gasRemaining += refund + st.gas += refund } // Return ETH for remaining gas, exchanged at the original rate. - remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gasRemaining), st.msg.GasPrice) - st.state.AddBalance(st.msg.From, remaining) + remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gas), st.gasPrice) + st.state.AddBalance(st.msg.From(), remaining) // Also return remaining gas to the block gas counter so it is // available for the next transaction. - st.gp.AddGas(st.gasRemaining) - - return refund + st.gp.AddGas(st.gas) } // gasUsed returns the amount of gas used up by the state transition. func (st *StateTransition) gasUsed() uint64 { - return st.initialGas - st.gasRemaining -} - -// blobGasUsed returns the amount of blob gas used by the message. -func (st *StateTransition) blobGasUsed() uint64 { - return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob) + return st.initialGas - st.gas } diff --git a/core/stateful_precompile_test.go b/core/stateful_precompile_test.go new file mode 100644 index 0000000000..06a28e3a07 --- /dev/null +++ b/core/stateful_precompile_test.go @@ -0,0 +1,43 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
+ +package core + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/precompile" +) + +var ( + _ precompile.BlockContext = &mockBlockContext{} + _ precompile.PrecompileAccessibleState = &mockAccessibleState{} +) + +type mockBlockContext struct { + blockNumber *big.Int + timestamp uint64 +} + +func (mb *mockBlockContext) Number() *big.Int { return mb.blockNumber } +func (mb *mockBlockContext) Timestamp() *big.Int { return new(big.Int).SetUint64(mb.timestamp) } + +type mockAccessibleState struct { + state *state.StateDB + blockContext *mockBlockContext + + // NativeAssetCall return values + ret []byte + remainingGas uint64 + err error +} + +func (m *mockAccessibleState) GetStateDB() precompile.StateDB { return m.state } + +func (m *mockAccessibleState) GetBlockContext() precompile.BlockContext { return m.blockContext } + +func (m *mockAccessibleState) NativeAssetCall(common.Address, []byte, uint64, uint64, bool) ([]byte, uint64, error) { + return m.ret, m.remainingGas, m.err +} diff --git a/core/test_blockchain.go b/core/test_blockchain.go index 7c0807b829..91b7706733 100644 --- a/core/test_blockchain.go +++ b/core/test_blockchain.go @@ -8,36 +8,22 @@ import ( "math/big" "strings" "testing" - "time" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" ) -var TestCallbacks = dummy.ConsensusCallbacks{ - OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) - return nil, nil, nil - }, - OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) - return nil, nil, nil, nil - }, -} - type ChainTest struct { Name string testFunc func( t *testing.T, - create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error), + create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error), ) } @@ -109,12 +95,13 @@ func copyMemDB(db ethdb.Database) (ethdb.Database, error) { func checkBlockChainState( t *testing.T, bc *BlockChain, - gspec *Genesis, + genesis *Genesis, originalDB ethdb.Database, - create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error), + create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error), checkState func(sdb *state.StateDB) error, ) (*BlockChain, *BlockChain, *BlockChain) { var ( + chainConfig = bc.Config() lastAcceptedBlock = bc.LastConsensusAcceptedBlock() newDB = rawdb.NewMemoryDatabase() ) @@ -127,11 +114,12 @@ func checkBlockChainState( 
t.Fatalf("Check state failed for original blockchain due to: %s", err) } - newBlockChain, err := create(newDB, gspec, common.Hash{}) + _ = genesis.MustCommit(newDB) + + newBlockChain, err := create(newDB, chainConfig, common.Hash{}) if err != nil { t.Fatalf("Failed to create new blockchain instance: %s", err) } - defer newBlockChain.Stop() for i := uint64(1); i <= lastAcceptedBlock.NumberU64(); i++ { block := bc.GetBlockByNumber(i) @@ -166,13 +154,13 @@ func checkBlockChainState( if err != nil { t.Fatal(err) } - restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash()) + restartedChain, err := create(originalDB, chainConfig, lastAcceptedBlock.Hash()) if err != nil { t.Fatal(err) } defer restartedChain.Stop() if currentBlock := restartedChain.CurrentBlock(); currentBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } if restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock(); restartedLastAcceptedBlock.Hash() != lastAcceptedBlock.Hash() { t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), restartedLastAcceptedBlock.Hash().Hex(), restartedLastAcceptedBlock.NumberU64()) @@ -190,12 +178,15 @@ func checkBlockChainState( return bc, newBlockChain, restartedChain } -func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -205,7 +196,10 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - blockchain, err := create(chainDB, gspec, common.Hash{}) + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) + + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -213,7 +207,9 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks.
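The two-database pattern that this comment introduces recurs in every test below, so it is worth spelling out once, before the GenerateChain call that follows. A hedged sketch of the shape (create and gspec come from the surrounding test; the GenerateChain signature is the one visible on the (+) lines):

// genDB absorbs the state roots that GenerateChain commits as a side effect;
// chainDB stays clean for the BlockChain under test. Both must be seeded with
// the same genesis so the generated blocks connect.
genDB := rawdb.NewMemoryDatabase()
chainDB := rawdb.NewMemoryDatabase()

genesis := gspec.MustCommit(genDB) // MustCommit returns the genesis *types.Block
_ = gspec.MustCommit(chainDB)      // seed the database under test identically

blockchain, err := create(chainDB, gspec.Config, common.Hash{})
if err != nil {
	t.Fatal(err)
}
// Blocks are built against genDB and only reach chainDB later via InsertBlock,
// so generation never dirties the database whose contents the test asserts on.
chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) {})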
+ chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -259,12 +255,15 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -274,8 +273,10 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -283,7 +284,9 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe numBlocks := 129 signer := types.HomesteadSigner{} - _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -293,7 +296,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe } // Generate the forked chain to be longer than the original chain to check for a regression where // a longer chain can trigger a reorg. - _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks+1, 10, func(i int, gen *BlockGen) { + chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks+1, 10, func(i int, gen *BlockGen) { // Generate a transaction with a different amount to ensure [chain2] is different than [chain1].
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -332,7 +335,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } if err := blockchain.ValidateCanonicalChain(); err != nil { @@ -422,7 +425,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -430,6 +433,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs addr2 = crypto.PubkeyToAddress(key2.PublicKey) // We use two separate databases since GenerateChain commits the state roots to its underlying // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -439,8 +443,10 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -448,7 +454,9 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs numBlocks := 3 signer := types.HomesteadSigner{} - _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks.
+ chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -456,7 +464,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs if err != nil { t.Fatal(err) } - _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { + chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction with a different amount to create a chain of blocks different from [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -476,7 +484,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } if err := blockchain.ValidateCanonicalChain(); err != nil { @@ -531,12 +539,15 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -546,8 +557,10 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -555,7 +568,9 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec numBlocks := 3 signer := types.HomesteadSigner{} - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks.
+ chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -572,7 +587,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain[len(chain)-1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } if err := blockchain.ValidateCanonicalChain(); err != nil { @@ -588,7 +603,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain[0] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() @@ -661,7 +676,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec checkBlockChainState(t, blockchain, gspec, chainDB, create, checkUpdatedState) } -func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -669,6 +684,9 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -681,8 +699,10 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec addr3: {Balance: genesisBalance}, }, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -690,7 +710,9 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec // This call generates a chain of 20 blocks.
signer := types.HomesteadSigner{} - genDB, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 20, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 20, 10, func(i int, gen *BlockGen) { // Send all funds back and forth between the two accounts if i%2 == 0 { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, genesisBalance, params.TxGas, nil, nil), signer, key1) @@ -827,22 +849,31 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { - chainDB := rawdb.NewMemoryDatabase() +func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { + var ( + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() + chainDB = rawdb.NewMemoryDatabase() + ) // Ensure that key1 has some funds in the genesis block. gspec := &Genesis{ Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } defer blockchain.Stop() - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) {}) + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } @@ -866,7 +897,7 @@ func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, gspec *Genesis checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -884,9 +915,10 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genes Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.ToBlock() + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -965,22 +997,22 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genes // Insert two different chains that result in the identical state root.
// Once we accept one of the chains, we insert and accept A3 on top of the shared // state root -// -// G (genesis) -// / \ -// A1 B1 -// | | -// A2 B2 (A2 and B2 represent two different paths to the identical state trie) -// | -// A3 -// -//nolint:goimports -func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +// G (genesis) +// / \ +// A1 B1 +// | | +// A2 B2 (A2 and B2 represent two different paths to the identical state trie) +// | +// A3 +func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -990,15 +1022,19 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } defer blockchain.Stop() signer := types.HomesteadSigner{} - _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { if i < 2 { // Send half the funds from addr1 to addr2 in one transaction per each of the two blocks in [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(500000000), params.TxGas, nil, nil), signer, key1) @@ -1009,7 +1045,7 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa if err != nil { t.Fatal(err) } - _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { + chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1].
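Before the closure body that the comment above introduces, it is worth making the arithmetic explicit: two different payment schedules that move the same total out of addr1 leave identical account contents (balances and nonces), and therefore an identical state trie root. A small self-contained check of that invariant (the balance constant is illustrative, and chain2's exact per-transaction amounts are cut off in this hunk):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	funds := big.NewInt(1000000000) // illustrative starting balance for addr1

	// chain1 moves half the funds in each of two transactions...
	half := new(big.Int).Div(funds, big.NewInt(2))
	chain1Total := new(big.Int).Add(half, half)

	// ...while chain2 moves a quarter and then the remaining three quarters.
	quarter := new(big.Int).Div(funds, big.NewInt(4))
	chain2Total := new(big.Int).Add(quarter, new(big.Int).Sub(funds, quarter))

	// Same total sent and the same number of txs from addr1 (so the same
	// nonce): identical account states, hence an identical state root.
	fmt.Println(chain1Total.Cmp(chain2Total) == 0) // true
}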
if i == 0 { @@ -1042,7 +1078,7 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } // Accept the first block in [chain1] and reject all of [chain2] @@ -1108,22 +1144,22 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa // Once we insert both of the chains, we restart, insert both the chains again, // and then we accept one of the chains and accept A3 on top of the shared state // root -// -// G (genesis) -// / \ -// A1 B1 -// | | -// A2 B2 (A2 and B2 represent two different paths to the identical state trie) -// | -// A3 -// -//nolint:goimports -func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +// G (genesis) +// / \ +// A1 B1 +// | | +// A2 B2 (A2 and B2 represent two different paths to the identical state trie) +// | +// A3 +func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1133,14 +1169,18 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } signer := types.HomesteadSigner{} - _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks.
+ chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { if i < 2 { // Send half the funds from addr1 to addr2 in one transaction per each of the two blocks in [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(500000000), params.TxGas, nil, nil), signer, key1) @@ -1151,7 +1191,7 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth if err != nil { t.Fatal(err) } - _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { + chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. if i == 0 { @@ -1184,13 +1224,12 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } blockchain.Stop() - chainDB = rawdb.NewMemoryDatabase() - blockchain, err = create(chainDB, gspec, common.Hash{}) + blockchain, err = create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1209,7 +1248,7 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain1[1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) + t.Fatalf("Expected current block to be %s:%d, but found %s:%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) } // Accept the first block in [chain1] and reject all of [chain2] @@ -1271,12 +1310,15 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database.
+ genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1286,8 +1328,10 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1295,7 +1339,9 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas // This call generates a chain of 3 blocks. signer := types.LatestSigner(params.TestChainConfig) - _, _, _, err = GenerateChainWithGenesis(gspec, blockchain.engine, 3, 0, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. + _, _, err = GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1320,12 +1366,15 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas } } -func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) + // We use two separate databases since GenerateChain commits the state roots to its underlying + // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1335,8 +1384,10 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1344,8 +1395,19 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, // This call generates a chain of 3 blocks. signer := types.LatestSigner(params.TestChainConfig) - eng := dummy.NewFakerWithMode(TestCallbacks, dummy.Mode{ModeSkipBlockFee: true, ModeSkipCoinbase: true}) - _, chain, _, err := GenerateChainWithGenesis(gspec, eng, 3, 0, func(i int, gen *BlockGen) { + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. 
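Both invalid-block-fee tests in this region hinge on feeding GenerateChain a dynamic-fee transaction that cannot cover the required block fee. The exact fee constants are cut off in the hunks above and below, so the following is only an illustrative sketch of that transaction's shape (types.DynamicFeeTx is the standard go-ethereum dynamic-fee payload; gen, signer, key1 and addr2 come from the surrounding test):

// A dynamic-fee transaction whose fee cap leaves nothing over for the block
// fee; the numbers are illustrative, not the test's actual constants.
tx := types.NewTx(&types.DynamicFeeTx{
	ChainID:   params.TestChainConfig.ChainID,
	Nonce:     gen.TxNonce(addr1),
	To:        &addr2,
	Gas:       params.TxGas,
	GasFeeCap: big.NewInt(params.GWei), // far too low once the base fee applies
	GasTipCap: big.NewInt(0),           // no tip available to pay the block fee
	Value:     big.NewInt(10000),
})
signedTx, _ := types.SignTx(tx, signer, key1)
gen.AddTx(signedTx)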
+ eng := dummy.NewComplexETHFaker(&dummy.ConsensusCallbacks{ + OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { + sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) + return nil, nil, nil + }, + OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) + return nil, nil, nil, nil + }, + }) + chain, _, err := GenerateChain(params.TestChainConfig, genesis, eng, genDB, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1374,7 +1436,7 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, } } -func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -1382,6 +1444,7 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, g addr2 = crypto.PubkeyToAddress(key2.PublicKey) // We use two separate databases since GenerateChain commits the state roots to its underlying // database. + genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1391,8 +1454,10 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, g Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } + genesis := gspec.MustCommit(genDB) + _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec, common.Hash{}) + blockchain, err := create(chainDB, gspec.Config, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1400,9 +1465,11 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, g // This call generates a chain of 3 blocks. signer := types.LatestSigner(params.TestChainConfig) + // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing + // to the BlockChain's database while generating blocks. tip := big.NewInt(50000 * params.GWei) transfer := big.NewInt(10000) - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 0, func(i int, gen *BlockGen) { + chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 0, func(i int, gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, @@ -1463,48 +1530,3 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, g checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } - -// CheckTxIndices checks that the transaction indices are correctly stored in the database ([tail, head]). 
-func CheckTxIndices(t *testing.T, expectedTail *uint64, head uint64, db ethdb.Database, allowNilBlocks bool) { - var tailValue uint64 - if expectedTail != nil { - tailValue = *expectedTail - } - checkTxIndicesHelper(t, expectedTail, tailValue, head, head, db, allowNilBlocks) -} - -// checkTxIndicesHelper checks that the transaction indices are correctly stored in the database. -// [expectedTail] is the expected value of the tail index. -// [indexedFrom] is the block number from which the transactions should be indexed. -// [indexedTo] is the block number to which the transactions should be indexed. -// [head] is the block number of the head block. -func checkTxIndicesHelper(t *testing.T, expectedTail *uint64, indexedFrom uint64, indexedTo uint64, head uint64, db ethdb.Database, allowNilBlocks bool) { - if expectedTail == nil { - require.Nil(t, rawdb.ReadTxIndexTail(db)) - } else { - var stored uint64 - tailValue := *expectedTail - - require.EventuallyWithTf(t, - func(c *assert.CollectT) { - stored = *rawdb.ReadTxIndexTail(db) - require.Equalf(t, tailValue, stored, "expected tail to be %d, found %d", tailValue, stored) - }, - 30*time.Second, 500*time.Millisecond, "expected tail to be %d eventually", tailValue) - } - - for i := uint64(0); i <= head; i++ { - block := rawdb.ReadBlock(db, rawdb.ReadCanonicalHash(db, i), i) - if block == nil && allowNilBlocks { - continue - } - for _, tx := range block.Transactions() { - index := rawdb.ReadTxLookupEntry(db, tx.Hash()) - if i < indexedFrom { - require.Nilf(t, index, "Transaction indices should be deleted, number %d hash %s", i, tx.Hash().Hex()) - } else if i <= indexedTo { - require.NotNilf(t, index, "Missing transaction indices, number %d hash %s", i, tx.Hash().Hex()) - } - } - } -} diff --git a/core/txpool/legacypool/journal.go b/core/txpool/legacypool/journal.go index 68769933bd..4e06253691 100644 --- a/core/txpool/legacypool/journal.go +++ b/core/txpool/legacypool/journal.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. -package legacypool +package core import ( "errors" @@ -32,10 +32,10 @@ import ( "io/fs" "os" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/core/types" ) // errNoActiveJournal is returned if a transaction is attempted to be inserted @@ -51,23 +51,23 @@ type devNull struct{} func (*devNull) Write(p []byte) (n int, err error) { return len(p), nil } func (*devNull) Close() error { return nil } -// journal is a rotating log of transactions with the aim of storing locally +// txJournal is a rotating log of transactions with the aim of storing locally // created transactions to allow non-executed ones to survive node restarts. -type journal struct { +type txJournal struct { path string // Filesystem path to store the transactions at writer io.WriteCloser // Output stream to write new transactions into } // newTxJournal creates a new transaction journal to record local transactions on disk. -func newTxJournal(path string) *journal { - return &journal{ +func newTxJournal(path string) *txJournal { + return &txJournal{ path: path, } } // load parses a transaction journal dump from disk, loading its contents into // the specified pool.
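Before the load implementation that follows, here is the full txJournal lifecycle as the pool drives it. Only load, insert, rotate and close are real methods from this diff; addLocals and allLocalTxs are hypothetical stand-ins for the pool's batch-add callback and its map of local transactions:

// Open the journal and replay any surviving local transactions into the pool.
journal := newTxJournal("transactions.rlp")
if err := journal.load(func(txs []*types.Transaction) []error {
	return addLocals(txs) // hypothetical stand-in for the pool's batch-add hook
}); err != nil {
	log.Warn("Failed to load transaction journal", "err", err)
}
// On every config.Rejournal tick the pool rewrites the file from its current
// local set, dropping entries that have since been mined or evicted.
if err := journal.rotate(allLocalTxs); err != nil { // map[common.Address]types.Transactions
	log.Warn("Failed to rotate local tx journal", "err", err)
}
// On shutdown, flush the contents and release the file handle.
if err := journal.close(); err != nil {
	log.Warn("Failed to close local tx journal", "err", err)
}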
-func (journal *journal) load(add func([]*types.Transaction) []error) error { +func (journal *txJournal) load(add func([]*types.Transaction) []error) error { // Open the journal for loading any past transactions input, err := os.Open(journal.path) if errors.Is(err, fs.ErrNotExist) { @@ -128,7 +128,7 @@ func (journal *journal) load(add func([]*types.Transaction) []error) error { } // insert adds the specified transaction to the local disk journal. -func (journal *journal) insert(tx *types.Transaction) error { +func (journal *txJournal) insert(tx *types.Transaction) error { if journal.writer == nil { return errNoActiveJournal } @@ -140,7 +140,7 @@ func (journal *journal) insert(tx *types.Transaction) error { // rotate regenerates the transaction journal based on the current contents of // the transaction pool. -func (journal *journal) rotate(all map[common.Address]types.Transactions) error { +func (journal *txJournal) rotate(all map[common.Address]types.Transactions) error { // Close the current journal (if any is open) if journal.writer != nil { if err := journal.writer.Close(); err != nil { @@ -180,7 +180,7 @@ func (journal *journal) rotate(all map[common.Address]types.Transactions) error } // close flushes the transaction journal contents to disk and closes the file. -func (journal *journal) close() error { +func (journal *txJournal) close() error { var err error if journal.writer != nil { diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 3ef445d8c6..3bda676b7d 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1,4 +1,4 @@ -// (c) 2024, Ava Labs, Inc. +// (c) 2019-2020, Ava Labs, Inc. // // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -24,11 +24,11 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. -// Package legacypool implements the normal EVM execution transaction pool. -package legacypool +package core import ( "errors" + "fmt" "math" "math/big" "sort" @@ -36,21 +36,21 @@ import ( "sync/atomic" "time" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/txpool" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/params" ) const ( + // chainHeadChanSize is the size of channel listening to ChainHeadEvent. + chainHeadChanSize = 10 + // txSlotSize is used to calculate how many data slots a single transaction // takes up based on its size. The slots are used as DoS protection, ensuring // that validating a new transaction remains a constant operation (in reality // O(maxslots), where max slots are 4 currently).
// // Note: the max contract size is 24KB - txMaxSize = 4 * txSlotSize // 128KB + txMaxSize = 32 * 1024 // 32 KB ) var ( - // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept + // ErrAlreadyKnown is returned if the transaction is already contained + // within the pool. + ErrAlreadyKnown = errors.New("already known") + + // ErrInvalidSender is returned if the transaction contains an invalid signature. + ErrInvalidSender = errors.New("invalid sender") + + // ErrUnderpriced is returned if a transaction's gas price is below the minimum + // configured for the transaction pool. + ErrUnderpriced = errors.New("transaction underpriced") + + // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept // another remote transaction. ErrTxPoolOverflow = errors.New("txpool is full") + + // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced + // with a different one without the required price bump. + ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") + + // ErrGasLimit is returned if a transaction's requested gas limit exceeds the + // maximum allowance of the current block. + ErrGasLimit = errors.New("exceeds block gas limit") + + // ErrNegativeValue is a sanity error to ensure no one is able to specify a + // transaction with a negative value. + ErrNegativeValue = errors.New("negative value") + + // ErrOversizedData is returned if the input data of a transaction is greater + // than some meaningful limit a user might use. This is not a consensus error + // making the transaction invalid, rather a DoS protection. + ErrOversizedData = errors.New("oversized data") ) var ( evictionInterval = time.Minute // Time interval to check for evictable transactions statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats - baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after ApricotPhase3 is enabled + baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after Apricot Phase 3 is enabled ) var ( @@ -98,7 +126,6 @@ var ( invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) - // throttleTxMeter counts how many transactions are rejected due to too-many-changes between // txpool reorgs. throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) @@ -116,26 +143,29 @@ var ( reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) ) -// BlockChain defines the minimal set of methods needed to back a tx pool with -// a chain. Exists to allow mocking the live chain out of tests. -type BlockChain interface { - // Config retrieves the chain's fork configuration. - Config() *params.ChainConfig +// TxStatus is the current status of a transaction as seen by the pool. +type TxStatus uint - // CurrentBlock returns the current head of the chain. - CurrentBlock() *types.Header +const ( + TxStatusUnknown TxStatus = iota + TxStatusQueued + TxStatusPending + TxStatusIncluded +) - // GetBlock retrieves a specific block, used during pool resets. +// blockChain provides the state of the blockchain and current gas limit to do +// some pre-checks in the tx pool and event subscribers.
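Before the blockChain interface that the comment above introduces, a note on the txMaxSize change at the top of this hunk: the (-) side derives the cap from txSlotSize (four slots, 128KB), while the (+) side pins it at 32KB, exactly one slot. The slot computation itself is not shown in this diff; upstream go-ethereum implements it roughly as below (sketch, sizes in bytes):

const txSlotSize = 32 * 1024 // one slot, matching the (+) side's txMaxSize

// numSlots charges a transaction one slot per started 32KB of encoded size,
// so pool capacity limits (GlobalSlots, AccountSlots) count slots, not txs.
func numSlots(sizeBytes uint64) uint64 {
	return (sizeBytes + txSlotSize - 1) / txSlotSize
}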
+type blockChain interface { + CurrentBlock() *types.Block GetBlock(hash common.Hash, number uint64) *types.Block - - // StateAt returns a state database for a given root hash (generally the head). StateAt(root common.Hash) (*state.StateDB, error) + SenderCacher() *TxSenderCacher - SenderCacher() *core.TxSenderCacher + SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription } -// Config are the configuration parameters of the transaction pool. -type Config struct { +// TxPoolConfig are the configuration parameters of the transaction pool. +type TxPoolConfig struct { Locals []common.Address // Addresses that should be treated by default as local NoLocals bool // Whether local transaction handling should be disabled Journal string // Journal of local transactions to survive node restarts @@ -152,11 +182,10 @@ type Config struct { Lifetime time.Duration // Maximum amount of time non-executable transactions are queued } -// DefaultConfig contains the default configurations for the transaction pool. -var DefaultConfig = Config{ - // If we re-enable txpool journaling, we should also add the saved local - // transactions to the p2p gossip on startup. - Journal: "", +// DefaultTxPoolConfig contains the default configurations for the transaction +// pool. +var DefaultTxPoolConfig = TxPoolConfig{ + Journal: "transactions.rlp", Rejournal: time.Hour, PriceLimit: 1, @@ -167,93 +196,103 @@ var DefaultConfig = Config{ AccountQueue: 64, GlobalQueue: 1024, - Lifetime: 10 * time.Minute, + Lifetime: 3 * time.Hour, } // sanitize checks the provided user configurations and changes anything that's // unreasonable or unworkable. -func (config *Config) sanitize() Config { +func (config *TxPoolConfig) sanitize() TxPoolConfig { conf := *config if conf.Rejournal < time.Second { log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) conf.Rejournal = time.Second } if conf.PriceLimit < 1 { - log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) - conf.PriceLimit = DefaultConfig.PriceLimit + log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) + conf.PriceLimit = DefaultTxPoolConfig.PriceLimit } if conf.PriceBump < 1 { - log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump) - conf.PriceBump = DefaultConfig.PriceBump + log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) + conf.PriceBump = DefaultTxPoolConfig.PriceBump } if conf.AccountSlots < 1 { - log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots) - conf.AccountSlots = DefaultConfig.AccountSlots + log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) + conf.AccountSlots = DefaultTxPoolConfig.AccountSlots } if conf.GlobalSlots < 1 { - log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots) - conf.GlobalSlots = DefaultConfig.GlobalSlots + log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) + conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots } if conf.AccountQueue < 1 { - log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated",
DefaultConfig.AccountQueue) - conf.AccountQueue = DefaultConfig.AccountQueue + log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) + conf.AccountQueue = DefaultTxPoolConfig.AccountQueue } if conf.GlobalQueue < 1 { - log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue) - conf.GlobalQueue = DefaultConfig.GlobalQueue + log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) + conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue } if conf.Lifetime < 1 { - log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime) - conf.Lifetime = DefaultConfig.Lifetime + log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) + conf.Lifetime = DefaultTxPoolConfig.Lifetime } return conf } -// LegacyPool contains all currently known transactions. Transactions +// TxPool contains all currently known transactions. Transactions // enter the pool when they are received from the network or submitted // locally. They exit the pool when they are included in the blockchain. // // The pool separates processable transactions (which can be applied to the // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. -type LegacyPool struct { - config Config +type TxPool struct { + config TxPoolConfig chainconfig *params.ChainConfig - chain BlockChain - gasTip atomic.Pointer[big.Int] + chain blockChain + gasPrice *big.Int minimumFee *big.Int txFeed event.Feed + headFeed event.Feed + reorgFeed event.Feed + scope event.SubscriptionScope signer types.Signer mu sync.RWMutex + istanbul bool // Fork indicator whether we are in the istanbul stage. + eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. + eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. + + currentHead *types.Header + // [currentState] is the state of the blockchain head. It is reset whenever + // head changes. + currentState *state.StateDB // [currentStateLock] is required to allow concurrent access to address nonces // and balances during reorgs and gossip handling. currentStateLock sync.Mutex - // closed when the transaction pool is stopped. Any goroutine can listen - // to this to be notified if it should shut down. 
- generalShutdownChan chan struct{} - currentHead atomic.Pointer[types.Header] // Current head of the blockchain - currentState *state.StateDB // Current state in the blockchain head - pendingNonces *noncer // Pending state tracking virtual nonces + pendingNonces *txNoncer // Pending state tracking virtual nonces + currentMaxGas uint64 // Current gas limit for transaction caps locals *accountSet // Set of local transaction to exempt from eviction rules - journal *journal // Journal of local transaction to back up to disk + journal *txJournal // Journal of local transaction to back up to disk - reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools - pending map[common.Address]*list // All currently processable transactions - queue map[common.Address]*list // Queued but non-processable transactions + pending map[common.Address]*txList // All currently processable transactions + queue map[common.Address]*txList // Queued but non-processable transactions beats map[common.Address]time.Time // Last heartbeat from each known account - all *lookup // All transactions to allow lookups - priced *pricedList // All transactions sorted by price - - reqResetCh chan *txpoolResetRequest - reqPromoteCh chan *accountSet - queueTxEventCh chan *types.Transaction - reorgDoneCh chan chan struct{} - reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop - wg sync.WaitGroup // tracks loop, scheduleReorgLoop - initDoneCh chan struct{} // is closed once the pool is initialized (for tests) + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price + + chainHeadCh chan ChainHeadEvent + chainHeadSub event.Subscription + reqResetCh chan *txpoolResetRequest + reqPromoteCh chan *accountSet + queueTxEventCh chan *types.Transaction + reorgDoneCh chan chan struct{} + reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop + generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can listen + // to this to be notified if it should shut down. + wg sync.WaitGroup // tracks loop, scheduleReorgLoop + initDoneCh chan struct{} // is closed once the pool is initialized (for tests) changesSinceReorg int // A counter for how many drops we've performed in-between reorg. } @@ -262,22 +301,23 @@ type txpoolResetRequest struct { oldHead, newHead *types.Header } -// New creates a new transaction pool to gather, sort and filter inbound +// NewTxPool creates a new transaction pool to gather, sort and filter inbound // transactions from the network. 
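For the constructor rework below, a hedged wiring sketch; it assumes *core.BlockChain satisfies the unexported blockChain interface (as it does elsewhere in this tree) and that the caller owns the shutdown ordering:

```go
package poolsetup

import (
	"github.com/tenderly/coreth/core"
	"github.com/tenderly/coreth/params"
)

// startPool builds a pool against an already-initialized chain. NewTxPool
// performs its initial reset against chain.CurrentBlock() and subscribes to
// chain-head events itself, so the pool is live as soon as it returns.
func startPool(chain *core.BlockChain) *core.TxPool {
	cfg := core.DefaultTxPoolConfig
	cfg.Journal = "" // keep journaling off unless a path is configured

	return core.NewTxPool(cfg, params.TestChainConfig, chain)
}
```

Teardown is symmetric: call pool.Stop() to unsubscribe from the chain and drain the loops before the chain itself is shut down.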
-func New(config Config, chain BlockChain) *LegacyPool { +func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() // Create the transaction pool with its initial settings - pool := &LegacyPool{ + pool := &TxPool{ config: config, + chainconfig: chainconfig, chain: chain, - chainconfig: chain.Config(), - signer: types.LatestSigner(chain.Config()), - pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), + signer: types.LatestSigner(chainconfig), + pending: make(map[common.Address]*txList), + queue: make(map[common.Address]*txList), beats: make(map[common.Address]time.Time), - all: newLookup(), + all: newTxLookup(), + chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), reqResetCh: make(chan *txpoolResetRequest), reqPromoteCh: make(chan *accountSet), queueTxEventCh: make(chan *types.Transaction), @@ -285,91 +325,56 @@ func New(config Config, chain BlockChain) *LegacyPool { reorgShutdownCh: make(chan struct{}), initDoneCh: make(chan struct{}), generalShutdownChan: make(chan struct{}), + gasPrice: new(big.Int).SetUint64(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) for _, addr := range config.Locals { log.Info("Setting new local account", "address", addr) pool.locals.add(addr) } - pool.priced = newPricedList(pool.all) - - if !config.NoLocals && config.Journal != "" { - pool.journal = newTxJournal(config.Journal) - } - return pool -} - -// Filter returns whether the given transaction can be consumed by the legacy -// pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction. -func (pool *LegacyPool) Filter(tx *types.Transaction) bool { - switch tx.Type() { - case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType: - return true - default: - return false - } -} - -// Init sets the gas price needed to keep a transaction in the pool and the chain -// head to allow balance / nonce checks. The transaction journal will be loaded -// from disk and filtered based on the provided starting settings. The internal -// goroutines will be spun up and the pool deemed operational afterwards. -func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error { - // Set the address reserver to request exclusive access to pooled accounts - pool.reserve = reserve - - // Set the basic pool parameters - pool.gasTip.Store(gasTip) + pool.priced = newTxPricedList(pool.all) + pool.reset(nil, chain.CurrentBlock().Header()) - // Initialize the state with head block, or fallback to empty one in - // case the head state is not available(might occur when node is not - // fully synced). - statedb, err := pool.chain.StateAt(head.Root) - if err != nil { - statedb, err = pool.chain.StateAt(types.EmptyRootHash) - } - if err != nil { - return err - } - pool.currentHead.Store(head) - pool.currentState = statedb - pool.pendingNonces = newNoncer(statedb) - - // Start the reorg loop early, so it can handle requests generated during - // journal loading. + // Start the reorg loop early so it can handle requests generated during journal loading. 
pool.wg.Add(1) go pool.scheduleReorgLoop() // If local transactions and journaling is enabled, load from disk - if pool.journal != nil { - if err := pool.journal.load(pool.addLocals); err != nil { + if !config.NoLocals && config.Journal != "" { + pool.journal = newTxJournal(config.Journal) + + if err := pool.journal.load(pool.AddLocals); err != nil { log.Warn("Failed to load transaction journal", "err", err) } if err := pool.journal.rotate(pool.local()); err != nil { log.Warn("Failed to rotate transaction journal", "err", err) } } + + // Subscribe events from blockchain and start the main event loop. + pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) pool.wg.Add(1) go pool.loop() pool.startPeriodicFeeUpdate() - return nil + return pool } // loop is the transaction pool's main event loop, waiting for and reacting to // outside blockchain events as well as for various reporting and transaction // eviction events. -func (pool *LegacyPool) loop() { +func (pool *TxPool) loop() { defer pool.wg.Done() var ( prevPending, prevQueued, prevStales int - // Start the stats reporting and transaction eviction tickers report = time.NewTicker(statsReportInterval) evict = time.NewTicker(evictionInterval) journal = time.NewTicker(pool.config.Rejournal) + // Track the previous head headers for transaction reorgs + head = pool.chain.CurrentBlock() ) defer report.Stop() defer evict.Stop() @@ -379,8 +384,17 @@ func (pool *LegacyPool) loop() { close(pool.initDoneCh) for { select { - // Handle pool shutdown - case <-pool.reorgShutdownCh: + // Handle ChainHeadEvent + case ev := <-pool.chainHeadCh: + if ev.Block != nil { + pool.requestReset(head.Header(), ev.Block.Header()) + head = ev.Block + pool.headFeed.Send(NewTxPoolHeadEvent{Block: head}) + } + + // System shutdown. + case <-pool.chainHeadSub.Err(): + close(pool.reorgShutdownCh) return // Handle stats reporting ticks @@ -388,7 +402,7 @@ func (pool *LegacyPool) loop() { pool.mu.RLock() pending, queued := pool.stats() pool.mu.RUnlock() - stales := int(pool.priced.stales.Load()) + stales := int(atomic.LoadInt64(&pool.priced.stales)) if pending != prevPending || queued != prevQueued || stales != prevStales { log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) @@ -407,7 +421,7 @@ func (pool *LegacyPool) loop() { if time.Since(pool.beats[addr]) > pool.config.Lifetime { list := pool.queue[addr].Flatten() for _, tx := range list { - pool.removeTx(tx.Hash(), true, true) + pool.removeTx(tx.Hash(), true) } queuedEvictionMeter.Mark(int64(len(list))) } @@ -427,60 +441,70 @@ func (pool *LegacyPool) loop() { } } -// Close terminates the transaction pool. -func (pool *LegacyPool) Close() error { - close(pool.generalShutdownChan) +// Stop terminates the transaction pool. +func (pool *TxPool) Stop() { + // Unsubscribe all subscriptions registered from txpool + pool.scope.Close() - // Terminate the pool reorger and return - close(pool.reorgShutdownCh) + close(pool.generalShutdownChan) + // Unsubscribe subscriptions registered from blockchain + pool.chainHeadSub.Unsubscribe() pool.wg.Wait() if pool.journal != nil { pool.journal.close() } log.Info("Transaction pool stopped") - return nil } -// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be -// kept in sync with the main transaction pool's internal state. 
-func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { - wait := pool.requestReset(oldHead, newHead) - <-wait +// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and +// starts sending events to the given channel. +func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { + return pool.scope.Track(pool.txFeed.Subscribe(ch)) +} + +// SubscribeNewHeadEvent registers a subscription of NewHeadEvent and +// starts sending events to the given channel. +func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- NewTxPoolHeadEvent) event.Subscription { + return pool.scope.Track(pool.headFeed.Subscribe(ch)) } -// SubscribeTransactions registers a subscription for new transaction events, -// supporting feeding only newly seen or also resurrected transactions. -func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { - // The legacy pool has a very messed up internal shuffling, so it's kind of - // hard to separate newly discovered transaction from resurrected ones. This - // is because the new txs are added to the queue, resurrected ones too and - // reorgs run lazily, so separating the two would need a marker. - return pool.txFeed.Subscribe(ch) +// SubscribeNewReorgEvent registers a subscription of NewReorgEvent and +// starts sending events to the given channel. +func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- NewTxPoolReorgEvent) event.Subscription { + return pool.scope.Track(pool.reorgFeed.Subscribe(ch)) } -// SetGasTip updates the minimum gas tip required by the transaction pool for a +// GasPrice returns the current gas price enforced by the transaction pool. +func (pool *TxPool) GasPrice() *big.Int { + pool.mu.RLock() + defer pool.mu.RUnlock() + + return new(big.Int).Set(pool.gasPrice) +} + +// SetGasPrice updates the minimum price required by the transaction pool for a // new transaction, and drops all transactions below this threshold. -func (pool *LegacyPool) SetGasTip(tip *big.Int) { +func (pool *TxPool) SetGasPrice(price *big.Int) { pool.mu.Lock() defer pool.mu.Unlock() - old := pool.gasTip.Load() - pool.gasTip.Store(new(big.Int).Set(tip)) - - // If the min miner fee increased, remove transactions below the new threshold - if tip.Cmp(old) > 0 { + old := pool.gasPrice + pool.gasPrice = price + // If the min miner fee increased, remove transactions below the new threshold + if price.Cmp(old) > 0 { // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead - drop := pool.all.RemotesBelowTip(tip) + drop := pool.all.RemotesBelowTip(price) for _, tx := range drop { - pool.removeTx(tx.Hash(), false, true) + pool.removeTx(tx.Hash(), false) } pool.priced.Removed(len(drop)) } - log.Info("Legacy pool tip threshold updated", "tip", tip) + + log.Info("Transaction pool price threshold updated", "price", price) } -func (pool *LegacyPool) SetMinFee(minFee *big.Int) { +func (pool *TxPool) SetMinFee(minFee *big.Int) { pool.mu.Lock() defer pool.mu.Unlock() @@ -489,7 +513,7 @@ func (pool *LegacyPool) SetMinFee(minFee *big.Int) { // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top.
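A consumption sketch for the three scope-tracked feeds above; buffer sizes and the log statements are illustrative:

```go
package poolwatch

import (
	"log"

	"github.com/tenderly/coreth/core"
)

func watch(pool *core.TxPool) {
	txCh := make(chan core.NewTxsEvent, 16)
	headCh := make(chan core.NewTxPoolHeadEvent, 16)

	subTxs := pool.SubscribeNewTxsEvent(txCh)
	subHead := pool.SubscribeNewHeadEvent(headCh)
	defer subTxs.Unsubscribe()
	defer subHead.Unsubscribe()

	for {
		select {
		case ev := <-txCh:
			log.Printf("%d txs promoted to pending", len(ev.Txs))
		case ev := <-headCh:
			log.Printf("pool reset to head %d", ev.Block.NumberU64())
		case <-subTxs.Err(): // closed by pool.Stop via scope.Close
			return
		}
	}
}
```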
-func (pool *LegacyPool) Nonce(addr common.Address) uint64 { +func (pool *TxPool) Nonce(addr common.Address) uint64 { pool.mu.RLock() defer pool.mu.RUnlock() @@ -498,7 +522,7 @@ func (pool *LegacyPool) Nonce(addr common.Address) uint64 { // Stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. -func (pool *LegacyPool) Stats() (int, int) { +func (pool *TxPool) Stats() (int, int) { pool.mu.RLock() defer pool.mu.RUnlock() @@ -507,7 +531,7 @@ func (pool *LegacyPool) Stats() (int, int) { // stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. -func (pool *LegacyPool) stats() (int, int) { +func (pool *TxPool) stats() (int, int) { pending := 0 for _, list := range pool.pending { pending += list.Len() @@ -521,15 +545,15 @@ func (pool *LegacyPool) stats() (int, int) { // Content retrieves the data content of the transaction pool, returning all the // pending as well as queued transactions, grouped by account and sorted by nonce. -func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { +func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { pool.mu.Lock() defer pool.mu.Unlock() - pending := make(map[common.Address][]*types.Transaction, len(pool.pending)) + pending := make(map[common.Address]types.Transactions) for addr, list := range pool.pending { pending[addr] = list.Flatten() } - queued := make(map[common.Address][]*types.Transaction, len(pool.queue)) + queued := make(map[common.Address]types.Transactions) for addr, list := range pool.queue { queued[addr] = list.Flatten() } @@ -538,15 +562,15 @@ func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[ // ContentFrom retrieves the data content of the transaction pool, returning the // pending as well as queued transactions of this address, grouped by nonce. -func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { +func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { pool.mu.RLock() defer pool.mu.RUnlock() - var pending []*types.Transaction + var pending types.Transactions if list, ok := pool.pending[addr]; ok { pending = list.Flatten() } - var queued []*types.Transaction + var queued types.Transactions if list, ok := pool.queue[addr]; ok { queued = list.Flatten() } @@ -560,70 +584,42 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, // The enforceTips parameter can be used to do an extra filtering on the pending // transactions and only return those whose **effective** tip is large enough in // the next pending execution environment. -func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction { - return pool.PendingWithBaseFee(enforceTips, nil) -} - -// If baseFee is nil, then pool.priced.urgent.baseFee is used. 
-func (pool *LegacyPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*txpool.LazyTransaction { +func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { pool.mu.Lock() defer pool.mu.Unlock() - if baseFee == nil { - baseFee = pool.priced.urgent.baseFee - } - - pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending)) + pending := make(map[common.Address]types.Transactions) for addr, list := range pool.pending { txs := list.Flatten() // If the miner requests tip enforcement, cap the lists now if enforceTips && !pool.locals.contains(addr) { for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), baseFee) < 0 { + if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { txs = txs[:i] break } } } if len(txs) > 0 { - lazies := make([]*txpool.LazyTransaction, len(txs)) - for i := 0; i < len(txs); i++ { - lazies[i] = &txpool.LazyTransaction{ - Pool: pool, - Hash: txs[i].Hash(), - Tx: txs[i].WithoutBlobTxSidecar(), - Time: txs[i].Time(), - GasFeeCap: txs[i].GasFeeCap(), - GasTipCap: txs[i].GasTipCap(), - Gas: txs[i].Gas(), - BlobGas: txs[i].BlobGas(), - } - } - pending[addr] = lazies + pending[addr] = txs } } return pending } -// IteratePending iterates over [pool.pending] until [f] returns false. -// The caller must not modify [tx]. Returns false if iteration was interrupted. -func (pool *LegacyPool) IteratePending(f func(tx *types.Transaction) bool) bool { - pool.mu.RLock() - defer pool.mu.RUnlock() - - for _, list := range pool.pending { - for _, tx := range list.txs.items { - if !f(tx) { - return false - } - } +// PendingSize returns the number of pending txs in the tx pool. +func (pool *TxPool) PendingSize() int { + pending := pool.Pending(true) + count := 0 + for _, txs := range pending { + count += len(txs) } - return true + return count } // Locals retrieves the accounts currently considered local by the pool. -func (pool *LegacyPool) Locals() []common.Address { +func (pool *TxPool) Locals() []common.Address { pool.mu.Lock() defer pool.mu.Unlock() @@ -633,7 +629,7 @@ // local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. -func (pool *LegacyPool) local() map[common.Address]types.Transactions { +func (pool *TxPool) local() map[common.Address]types.Transactions { txs := make(map[common.Address]types.Transactions) for addr := range pool.locals.accounts { if pending := pool.pending[addr]; pending != nil { @@ -646,72 +642,88 @@ func (pool *LegacyPool) local() map[common.Address]types.Transactions { return txs } -// validateTxBasics checks whether a transaction is valid according to the consensus -// rules, but does not check state-dependent validation such as sufficient balance. -// This check is meant as an early check which only needs to be performed once, -// and does not require the pool mutex to be held. -func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error { - opts := &txpool.ValidationOptions{ - Config: pool.chainconfig, - Accept: 0 | - 1<<types.LegacyTxType | - 1<<types.AccessListTxType | - 1<<types.DynamicFeeTxType, - MaxSize: txMaxSize, - MinTip: pool.gasTip.Load(), - } - if err := txpool.ValidateTransaction(tx, pool.signer, opts); err != nil { - return err - } - return nil -} +// checkTxState checks the state-dependent validity of a transaction: the +// sender must be able to cover the cost of the transaction and the nonce must +// not be lower than the sender's current nonce. +func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) error { + pool.currentStateLock.Lock() + defer pool.currentStateLock.Unlock() + + // Transactor should have enough funds to cover the costs: cost == V + GP * GL + if balance := pool.currentState.GetBalance(from); balance.Cmp(tx.Cost()) < 0 { + return fmt.Errorf("%w: address %s have (%d) want (%d)", ErrInsufficientFunds, from.Hex(), balance, tx.Cost()) + } + txNonce := tx.Nonce() + // Ensure the transaction adheres to nonce ordering + if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce { + return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)", + ErrNonceTooLow, from.Hex(), currentNonce, txNonce) } return nil } // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size).
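With the lazy-transaction plumbing gone, Pending above hands back fully materialized per-account lists again, which is the shape a block producer consumes. A hedged sketch, assuming the NewTransactionsByPriceAndNonce helper from core/types with the signature used in this vintage:

```go
package poolread

import (
	"fmt"
	"math/big"

	"github.com/tenderly/coreth/core"
	"github.com/tenderly/coreth/core/types"
)

// drainPending walks the pending set best-first, the way a miner would.
func drainPending(pool *core.TxPool, signer types.Signer, baseFee *big.Int) {
	pending := pool.Pending(true) // true: enforce the pool's tip floor for remotes
	ordered := types.NewTransactionsByPriceAndNonce(signer, pending, baseFee)
	for tx := ordered.Peek(); tx != nil; tx = ordered.Peek() {
		fmt.Println("next best:", tx.Hash())
		ordered.Shift() // advance within the same sender's nonce-ordered list
	}
}
```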
-func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() +func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { + // Accept only legacy transactions until EIP-2718/2930 activates. + if !pool.eip2718 && tx.Type() != types.LegacyTxType { + return ErrTxTypeNotSupported + } + // Reject dynamic fee transactions until EIP-1559 activates. + if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { + return ErrTxTypeNotSupported + } + // Reject transactions over defined size to prevent DOS attacks + if txSize := uint64(tx.Size()); txSize > txMaxSize { + return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, txSize, txMaxSize) + } + // Transactions can't be negative. This may never happen using RLP decoded + // transactions but may occur if you create a transaction using the RPC. + if tx.Value().Sign() < 0 { + return ErrNegativeValue + } + // Ensure the transaction doesn't exceed the current block limit gas. + if txGas := tx.Gas(); pool.currentMaxGas < txGas { + return fmt.Errorf("%w: tx gas (%d) > current max gas (%d)", ErrGasLimit, txGas, pool.currentMaxGas) + } + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return ErrTipAboveFeeCap + } + // Make sure the transaction is signed properly. + from, err := types.Sender(pool.signer, tx) + if err != nil { + return ErrInvalidSender + } + // Drop non-local transactions under our own minimal accepted gas price or tip + if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { + return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice) + } + // Drop the transaction if the gas fee cap is below the pool's minimum fee + if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 { + return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee) + } - opts := &txpool.ValidationOptionsWithState{ - State: pool.currentState, - Rules: pool.chainconfig.Rules( - pool.currentHead.Load().Number, - pool.currentHead.Load().Time, - ), - MinimumFee: pool.minimumFee, - - FirstNonceGap: nil, // Pool allows arbitrary arrival order, don't invalidate nonce gaps - UsedAndLeftSlots: func(addr common.Address) (int, int) { - var have int - if list := pool.pending[addr]; list != nil { - have += list.Len() - } - if list := pool.queue[addr]; list != nil { - have += list.Len() - } - return have, math.MaxInt - }, - ExistingExpenditure: func(addr common.Address) *big.Int { - if list := pool.pending[addr]; list != nil { - return list.totalcost - } - return new(big.Int) - }, - ExistingCost: func(addr common.Address, nonce uint64) *big.Int { - if list := pool.pending[addr]; list != nil { - if tx := list.txs.Get(nonce); tx != nil { - return tx.Cost() - } - } - return nil - }, + // Ensure the transaction adheres to nonce ordering + if err := pool.checkTxState(from, tx); err != nil { + return err } - if err := txpool.ValidateTransactionWithState(tx, pool.signer, opts); err != nil { + // Transactor should have enough funds to cover the costs + + // Ensure the transaction has more gas than the basic tx fee. 
+ intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) + if err != nil { return err } + if txGas := tx.Gas(); txGas < intrGas { + return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas) + } return nil } @@ -720,15 +732,15 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { // pending or queued one, it overwrites the previous transaction if its price is higher. // // If a newly added transaction is marked as local, its sending account will be -// added to the allowlist, preventing any associated transaction from being dropped +// added to the allowlist, preventing any associated transaction from being dropped // out of the pool due to pricing constraints. -func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { +func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { // If the transaction is already known, discard it hash := tx.Hash() if pool.all.Get(hash) != nil { log.Trace("Discarding already known transaction", "hash", hash) knownTxMeter.Mark(1) - return false, txpool.ErrAlreadyKnown + return false, ErrAlreadyKnown } // Make the local flag. If it's from local source or it's from the network but // the sender is marked as local previously, treat it as the local transaction. @@ -740,40 +752,14 @@ invalidTxMeter.Mark(1) return false, err } - // already validated by this point - from, _ := types.Sender(pool.signer, tx) - - // If the address is not yet known, request exclusivity to track the account - // only by this subpool until all transactions are evicted - var ( - _, hasPending = pool.pending[from] - _, hasQueued = pool.queue[from] - ) - if !hasPending && !hasQueued { - if err := pool.reserve(from, true); err != nil { - return false, err - } - defer func() { - // If the transaction is rejected by some post-validation check, remove - // the lock on the reservation set. - // - // Note, `err` here is the named error return, which will be initialized - // by a return statement before running deferred methods. Take care with - // removing or subscoping err as it will break this clause. - if err != nil { - pool.reserve(from, false) - } - }() - } // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it if !isLocal && pool.priced.Underpriced(tx) { log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) underpricedTxMeter.Mark(1) - return false, txpool.ErrUnderpriced + return false, ErrUnderpriced } - // We're about to replace a transaction. The reorg does a more thorough // analysis of what to remove and how, but it runs async.
We don't want to // do too many replacements between reorg-runs, so we cap the number of @@ -794,46 +780,23 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e overflowedTxMeter.Mark(1) return false, ErrTxPoolOverflow } - - // If the new transaction is a future transaction it should never churn pending transactions - if !isLocal && pool.isGapped(from, tx) { - var replacesPending bool - for _, dropTx := range drop { - dropSender, _ := types.Sender(pool.signer, dropTx) - if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) { - replacesPending = true - break - } - } - // Add all transactions back to the priced queue - if replacesPending { - for _, dropTx := range drop { - pool.priced.Put(dropTx, false) - } - log.Trace("Discarding future transaction replacing pending tx", "hash", hash) - return false, txpool.ErrFutureReplacePending - } - } - + // Bump the counter of rejections-since-reorg + pool.changesSinceReorg += len(drop) // Kick out the underpriced remote transactions. for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) underpricedTxMeter.Mark(1) - - sender, _ := types.Sender(pool.signer, tx) - dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if last from the acc - - pool.changesSinceReorg += dropped + pool.removeTx(tx.Hash(), false) } } - // Try to replace an existing transaction in the pending pool - if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) { + from, _ := types.Sender(pool.signer, tx) // already validated + if list := pool.pending[from]; list != nil && list.Overlaps(tx) { // Nonce already pending, check if required price bump is met inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { pendingDiscardMeter.Mark(1) - return false, txpool.ErrReplaceUnderpriced + return false, ErrReplaceUnderpriced } // New transaction is better, replace old one if old != nil { @@ -871,44 +834,20 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e return replaced, nil } -// isGapped reports whether the given transaction is immediately executable. -func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool { - // Short circuit if transaction falls within the scope of the pending list - // or matches the next pending nonce which can be promoted as an executable - // transaction afterwards. Note, the tx staleness is already checked in - // 'validateTx' function previously. - next := pool.pendingNonces.get(from) - if tx.Nonce() <= next { - return false - } - // The transaction has a nonce gap with pending list, it's only considered - // as executable if transactions in queue can fill up the nonce gap. - queue, ok := pool.queue[from] - if !ok { - return true - } - for nonce := next; nonce < tx.Nonce(); nonce++ { - if !queue.Contains(nonce) { - return true // txs in queue can't fill up the nonce gap - } - } - return false -} - // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! 
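The replacement paths here and in enqueueTx below share one rule: the incoming transaction must out-bid the old one by PriceBump percent on both tip and fee cap. A small arithmetic sketch (minReplacementFee is a hypothetical helper, not pool API):

```go
package main

import (
	"fmt"
	"math/big"
)

// minReplacementFee returns the smallest value that clears a PriceBump
// threshold: old * (100 + bump) / 100, the same shape txList.Add enforces.
func minReplacementFee(old *big.Int, priceBump uint64) *big.Int {
	a := new(big.Int).SetUint64(100 + priceBump)
	a.Mul(a, old)
	return a.Div(a, big.NewInt(100))
}

func main() {
	oldTip := big.NewInt(1_000_000_000) // 1 gwei
	// With the default PriceBump of 10, a replacement needs at least 1.1 gwei.
	fmt.Println(minReplacementFee(oldTip, 10)) // 1100000000
}
```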
-func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { +func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { // Try to insert the transaction into the future queue from, _ := types.Sender(pool.signer, tx) // already validated if pool.queue[from] == nil { - pool.queue[from] = newList(false) + pool.queue[from] = newTxList(false) } inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this queuedDiscardMeter.Mark(1) - return false, txpool.ErrReplaceUnderpriced + return false, ErrReplaceUnderpriced } // Discard any previous transaction and mark this if old != nil { @@ -937,7 +876,7 @@ // journalTx adds the specified transaction to the local disk journal if it is // deemed to have been sent from a local account. -func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) { +func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { // Only journal if it's enabled and the transaction is local if pool.journal == nil || !pool.locals.contains(from) { return @@ -951,10 +890,10 @@ // and returns whether it was inserted or an older was better. // // Note, this method assumes the pool lock is held! -func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { +func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { // Try to insert the transaction into the pending queue if pool.pending[addr] == nil { - pool.pending[addr] = newList(true) + pool.pending[addr] = newTxList(true) } list := pool.pending[addr] @@ -983,55 +922,53 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ return true } -// addLocals enqueues a batch of transactions into the pool if they are valid, marking the -// senders as local ones, ensuring they go around the local pricing constraints. +// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the +// senders as local ones, ensuring they go around the local pricing constraints. // // This method is used to add transactions from the RPC API and performs synchronous pool // reorganization and event propagation. -func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error { - return pool.Add(txs, !pool.config.NoLocals, true) +func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { + return pool.addTxs(txs, !pool.config.NoLocals, true) } -// addLocal enqueues a single local transaction into the pool if it is valid. This is -// a convenience wrapper around addLocals. -func (pool *LegacyPool) addLocal(tx *types.Transaction) error { - return pool.addLocals([]*types.Transaction{tx})[0] +// AddLocal enqueues a single local transaction into the pool if it is valid. This is +// a convenience wrapper around AddLocals. +func (pool *TxPool) AddLocal(tx *types.Transaction) error { + errs := pool.AddLocals([]*types.Transaction{tx}) + return errs[0] } -// addRemotes enqueues a batch of transactions into the pool if they are valid. If the +// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the // senders are not among the locally tracked ones, full pricing constraints will apply.
// // This method is used to add transactions from the p2p network and does not wait for pool // reorganization and internal event propagation. -func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error { - return pool.Add(txs, false, false) +func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { + return pool.addTxs(txs, false, false) } -// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience -// wrapper around addRemotes. -func (pool *LegacyPool) addRemote(tx *types.Transaction) error { - return pool.addRemotes([]*types.Transaction{tx})[0] +// This is like AddRemotes, but waits for pool reorganization. Tests use this method. +func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { + return pool.addTxs(txs, false, true) } -// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method. -func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error { - return pool.Add(txs, false, true) +// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. +func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { + errs := pool.AddRemotesSync([]*types.Transaction{tx}) + return errs[0] } -// This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method. -func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error { - return pool.Add([]*types.Transaction{tx}, false, true)[0] -} - -// Add enqueues a batch of transactions into the pool if they are valid. Depending -// on the local flag, full pricing constraints will or will not be applied. +// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience +// wrapper around AddRemotes. // -// If sync is set, the method will block until all internal maintenance related -// to the add is finished. Only use this during tests for determinism! -func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error { - // Do not treat as local if local transactions have been disabled - local = local && !pool.config.NoLocals +// Deprecated: use AddRemotes +func (pool *TxPool) AddRemote(tx *types.Transaction) error { + errs := pool.AddRemotes([]*types.Transaction{tx}) + return errs[0] +} +// addTxs attempts to queue a batch of transactions if they are valid. 
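Errors come back positionally from all of the batch adders above; a submission sketch (key generation and fee values illustrative):

```go
package poolsubmit

import (
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/tenderly/coreth/core"
	"github.com/tenderly/coreth/core/types"
)

func submitOne(pool *core.TxPool, signer types.Signer) {
	key, _ := crypto.GenerateKey()
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(1),
		21000, big.NewInt(225_000_000_000), nil)
	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		log.Fatal(err)
	}
	// errs[i] corresponds to txs[i]; nil means the tx was queued or promoted.
	for i, err := range pool.AddRemotes([]*types.Transaction{signed}) {
		if err != nil {
			log.Printf("tx %d rejected: %v", i, err)
		}
	}
}
```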
+func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { // Filter out known ones without obtaining the pool lock or recovering signatures var ( errs = make([]error, len(txs)) @@ -1040,16 +977,16 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error for i, tx := range txs { // If the transaction is known, pre-set the error slot if pool.all.Get(tx.Hash()) != nil { - errs[i] = txpool.ErrAlreadyKnown + errs[i] = ErrAlreadyKnown knownTxMeter.Mark(1) continue } - // Exclude transactions with basic errors, e.g invalid signatures and - // insufficient intrinsic gas as soon as possible and cache senders - // in transactions before obtaining lock - if err := pool.validateTxBasics(tx, local); err != nil { - errs[i] = err - log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err) + // Exclude transactions with invalid signatures as soon as + // possible and cache senders in transactions before + // obtaining lock + _, err := types.Sender(pool.signer, tx) + if err != nil { + errs[i] = ErrInvalidSender invalidTxMeter.Mark(1) continue } @@ -1065,7 +1002,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error newErrs, dirtyAddrs := pool.addTxsLocked(news, local) pool.mu.Unlock() - var nilSlot = 0 + nilSlot := 0 for _, err := range newErrs { for errs[nilSlot] != nil { nilSlot++ @@ -1083,7 +1020,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error // addTxsLocked attempts to queue a batch of transactions if they are valid. // The transaction pool lock must be held. -func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { +func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { dirty := newAccountSet(pool.signer) errs := make([]error, len(txs)) for i, tx := range txs { @@ -1099,79 +1036,61 @@ func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]er // Status returns the status (unknown/pending/queued) of a batch of transactions // identified by their hashes. -func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus { - tx := pool.get(hash) - if tx == nil { - return txpool.TxStatusUnknown - } - from, _ := types.Sender(pool.signer, tx) // already validated - - pool.mu.RLock() - defer pool.mu.RUnlock() - - if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { - return txpool.TxStatusPending - } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { - return txpool.TxStatusQueued +func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { + status := make([]TxStatus, len(hashes)) + for i, hash := range hashes { + tx := pool.Get(hash) + if tx == nil { + continue + } + from, _ := types.Sender(pool.signer, tx) // already validated + pool.mu.RLock() + if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { + status[i] = TxStatusPending + } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { + status[i] = TxStatusQueued + } + // implicit else: the tx may have been included into a block between + // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct + pool.mu.RUnlock() } - return txpool.TxStatusUnknown + return status } // Get returns a transaction if it is contained in the pool and nil otherwise. 
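The batched Status accessor above pairs naturally with Get and Has; a lookup sketch:

```go
package poolquery

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/core"
)

func report(pool *core.TxPool, hashes []common.Hash) {
	names := map[core.TxStatus]string{
		core.TxStatusUnknown: "unknown (dropped or already included)",
		core.TxStatusQueued:  "queued (nonce gap or not yet promoted)",
		core.TxStatusPending: "pending (executable)",
	}
	for i, st := range pool.Status(hashes) {
		fmt.Println(hashes[i].Hex(), "=>", names[st])
	}
}
```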
-func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction { - tx := pool.get(hash) - if tx == nil { - return nil - } - return tx -} - -// get returns a transaction if it is contained in the pool and nil otherwise. -func (pool *LegacyPool) get(hash common.Hash) *types.Transaction { +func (pool *TxPool) Get(hash common.Hash) *types.Transaction { return pool.all.Get(hash) } // Has returns an indicator whether txpool has a transaction cached with the // given hash. -func (pool *LegacyPool) Has(hash common.Hash) bool { +func (pool *TxPool) Has(hash common.Hash) bool { return pool.all.Get(hash) != nil } -func (pool *LegacyPool) HasLocal(hash common.Hash) bool { +// HasLocal returns an indicator whether txpool has a local transaction cached with +// the given hash. +func (pool *TxPool) HasLocal(hash common.Hash) bool { return pool.all.GetLocal(hash) != nil } +func (pool *TxPool) RemoveTx(hash common.Hash) { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.removeTx(hash, true) +} + // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. -// -// In unreserve is false, the account will not be relinquished to the main txpool -// even if there are no more references to it. This is used to handle a race when -// a tx being added, and it evicts a previously scheduled tx from the same account, -// which could lead to a premature release of the lock. -// -// Returns the number of transactions removed from the pending queue. -func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int { +func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Fetch the transaction we wish to delete tx := pool.all.Get(hash) if tx == nil { - return 0 + return } addr, _ := types.Sender(pool.signer, tx) // already validated during insertion - // If after deletion there are no more transactions belonging to this account, - // relinquish the address reservation. It's a bit convoluted do this, via a - // defer, but it's safer vs. the many return pathways. - if unreserve { - defer func() { - var ( - _, hasPending = pool.pending[addr] - _, hasQueued = pool.queue[addr] - ) - if !hasPending && !hasQueued { - pool.reserve(addr, false) - } - }() - } // Remove it from the list of known transactions pool.all.Remove(hash) if outofbound { @@ -1196,7 +1115,7 @@ pool.pendingNonces.setIfLower(addr, tx.Nonce()) // Reduce the pending counter pendingGauge.Dec(int64(1 + len(invalids))) - return 1 + len(invalids) + return } } // Transaction is in the future queue @@ -1210,12 +1129,11 @@ delete(pool.beats, addr) } } - return 0 } // requestReset requests a pool reset to the new head block. // The returned channel is closed when the reset has occurred. -func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { +func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { select { case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: return <-pool.reorgDoneCh @@ -1226,7 +1144,7 @@ // requestPromoteExecutables requests transaction promotion checks for the given addresses. // The returned channel is closed when the promotion checks have occurred.
-func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} { +func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { select { case pool.reqPromoteCh <- set: return <-pool.reorgDoneCh @@ -1236,7 +1154,7 @@ func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} } // queueTxEvent enqueues a transaction event to be sent in the next reorg run. -func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) { +func (pool *TxPool) queueTxEvent(tx *types.Transaction) { select { case pool.queueTxEventCh <- tx: case <-pool.reorgShutdownCh: @@ -1246,7 +1164,7 @@ func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) { // scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not // call those methods directly, but request them being run using requestReset and // requestPromoteExecutables instead. -func (pool *LegacyPool) scheduleReorgLoop() { +func (pool *TxPool) scheduleReorgLoop() { defer pool.wg.Done() var ( @@ -1255,7 +1173,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { launchNextRun bool reset *txpoolResetRequest dirtyAccounts *accountSet - queuedEvents = make(map[common.Address]*sortedMap) + queuedEvents = make(map[common.Address]*txSortedMap) ) for { // Launch next background reorg if needed @@ -1268,7 +1186,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { launchNextRun = false reset, dirtyAccounts = nil, nil - queuedEvents = make(map[common.Address]*sortedMap) + queuedEvents = make(map[common.Address]*txSortedMap) } select { @@ -1297,7 +1215,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { // request one later if they want the events sent. addr, _ := types.Sender(pool.signer, tx) if _, ok := queuedEvents[addr]; !ok { - queuedEvents[addr] = newSortedMap() + queuedEvents[addr] = newTxSortedMap() } queuedEvents[addr].Put(tx) @@ -1316,7 +1234,7 @@ func (pool *LegacyPool) scheduleReorgLoop() { } // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. -func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { +func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { defer func(t0 time.Time) { reorgDurationTimer.Update(time.Since(t0)) }(time.Now()) @@ -1355,15 +1273,13 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // because of another transaction (e.g. higher gas price). 
if reset != nil { pool.demoteUnexecutables() - if reset.newHead != nil { - if pool.chainconfig.IsApricotPhase3(reset.newHead.Time) { - if err := pool.updateBaseFeeAt(reset.newHead); err != nil { - log.Error("error at updating base fee in tx pool", "error", err) - } - } else { - pool.priced.Reheap() + if reset.newHead != nil && pool.chainconfig.IsApricotPhase3(new(big.Int).SetUint64(reset.newHead.Time)) { + _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, reset.newHead, uint64(time.Now().Unix())) + if err == nil { + pool.priced.SetBaseFee(baseFeeEstimate) } } + // Update all accounts to the latest known pending nonce nonces := make(map[common.Address]uint64, len(pool.pending)) for addr, list := range pool.pending { @@ -1380,11 +1296,15 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, pool.changesSinceReorg = 0 // Reset change counter pool.mu.Unlock() + if reset != nil && reset.newHead != nil { + pool.reorgFeed.Send(NewTxPoolReorgEvent{reset.newHead}) + } + // Notify subsystems for newly added transactions for _, tx := range promoted { addr, _ := types.Sender(pool.signer, tx) if _, ok := events[addr]; !ok { - events[addr] = newSortedMap() + events[addr] = newTxSortedMap() } events[addr].Put(tx) } @@ -1393,13 +1313,13 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, for _, set := range events { txs = append(txs, set.Flatten()...) } - pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) + pool.txFeed.Send(NewTxsEvent{txs}) } } // reset retrieves the current state of the blockchain and ensures the content // of the transaction pool is valid with regard to the chain state. -func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { +func (pool *TxPool) reset(oldHead, newHead *types.Header) { // If we're reorging an old state, reinject all dropped transactions var reinject types.Transactions @@ -1412,6 +1332,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { log.Debug("Skipping deep transaction reorg", "depth", depth) } else { // Reorg seems shallow enough to pull in all transactions into memory + var discarded, included types.Transactions var ( rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) @@ -1419,11 +1340,11 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { if rem == nil { // This can happen if a setHead is performed, where we simply discard the old // head from the chain. - // If that is the case, we don't have the lost transactions anymore, and + // If that is the case, we don't have the lost transactions any more, and // there's nothing to add if newNum >= oldNum { // If we reorged to a same or higher number, then it's not a case of setHead - log.Warn("Transaction pool reset with missing old head", + log.Warn("Transaction pool reset with missing oldhead", "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) return } @@ -1432,15 +1353,6 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) // We still need to update the current state s.th. the lost transactions can be readded by the user } else { - if add == nil { - // if the new head is nil, it means that something happened between - // the firing of newhead-event and _now_: most likely a - // reorg caused by sync-reversion or explicit sethead back to an - // earlier block. 
- log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash()) - return - } - var discarded, included types.Transactions for rem.NumberU64() > add.NumberU64() { discarded = append(discarded, rem.Transactions()...) if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { @@ -1467,41 +1379,44 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { return } } - lost := make([]*types.Transaction, 0, len(discarded)) - for _, tx := range types.TxDifference(discarded, included) { - if pool.Filter(tx) { - lost = append(lost, tx) - } - } - reinject = lost + reinject = types.TxDifference(discarded, included) } } } // Initialize the internal state to the current head if newHead == nil { - newHead = pool.chain.CurrentBlock() // Special case during testing + newHead = pool.chain.CurrentBlock().Header() // Special case during testing } statedb, err := pool.chain.StateAt(newHead.Root) if err != nil { log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root) return } - pool.currentHead.Store(newHead) + pool.currentHead = newHead pool.currentStateLock.Lock() pool.currentState = statedb pool.currentStateLock.Unlock() - pool.pendingNonces = newNoncer(statedb) + pool.pendingNonces = newTxNoncer(statedb) + pool.currentMaxGas = newHead.GasLimit // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) pool.chain.SenderCacher().Recover(pool.signer, reinject) pool.addTxsLocked(reinject, false) + + // Update all fork indicator by next pending block number. + next := new(big.Int).Add(newHead.Number, big.NewInt(1)) + pool.istanbul = pool.chainconfig.IsIstanbul(next) + + timestamp := new(big.Int).SetUint64(newHead.Time) + pool.eip2718 = pool.chainconfig.IsApricotPhase2(timestamp) + pool.eip1559 = pool.chainconfig.IsApricotPhase3(timestamp) } // promoteExecutables moves transactions that have become processable from the // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. 
-func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { +func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { pool.currentStateLock.Lock() defer pool.currentStateLock.Unlock() @@ -1509,7 +1424,6 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T var promoted []*types.Transaction // Iterate over all accounts and promote any executable transactions - gasLimit := pool.currentHead.Load().GasLimit for _, addr := range accounts { list := pool.queue[addr] if list == nil { @@ -1523,7 +1437,7 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T } log.Trace("Removed old queued transactions", "count", len(forwards)) // Drop all transactions that are too costly (low balance or out of gas) - drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit) + drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() pool.all.Remove(hash) @@ -1563,9 +1477,6 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T if list.Empty() { delete(pool.queue, addr) delete(pool.beats, addr) - if _, ok := pool.pending[addr]; !ok { - pool.reserve(addr, false) - } } } return promoted @@ -1574,7 +1485,7 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T // truncatePending removes transactions from the pending queue if the pool is above the // pending limit. The algorithm tries to reduce transaction counts by an approximately // equal number for all for accounts with many pending transactions. -func (pool *LegacyPool) truncatePending() { +func (pool *TxPool) truncatePending() { pending := uint64(0) for _, list := range pool.pending { pending += uint64(list.Len()) @@ -1585,7 +1496,7 @@ func (pool *LegacyPool) truncatePending() { pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first - spammers := prque.New[int64, common.Address](nil) + spammers := prque.New(nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { @@ -1597,12 +1508,12 @@ func (pool *LegacyPool) truncatePending() { for pending > pool.config.GlobalSlots && !spammers.Empty() { // Retrieve the next offender if not local address offender, _ := spammers.Pop() - offenders = append(offenders, offender) + offenders = append(offenders, offender.(common.Address)) // Equalize balances until all the same or below threshold if len(offenders) > 1 { // Calculate the equalization threshold for all current offenders - threshold := pool.pending[offender].Len() + threshold := pool.pending[offender.(common.Address)].Len() // Iteratively reduce all offenders until below limit or threshold reached for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { @@ -1659,7 +1570,7 @@ func (pool *LegacyPool) truncatePending() { } // truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit. 
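truncatePending above and truncateQueue below enforce the same slot budget that add checks at admission time; a sketch of the slot arithmetic (the limits are illustrative, not the defaults):

```go
package main

import "fmt"

const txSlotSize = 32 * 1024 // bytes per slot, matching the pool's constant

// numSlots mirrors the pool's accounting: a tx takes ceil(size/txSlotSize) slots.
func numSlots(txSize uint64) uint64 {
	return (txSize + txSlotSize - 1) / txSlotSize
}

func main() {
	globalSlots, globalQueue := uint64(4096), uint64(1024) // pending and queue budgets
	used, txSize := uint64(5120), uint64(640)

	// add() evicts underpriced remotes, or rejects, once this overflows.
	if used+numSlots(txSize) > globalSlots+globalQueue {
		fmt.Println("pool full: evict or reject")
	} else {
		fmt.Println("fits:", used+numSlots(txSize), "of", globalSlots+globalQueue)
	}
}
```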
-func (pool *LegacyPool) truncateQueue() { +func (pool *TxPool) truncateQueue() { queued := uint64(0) for _, list := range pool.queue { queued += uint64(list.Len()) } @@ -1687,7 +1598,7 @@ // Drop all transactions if they are less than the overflow if size := uint64(list.Len()); size <= drop { for _, tx := range list.Flatten() { - pool.removeTx(tx.Hash(), true, true) + pool.removeTx(tx.Hash(), true) } drop -= size queuedRateLimitMeter.Mark(int64(size)) @@ -1696,7 +1607,7 @@ // Otherwise drop only last few transactions txs := list.Flatten() for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - pool.removeTx(txs[i].Hash(), true, true) + pool.removeTx(txs[i].Hash(), true) drop-- queuedRateLimitMeter.Mark(1) } @@ -1710,12 +1621,11 @@ // Note: transactions are not marked as removed in the priced list because re-heaping // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap in this function -func (pool *LegacyPool) demoteUnexecutables() { +func (pool *TxPool) demoteUnexecutables() { pool.currentStateLock.Lock() defer pool.currentStateLock.Unlock() // Iterate over all accounts and demote any non-executable transactions - gasLimit := pool.currentHead.Load().GasLimit for addr, list := range pool.pending { nonce := pool.currentState.GetNonce(addr) @@ -1727,7 +1637,7 @@ log.Trace("Removed old pending transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit) + drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash) @@ -1756,26 +1666,24 @@ // Internal shuffle shouldn't touch the lookup set. pool.enqueueTx(hash, tx, false, false) } + // This might happen in a reorg, so log it to the metering pendingGauge.Dec(int64(len(gapped))) } // Delete the entire pending entry if it became empty. if list.Empty() { delete(pool.pending, addr) - if _, ok := pool.queue[addr]; !ok { - pool.reserve(addr, false) - } } } } -func (pool *LegacyPool) startPeriodicFeeUpdate() { +func (pool *TxPool) startPeriodicFeeUpdate() { if pool.chainconfig.ApricotPhase3BlockTimestamp == nil { return } // Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay // when starting up in ApricotPhase3 before the base fee is updated.
- if time.Now().After(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp)) { + if time.Now().After(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0)) { pool.updateBaseFee() } @@ -1783,12 +1691,12 @@ func (pool *LegacyPool) startPeriodicFeeUpdate() { go pool.periodicBaseFeeUpdate() } -func (pool *LegacyPool) periodicBaseFeeUpdate() { +func (pool *TxPool) periodicBaseFeeUpdate() { defer pool.wg.Done() // Sleep until it's time to start the periodic base fee update or the tx pool is shutting down select { - case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp))): + case <-time.After(time.Until(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0))): case <-pool.generalShutdownChan: return // Return early if shutting down } @@ -1805,24 +1713,16 @@ func (pool *LegacyPool) periodicBaseFeeUpdate() { } } -func (pool *LegacyPool) updateBaseFee() { +func (pool *TxPool) updateBaseFee() { pool.mu.Lock() defer pool.mu.Unlock() - err := pool.updateBaseFeeAt(pool.currentHead.Load()) - if err != nil { - log.Error("failed to update base fee", "currentHead", pool.currentHead.Load().Hash(), "err", err) - } -} - -// assumes lock is already held -func (pool *LegacyPool) updateBaseFeeAt(head *types.Header) error { - _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, head, uint64(time.Now().Unix())) - if err != nil { - return err + _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, pool.currentHead, uint64(time.Now().Unix())) + if err == nil { + pool.priced.SetBaseFee(baseFeeEstimate) + } else { + log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err) } - pool.priced.SetBaseFee(baseFeeEstimate) - return nil } // addressByHeartbeat is an account address tagged with its last activity timestamp. @@ -1849,7 +1749,7 @@ type accountSet struct { // derivations. func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { as := &accountSet{ - accounts: make(map[common.Address]struct{}, len(addrs)), + accounts: make(map[common.Address]struct{}), signer: signer, } for _, addr := range addrs { @@ -1907,28 +1807,28 @@ func (as *accountSet) merge(other *accountSet) { as.cache = nil } -// lookup is used internally by LegacyPool to track transactions while allowing +// txLookup is used internally by TxPool to track transactions while allowing // lookup without mutex contention. // // Note, although this type is properly protected against concurrent access, it // is **not** a type that should ever be mutated or even exposed outside of the // transaction pool, since its internal state is tightly coupled with the pool's // internal mechanisms. The sole purpose of the type is to permit out-of-bound -// peeking into the pool in LegacyPool.Get without having to acquire the widely scoped -// LegacyPool.mu mutex. +// peeking into the pool in TxPool.Get without having to acquire the widely scoped +// TxPool.mu mutex. // // This lookup set combines the notion of "local transactions", which is useful // to build upper-level structure. -type lookup struct { +type txLookup struct { slots int lock sync.RWMutex locals map[common.Hash]*types.Transaction remotes map[common.Hash]*types.Transaction } -// newLookup returns a new lookup structure. -func newLookup() *lookup { - return &lookup{ +// newTxLookup returns a new txLookup structure.
+func newTxLookup() *txLookup { + return &txLookup{ locals: make(map[common.Hash]*types.Transaction), remotes: make(map[common.Hash]*types.Transaction), } @@ -1937,7 +1837,7 @@ func newLookup() *lookup { // Range calls f on each key and value present in the map. The callback passed // should return the indicator whether the iteration needs to be continued. // Callers need to specify which set (or both) to be iterated. -func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { +func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { t.lock.RLock() defer t.lock.RUnlock() @@ -1958,7 +1858,7 @@ func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local boo } // Get returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) Get(hash common.Hash) *types.Transaction { +func (t *txLookup) Get(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() @@ -1969,7 +1869,7 @@ func (t *lookup) Get(hash common.Hash) *types.Transaction { } // GetLocal returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) GetLocal(hash common.Hash) *types.Transaction { +func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() @@ -1977,7 +1877,7 @@ func (t *lookup) GetLocal(hash common.Hash) *types.Transaction { } // GetRemote returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) GetRemote(hash common.Hash) *types.Transaction { +func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() @@ -1985,7 +1885,7 @@ func (t *lookup) GetRemote(hash common.Hash) *types.Transaction { } // Count returns the current number of transactions in the lookup. -func (t *lookup) Count() int { +func (t *txLookup) Count() int { t.lock.RLock() defer t.lock.RUnlock() @@ -1993,7 +1893,7 @@ func (t *lookup) Count() int { } // LocalCount returns the current number of local transactions in the lookup. -func (t *lookup) LocalCount() int { +func (t *txLookup) LocalCount() int { t.lock.RLock() defer t.lock.RUnlock() @@ -2001,7 +1901,7 @@ func (t *lookup) LocalCount() int { } // RemoteCount returns the current number of remote transactions in the lookup. -func (t *lookup) RemoteCount() int { +func (t *txLookup) RemoteCount() int { t.lock.RLock() defer t.lock.RUnlock() @@ -2009,7 +1909,7 @@ func (t *lookup) RemoteCount() int { } // Slots returns the current number of slots used in the lookup. -func (t *lookup) Slots() int { +func (t *txLookup) Slots() int { t.lock.RLock() defer t.lock.RUnlock() @@ -2017,7 +1917,7 @@ func (t *lookup) Slots() int { } // Add adds a transaction to the lookup. -func (t *lookup) Add(tx *types.Transaction, local bool) { +func (t *txLookup) Add(tx *types.Transaction, local bool) { t.lock.Lock() defer t.lock.Unlock() @@ -2032,7 +1932,7 @@ func (t *lookup) Add(tx *types.Transaction, local bool) { } // Remove removes a transaction from the lookup. -func (t *lookup) Remove(hash common.Hash) { +func (t *txLookup) Remove(hash common.Hash) { t.lock.Lock() defer t.lock.Unlock() @@ -2053,7 +1953,7 @@ func (t *lookup) Remove(hash common.Hash) { // RemoteToLocals migrates the transactions belonging to the given locals to the locals // set. It is assumed that the locals set is thread-safe to use.
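All of the txLookup methods above follow one pattern: reads take the shared RLock, mutations take the exclusive Lock, which is what lets callers peek at transactions without touching the pool's widely scoped mutex. A reduced sketch of the same pattern (a single string-keyed map stands in for the locals/remotes maps; illustrative only):

package main

import (
	"fmt"
	"sync"
)

type lookup struct {
	lock sync.RWMutex
	txs  map[string]string // hash -> payload; stands in for common.Hash -> *types.Transaction
}

// Get takes only the read lock, so concurrent readers never block each other.
func (l *lookup) Get(hash string) (string, bool) {
	l.lock.RLock()
	defer l.lock.RUnlock()
	tx, ok := l.txs[hash]
	return tx, ok
}

// Add takes the write lock, briefly excluding readers and other writers.
func (l *lookup) Add(hash, tx string) {
	l.lock.Lock()
	defer l.lock.Unlock()
	l.txs[hash] = tx
}

func main() {
	l := &lookup{txs: make(map[string]string)}
	l.Add("0xabc", "tx payload")
	fmt.Println(l.Get("0xabc"))
}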
-func (t *lookup) RemoteToLocals(locals *accountSet) int { +func (t *txLookup) RemoteToLocals(locals *accountSet) int { t.lock.Lock() defer t.lock.Unlock() @@ -2069,7 +1969,7 @@ func (t *lookup) RemoteToLocals(locals *accountSet) int { } // RemotesBelowTip finds all remote transactions below the given tip threshold. -func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { +func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { found := make(types.Transactions, 0, 128) t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { if tx.GasTipCapIntCmp(threshold) < 0 { diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index f2bc9d402e..8c819fb05d 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -24,80 +24,84 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package legacypool +package core import ( "crypto/ecdsa" - crand "crypto/rand" "errors" "fmt" "math/big" "math/rand" "os" + "strings" "sync" - "sync/atomic" "testing" "time" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/txpool" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/trie" ) var ( // testTxPoolConfig is a transaction pool configuration without stateful disk // sideeffects used during testing. - testTxPoolConfig = DefaultConfig + testTxPoolConfig TxPoolConfig // eip1559Config is a chain config with EIP-1559 enabled at block 0. 
eip1559Config *params.ChainConfig ) func init() { + testTxPoolConfig = DefaultTxPoolConfig + testTxPoolConfig.Journal = "" + cpy := *params.TestChainConfig eip1559Config = &cpy - eip1559Config.ApricotPhase2BlockTimestamp = utils.NewUint64(0) - eip1559Config.ApricotPhase3BlockTimestamp = utils.NewUint64(0) + eip1559Config.ApricotPhase2BlockTimestamp = common.Big0 + eip1559Config.ApricotPhase3BlockTimestamp = common.Big0 } type testBlockChain struct { - config *params.ChainConfig - gasLimit atomic.Uint64 statedb *state.StateDB + gasLimit uint64 chainHeadFeed *event.Feed lock sync.Mutex } -func newTestBlockChain(config *params.ChainConfig, gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { - bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: new(event.Feed)} - bc.gasLimit.Store(gasLimit) - return &bc +func newTestBlockchain(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) *testBlockChain { + return &testBlockChain{ + statedb: statedb, + gasLimit: gasLimit, + chainHeadFeed: chainHeadFeed, + } } -func (bc *testBlockChain) Config() *params.ChainConfig { - return bc.config +func (bc *testBlockChain) reset(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) { + bc.lock.Lock() + defer bc.lock.Unlock() + + bc.statedb = statedb + bc.gasLimit = gasLimit + bc.chainHeadFeed = chainHeadFeed } -func (bc *testBlockChain) CurrentBlock() *types.Header { +func (bc *testBlockChain) CurrentBlock() *types.Block { bc.lock.Lock() defer bc.lock.Unlock() - return &types.Header{ - Number: new(big.Int), - GasLimit: bc.gasLimit.Load(), - } + return types.NewBlock(&types.Header{ + GasLimit: bc.gasLimit, + }, nil, nil, nil, trie.NewStackTrie(nil), nil, true) } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) + return bc.CurrentBlock() } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { @@ -107,16 +111,15 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } -func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { +func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { bc.lock.Lock() defer bc.lock.Unlock() return bc.chainHeadFeed.Subscribe(ch) } -func (bc *testBlockChain) SenderCacher() *core.TxSenderCacher { - // Zero threads avoids starting goroutines. 
- return core.NewTxSenderCacher(0) +func (bc *testBlockChain) SenderCacher() *TxSenderCacher { + return newTxSenderCacher(1) } func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction { @@ -130,7 +133,7 @@ func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ec func pricedDataTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey, bytes uint64) *types.Transaction { data := make([]byte, bytes) - crand.Read(data) + rand.Read(data) tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(0), gaslimit, gasprice, data), types.HomesteadSigner{}, key) return tx @@ -151,51 +154,24 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int, return tx } -func makeAddressReserver() txpool.AddressReserver { - var ( - reserved = make(map[common.Address]struct{}) - lock sync.Mutex - ) - return func(addr common.Address, reserve bool) error { - lock.Lock() - defer lock.Unlock() - - _, exists := reserved[addr] - if reserve { - if exists { - panic("already reserved") - } - reserved[addr] = struct{}{} - return nil - } - if !exists { - panic("not reserved") - } - delete(reserved, addr) - return nil - } -} - -func setupPool() (*LegacyPool, *ecdsa.PrivateKey) { - return setupPoolWithConfig(params.TestChainConfig) +func setupTxPool() (*TxPool, *ecdsa.PrivateKey) { + return setupTxPoolWithConfig(params.TestChainConfig) } -func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed)) +func setupTxPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 10000000, new(event.Feed)) key, _ := crypto.GenerateKey() - pool := New(testTxPoolConfig, blockchain) - if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil { - panic(err) - } + pool := NewTxPool(testTxPoolConfig, config, blockchain) + // wait for the pool to initialize <-pool.initDoneCh return pool, key } -// validatePoolInternals checks various consistency invariants within the pool. -func validatePoolInternals(pool *LegacyPool) error { +// validateTxPoolInternals checks various consistency invariants within the pool. +func validateTxPoolInternals(pool *TxPool) error { pool.mu.RLock() defer pool.mu.RUnlock() @@ -221,16 +197,13 @@ func validatePoolInternals(pool *LegacyPool) error { if nonce := pool.pendingNonces.get(addr); nonce != last+1 { return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) } - if txs.totalcost.Cmp(common.Big0) < 0 { - return fmt.Errorf("totalcost went negative: %v", txs.totalcost) - } } return nil } // validateEvents checks that the correct number of transaction addition events // were fired on the pool's event feed. -func validateEvents(events chan core.NewTxsEvent, count int) error { +func validateEvents(events chan NewTxsEvent, count int) error { var received []*types.Transaction for len(received) < count { @@ -275,7 +248,7 @@ func (c *testChain) State() (*state.StateDB, error) { // a state change between those fetches. 
stdb := c.statedb if *c.trigger { c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) // simulate that the new head block included tx0 and tx1 c.statedb.SetNonce(c.address, 2) c.statedb.SetBalance(c.address, new(big.Int).SetUint64(params.Ether)) @@ -287,33 +260,32 @@ func (c *testChain) State() (*state.StateDB, error) { // This test simulates a scenario where a new block is imported during a // state reset and tests whether the pending state is in sync with the // block head event that initiated the resetState(). -func TestStateChangeDuringReset(t *testing.T) { +func TestStateChangeDuringTransactionPoolReset(t *testing.T) { t.Parallel() var ( key, _ = crypto.GenerateKey() address = crypto.PubkeyToAddress(key.PublicKey) - statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) trigger = false ) // setup pool with 2 transactions in it statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether)) - blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger} + blockchain := &testChain{newTestBlockchain(statedb, 1000000000, new(event.Feed)), address, &trigger} tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() nonce := pool.Nonce(address) if nonce != 0 { t.Fatalf("Invalid nonce, want 0, got %d", nonce) } - pool.addRemotesSync([]*types.Transaction{tx0, tx1}) + pool.AddRemotesSync([]*types.Transaction{tx0, tx1}) nonce = pool.Nonce(address) if nonce != 2 { @@ -330,13 +302,13 @@ func TestStateChangeDuringReset(t *testing.T) { } }
t.Error("expected", ErrIntrinsicGas, "got", err) } testSetNonce(pool, from, 1) testAddBalance(pool, from, big.NewInt(0xffffffffffffff)) tx = transaction(0, 100000, key) - if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { - t.Errorf("want %v have %v", want, err) + if err := pool.AddRemote(tx); !errors.Is(err, ErrNonceTooLow) { + t.Error("expected", ErrNonceTooLow) } tx = transaction(1, 100000, key) - pool.gasTip.Store(big.NewInt(1000)) - if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { - t.Errorf("want %v have %v", want, err) + pool.gasPrice = big.NewInt(1000) + if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Error("expected error to contain", ErrUnderpriced, "got", err) } - if err := pool.addLocal(tx); err != nil { + if err := pool.AddLocal(tx); err != nil { t.Error("expected", nil, "got", err) } } -func TestQueue(t *testing.T) { +func TestTransactionQueue(t *testing.T) { t.Parallel() - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() tx := transaction(0, 100, key) from, _ := deriveSender(tx) @@ -411,11 +382,11 @@ func TestQueue(t *testing.T) { } } -func TestQueue2(t *testing.T) { +func TestTransactionQueue2(t *testing.T) { t.Parallel() - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() tx1 := transaction(0, 100, key) tx2 := transaction(10, 100, key) @@ -437,65 +408,65 @@ func TestQueue2(t *testing.T) { } } -func TestNegativeValue(t *testing.T) { +func TestTransactionNegativeValue(t *testing.T) { t.Parallel() - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) from, _ := deriveSender(tx) testAddBalance(pool, from, big.NewInt(1)) - if err := pool.addRemote(tx); err != txpool.ErrNegativeValue { - t.Error("expected", txpool.ErrNegativeValue, "got", err) + if err := pool.AddRemote(tx); err != ErrNegativeValue { + t.Error("expected", ErrNegativeValue, "got", err) } } -func TestTipAboveFeeCap(t *testing.T) { +func TestTransactionTipAboveFeeCap(t *testing.T) { t.Parallel() - pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Close() + pool, key := setupTxPoolWithConfig(eip1559Config) + defer pool.Stop() tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) - if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap { - t.Error("expected", core.ErrTipAboveFeeCap, "got", err) + if err := pool.AddRemote(tx); err != ErrTipAboveFeeCap { + t.Error("expected", ErrTipAboveFeeCap, "got", err) } } -func TestVeryHighValues(t *testing.T) { +func TestTransactionVeryHighValues(t *testing.T) { t.Parallel() - pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Close() + pool, key := setupTxPoolWithConfig(eip1559Config) + defer pool.Stop() veryBigNumber := big.NewInt(1) veryBigNumber.Lsh(veryBigNumber, 300) tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) - if err := pool.addRemote(tx); err != core.ErrTipVeryHigh { - t.Error("expected", core.ErrTipVeryHigh, "got", err) + if err := pool.AddRemote(tx); err != ErrTipVeryHigh { + t.Error("expected", ErrTipVeryHigh, "got", err) } tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) - if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh { - t.Error("expected", core.ErrFeeCapVeryHigh, "got", err) + if err := pool.AddRemote(tx2); err != 
ErrFeeCapVeryHigh { + t.Error("expected", ErrFeeCapVeryHigh, "got", err) } } -func TestChainFork(t *testing.T) { +func TestTransactionChainFork(t *testing.T) { t.Parallel() - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed)) <-pool.requestReset(nil, nil) } resetState() @@ -504,7 +475,7 @@ func TestChainFork(t *testing.T) { if _, err := pool.add(tx, false); err != nil { t.Error("didn't expect error", err) } - pool.removeTx(tx.Hash(), true, true) + pool.removeTx(tx.Hash(), true) // reset the pool's internal state resetState() @@ -513,18 +484,18 @@ func TestChainFork(t *testing.T) { } } -func TestDoubleNonce(t *testing.T) { +func TestTransactionDoubleNonce(t *testing.T) { t.Parallel() - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed)) <-pool.requestReset(nil, nil) } resetState() @@ -564,11 +535,11 @@ func TestDoubleNonce(t *testing.T) { } } -func TestMissingNonce(t *testing.T) { +func TestTransactionMissingNonce(t *testing.T) { t.Parallel() - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, addr, big.NewInt(100000000000000)) @@ -587,12 +558,12 @@ func TestMissingNonce(t *testing.T) { } } -func TestNonceRecovery(t *testing.T) { +func TestTransactionNonceRecovery(t *testing.T) { t.Parallel() const n = 10 - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) testSetNonce(pool, addr, n) @@ -600,7 +571,7 @@ func TestNonceRecovery(t *testing.T) { <-pool.requestReset(nil, nil) tx := transaction(n, 100000, key) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Error(err) } // simulate some weird re-order of transactions and missing nonce(s) @@ -613,12 +584,12 @@ func TestNonceRecovery(t *testing.T) { // Tests that if an account runs out of funds, any pending and queued transactions // are dropped. 
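Before the dropping test that follows: the affordability rule behind it (list.Filter in the pool) reduces to a per-transaction check that cost fits the sender's balance and gas fits the block gas limit. A hedged sketch with flattened uint64 costs (illustrative types only, not the pool's actual list implementation):

package main

import "fmt"

type tx struct {
	nonce uint64
	cost  uint64 // value + gasLimit*gasPrice, flattened to uint64 for brevity
	gas   uint64
}

// filter keeps transactions the account can still pay for and that still fit
// the block gas limit; everything else is dropped (or re-queued as invalid).
func filter(txs []tx, balance, gasLimit uint64) (keep, drop []tx) {
	for _, t := range txs {
		if t.cost > balance || t.gas > gasLimit {
			drop = append(drop, t)
		} else {
			keep = append(keep, t)
		}
	}
	return keep, drop
}

func main() {
	txs := []tx{{0, 100, 21000}, {1, 900, 21000}, {2, 100, 200000}}
	keep, drop := filter(txs, 500, 100000)
	fmt.Println(len(keep), len(drop)) // 1 2: only the affordable, fitting tx stays
}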
-func TestDropping(t *testing.T) { +func TestTransactionDropping(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000)) @@ -694,7 +665,8 @@ func TestDropping(t *testing.T) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) } // Reduce the block gas limit, check that invalidated transactions are dropped - pool.chain.(*testBlockChain).gasLimit.Store(100) + tbc := pool.chain.(*testBlockChain) + tbc.reset(tbc.statedb, 100, tbc.chainHeadFeed) <-pool.requestReset(nil, nil) if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { @@ -717,16 +689,15 @@ func TestDropping(t *testing.T) { // Tests that if a transaction is dropped from the current pending pool (e.g. out // of fund), all consecutive (still valid, but not executable) transactions are // postponed back into the future queue to prevent broadcasting them. -func TestPostponing(t *testing.T) { +func TestTransactionPostponing(t *testing.T) { t.Parallel() // Create the pool to test the postponing with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Create two test accounts to produce different gap profiles with keys := make([]*ecdsa.PrivateKey, 2) @@ -751,7 +722,7 @@ func TestPostponing(t *testing.T) { txs = append(txs, tx) } } - for i, err := range pool.addRemotesSync(txs) { + for i, err := range pool.AddRemotesSync(txs) { if err != nil { t.Fatalf("tx %d: failed to add transactions: %v", i, err) } @@ -831,23 +802,23 @@ func TestPostponing(t *testing.T) { // Tests that if the transaction pool has both executable and non-executable // transactions from an origin account, filling the nonce gap moves all queued // ones into the pending pool. 
-func TestGapFilling(t *testing.T) { +func TestTransactionGapFilling(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5) + events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() // Create a pending and a queued transaction with a nonce-gap in between - pool.addRemotesSync([]*types.Transaction{ + pool.AddRemotesSync([]*types.Transaction{ transaction(0, 100000, key), transaction(2, 100000, key), }) @@ -861,7 +832,7 @@ func TestGapFilling(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Fill the nonce gap and ensure all transactions become pending @@ -878,19 +849,19 @@ func TestGapFilling(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("gap-filling event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that if the transaction count belonging to a single account goes above // some threshold, the higher transactions are dropped to prevent DOS attacks. -func TestQueueAccountLimiting(t *testing.T) { +func TestTransactionQueueAccountLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -923,27 +894,26 @@ func TestQueueAccountLimiting(t *testing.T) { // // This logic should not hold for local transactions, unless the local tracking // mechanism is disabled. 
-func TestQueueGlobalLimiting(t *testing.T) { - testQueueGlobalLimiting(t, false) +func TestTransactionQueueGlobalLimiting(t *testing.T) { + testTransactionQueueGlobalLimiting(t, false) } -func TestQueueGlobalLimitingNoLocals(t *testing.T) { - testQueueGlobalLimiting(t, true) +func TestTransactionQueueGlobalLimitingNoLocals(t *testing.T) { + testTransactionQueueGlobalLimiting(t, true) } -func testQueueGlobalLimiting(t *testing.T, nolocals bool) { +func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them (last one will be the local) keys := make([]*ecdsa.PrivateKey, 5) @@ -965,7 +935,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { nonces[addr]++ } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) queued := 0 for addr, list := range pool.queue { @@ -982,7 +952,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { for i := uint64(0); i < 3*config.GlobalQueue; i++ { txs = append(txs, transaction(i+1, 100000, local)) } - pool.addLocals(txs) + pool.AddLocals(txs) // If locals are disabled, the previous eviction algorithm should apply here too if nolocals { @@ -1014,29 +984,28 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { // // This logic should not hold for local transactions, unless the local tracking // mechanism is disabled. 
-func TestQueueTimeLimiting(t *testing.T) { - testQueueTimeLimiting(t, false) +func TestTransactionQueueTimeLimiting(t *testing.T) { + testTransactionQueueTimeLimiting(t, false) } -func TestQueueTimeLimitingNoLocals(t *testing.T) { - testQueueTimeLimiting(t, true) +func TestTransactionQueueTimeLimitingNoLocals(t *testing.T) { + testTransactionQueueTimeLimiting(t, true) } -func testQueueTimeLimiting(t *testing.T, nolocals bool) { +func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { // Reduce the eviction interval to a testable amount defer func(old time.Duration) { evictionInterval = old }(evictionInterval) evictionInterval = time.Millisecond * 100 // Create the pool to test the non-expiration enforcement - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.Lifetime = time.Second config.NoLocals = nolocals - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create two test accounts to ensure remotes expire but locals do not local, _ := crypto.GenerateKey() @@ -1046,10 +1015,10 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) // Add the two transactions and ensure they both are queued up - if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } pending, queued := pool.Stats() @@ -1059,7 +1028,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1074,7 +1043,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1094,7 +1063,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1111,12 +1080,12 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool 
internal state corrupted: %v", err) } // Queue gapped transactions - if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { @@ -1125,7 +1094,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { time.Sleep(5 * evictionInterval) // A half lifetime pass // Queue executable transactions, the life cycle should be restarted. - if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { @@ -1139,9 +1108,9 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1160,7 +1129,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1168,18 +1137,18 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // Tests that even if the transaction count belonging to a single account goes // above some threshold, as long as the transactions are executable, they are // accepted. -func TestPendingLimiting(t *testing.T) { +func TestTransactionPendingLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000000000)) + testAddBalance(pool, account, big.NewInt(1000000)) // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5) + events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1201,7 +1170,7 @@ func TestPendingLimiting(t *testing.T) { if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil { t.Fatalf("event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1209,19 +1178,18 @@ func TestPendingLimiting(t *testing.T) { // Tests that if the transaction count belonging to multiple accounts goes above // some hard threshold, the higher transactions are dropped to prevent DOS // attacks.
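On the queue time limiting exercised above: eviction keys off a per-account heartbeat, dropping queued (non-executable) transactions from accounts that have been idle longer than the configured lifetime. A sketch of that bookkeeping under assumed names (beats and lifetime mirror the pool's fields, but this is not the pool's code):

package main

import (
	"fmt"
	"time"
)

// evictStale returns the accounts whose last heartbeat is older than lifetime;
// in the real pool, their queued transactions would be removed on this pass.
func evictStale(beats map[string]time.Time, lifetime time.Duration, now time.Time) []string {
	var evicted []string
	for addr, last := range beats {
		if now.Sub(last) > lifetime {
			evicted = append(evicted, addr)
			delete(beats, addr) // forget the heartbeat along with the queue entry
		}
	}
	return evicted
}

func main() {
	now := time.Now()
	beats := map[string]time.Time{
		"idle":   now.Add(-2 * time.Second),
		"active": now,
	}
	fmt.Println(evictStale(beats, time.Second, now)) // [idle]
}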
-func TestPendingGlobalLimiting(t *testing.T) { +func TestTransactionPendingGlobalLimiting(t *testing.T) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them keys := make([]*ecdsa.PrivateKey, 5) @@ -1241,7 +1209,7 @@ func TestPendingGlobalLimiting(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending := 0 for _, list := range pool.pending { @@ -1250,7 +1218,7 @@ func TestPendingGlobalLimiting(t *testing.T) { if pending > int(config.GlobalSlots) { t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1258,12 +1226,12 @@ func TestPendingGlobalLimiting(t *testing.T) { // Test the limit on transaction size is enforced correctly. // This test verifies every transaction having allowed size // is added to the pool, and longer transactions are rejected. -func TestAllowedTxSize(t *testing.T) { +func TestTransactionAllowedTxSize(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000000)) @@ -1272,7 +1240,7 @@ func TestAllowedTxSize(t *testing.T) { // // It is assumed the fields in the transaction (except of the data) are: // - nonce <= 32 bytes - // - gasTip <= 32 bytes + // - gasPrice <= 32 bytes // - gasLimit <= 32 bytes // - recipient == 20 bytes // - value <= 32 bytes @@ -1280,21 +1248,22 @@ func TestAllowedTxSize(t *testing.T) { // All those fields are summed up to at most 213 bytes. 
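A quick worked example of the size budget the next hunk computes. The 213-byte figure is the comment's worst case for the non-data fields; the 128 KiB cap matches upstream go-ethereum's txMaxSize (4 slots of 32 KiB) and is an assumption here, since the constant itself is defined outside this hunk:

package main

import "fmt"

func main() {
	const txSlotSize = 32 * 1024     // assumed: upstream slot size
	const txMaxSize = 4 * txSlotSize // assumed: 131072 bytes, as in go-ethereum
	const baseSize = 213             // worst-case non-data fields, per the comment
	dataSize := txMaxSize - baseSize
	fmt.Println(dataSize) // 130859: the largest data payload that still fits
}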
baseSize := uint64(213) dataSize := txMaxSize - baseSize + // Try adding a transaction with maximal allowed size - tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize) + tx := pricedDataTransaction(0, pool.currentMaxGas, big.NewInt(1), key, dataSize) if err := pool.addRemoteSync(tx); err != nil { t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err) } // Try adding a transaction with random allowed size - if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { + if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { t.Fatalf("failed to add transaction of random allowed size: %v", err) } // Try adding a transaction of minimal not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, txMaxSize)); err == nil { t.Fatalf("expected rejection on slightly oversize transaction") } // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { t.Fatalf("expected rejection on oversize transaction") } // Run some sanity checks on the pool internals @@ -1305,27 +1274,26 @@ func TestAllowedTxSize(t *testing.T) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that if transactions start being capped, transactions are also removed from 'all' -func TestCapClearsFromAll(t *testing.T) { +func TestTransactionCapClearsFromAll(t *testing.T) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.AccountSlots = 2 config.AccountQueue = 2 config.GlobalSlots = 8 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them key, _ := crypto.GenerateKey() @@ -1337,8 +1305,8 @@ func TestCapClearsFromAll(t *testing.T) { txs = append(txs, transaction(uint64(j), 100000, key)) } // Import the batch and verify that limits have been enforced - pool.addRemotes(txs) - if err := validatePoolInternals(pool); err != nil { + pool.AddRemotes(txs) + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1346,19 +1314,18 @@ func TestCapClearsFromAll(t *testing.T) { // Tests that if 
the transaction count belonging to multiple accounts go above // some hard threshold, if they are under the minimum guaranteed slot count then // the transactions are still kept. -func TestPendingMinimumAllowance(t *testing.T) { +func TestTransactionPendingMinimumAllowance(t *testing.T) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 1 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them keys := make([]*ecdsa.PrivateKey, 5) @@ -1378,14 +1345,14 @@ func TestPendingMinimumAllowance(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) for addr, list := range pool.pending { if list.Len() != int(config.AccountSlots) { t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots) } } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1395,19 +1362,18 @@ func TestPendingMinimumAllowance(t *testing.T) { // from the pending pool to the queue. // // Note, local transactions are never allowed to be dropped. 
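The invariant the repricing tests below pin down: raising the price floor drops remote transactions priced under it, but local transactions are never dropped. A compact sketch of that rule (plain int tips instead of the pool's heap-based priced list; illustrative only):

package main

import "fmt"

type tx struct {
	tip   int
	local bool
}

// reprice keeps every local transaction and every remote whose tip meets the
// new floor; underpriced remotes are the ones the pool discards.
func reprice(pool []tx, floor int) (kept []tx) {
	for _, t := range pool {
		if t.local || t.tip >= floor {
			kept = append(kept, t) // locals survive regardless of price
		}
	}
	return kept
}

func main() {
	pool := []tx{{tip: 1, local: false}, {tip: 1, local: true}, {tip: 3, local: false}}
	fmt.Println(reprice(pool, 2)) // [{1 true} {3 false}]: the cheap remote is gone
}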
-func TestRepricing(t *testing.T) { +func TestTransactionPoolRepricing(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1435,8 +1401,8 @@ func TestRepricing(t *testing.T) { ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3]) // Import the batch and verify that both pending and queued transactions match up - pool.addRemotesSync(txs) - pool.addLocal(ltx) + pool.AddRemotesSync(txs) + pool.AddLocal(ltx) pending, queued := pool.Stats() if pending != 7 { @@ -1448,11 +1414,11 @@ func TestRepricing(t *testing.T) { if err := validateEvents(events, 7); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped - pool.SetGasTip(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(2)) pending, queued = pool.Stats() if pending != 2 { @@ -1464,28 +1430,28 @@ func TestRepricing(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("reprice event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Check that we can't add the old transactions back - if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to contain %v", err, ErrUnderpriced) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to contain %v", err, ErrUnderpriced) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2]));
!strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want error to contain %v", err, ErrUnderpriced) } if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // However we can add local underpriced transactions tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3]) - if err := pool.addLocal(tx); err != nil { + if err := pool.AddLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } if pending, _ = pool.Stats(); pending != 3 { @@ -1494,85 +1460,41 @@ if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // And we can fill gaps with properly priced transactions - if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } - if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } if err := validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } -func TestMinGasPriceEnforced(t *testing.T) { - t.Parallel() - - // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 10000000, statedb, new(event.Feed)) - - txPoolConfig := DefaultConfig - txPoolConfig.NoLocals = true - pool := New(txPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(txPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() - - key, _ := crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000)) - - tx := pricedTransaction(0, 100000, big.NewInt(2), key) - pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1)) - - if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("Min tip not enforced") - } - - if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("Min tip not enforced") - } - - tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key) - pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1)) - - if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("Min tip not enforced") - } - - if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) { -
t.Fatalf("Min tip not enforced") - } - // Make sure the tx is accepted if locals are enabled - pool.config.NoLocals = false - if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil { - t.Fatalf("Min tip enforced with locals enabled, error: %v", err) - } -} - // Tests that setting the transaction pool gas price to a higher value correctly // discards everything cheaper (legacy & dynamic fee) than that and moves any // gapped transactions back from the pending pool to the queue. // // Note, local transactions are never allowed to be dropped. -func TestRepricingDynamicFee(t *testing.T) { +func TestTransactionPoolRepricingDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Close() + pool, _ := setupTxPoolWithConfig(eip1559Config) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1600,8 +1522,8 @@ func TestRepricingDynamicFee(t *testing.T) { ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3]) // Import the batch and that both pending and queued transactions match up - pool.addRemotesSync(txs) - pool.addLocal(ltx) + pool.AddRemotesSync(txs) + pool.AddLocal(ltx) pending, queued := pool.Stats() if pending != 7 { @@ -1613,11 +1535,11 @@ func TestRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 7); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped - pool.SetGasTip(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(2)) pending, queued = pool.Stats() if pending != 2 { @@ -1629,31 +1551,31 @@ func TestRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("reprice event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Check that we can't add the old transactions back tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(tx); 
!strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // However we can add local underpriced transactions tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3]) - if err := pool.addLocal(tx); err != nil { + if err := pool.AddLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } if pending, _ = pool.Stats(); pending != 3 { @@ -1662,70 +1584,69 @@ func TestRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // And we can fill gaps with properly priced transactions tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2]) - if err := pool.addRemoteSync(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } if err := validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that setting the transaction pool gas price to a higher value does not // remove local transactions (legacy & dynamic fee). -func TestRepricingKeepsLocals(t *testing.T) { +func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them keys := make([]*ecdsa.PrivateKey, 3) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000*1000000)) } // Create transaction (both pending and queued) with a linearly growing gasprice for i := uint64(0); i < 500; i++ { // Add pending transaction. 
pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.addLocal(pendingTx); err != nil { + if err := pool.AddLocal(pendingTx); err != nil { t.Fatal(err) } // Add queued transaction. queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.addLocal(queuedTx); err != nil { + if err := pool.AddLocal(queuedTx); err != nil { t.Fatal(err) } // Add pending dynamic fee transaction. pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.addLocal(pendingTx); err != nil { + if err := pool.AddLocal(pendingTx); err != nil { t.Fatal(err) } // Add queued dynamic fee transaction. queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.addLocal(queuedTx); err != nil { + if err := pool.AddLocal(queuedTx); err != nil { t.Fatal(err) } } @@ -1740,20 +1661,20 @@ func TestRepricingKeepsLocals(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } validate() // Reprice the pool and check that nothing is dropped - pool.SetGasTip(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(2)) validate() - pool.SetGasTip(big.NewInt(2)) - pool.SetGasTip(big.NewInt(4)) - pool.SetGasTip(big.NewInt(8)) - pool.SetGasTip(big.NewInt(100)) + pool.SetGasPrice(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(4)) + pool.SetGasPrice(big.NewInt(8)) + pool.SetGasPrice(big.NewInt(100)) validate() } @@ -1762,28 +1683,27 @@ func TestRepricingKeepsLocals(t *testing.T) { // pending transactions are moved into the queue. // // Note, local transactions are never allowed to be dropped. 
-func TestUnderpricing(t *testing.T) { +func TestTransactionPoolUnderpricing(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 2 config.GlobalQueue = 2 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 5) + keys := make([]*ecdsa.PrivateKey, 4) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) @@ -1799,8 +1719,8 @@ func TestUnderpricing(t *testing.T) { ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2]) // Import the batch and that both pending and queued transactions match up - pool.addRemotes(txs) - pool.addLocal(ltx) + pool.AddRemotesSync(txs) + pool.AddLocal(ltx) pending, queued := pool.Stats() if pending != 3 { @@ -1812,16 +1732,12 @@ func TestUnderpricing(t *testing.T) { if err := validateEvents(events, 3); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction on block limit fails - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) - } - // Replace a future transaction with a future transaction - if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 - t.Fatalf("failed to add well priced transaction: %v", err) + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - @@ -1830,13 +1746,9 @@ func TestUnderpricing(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 + if err := pool.addRemoteSync(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // 
+K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } - // Ensure that replacing a pending transaction with a future transaction fails - if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { - t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) - } pending, queued = pool.Stats() if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) @@ -1844,19 +1756,19 @@ func TestUnderpricing(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 2); err != nil { + if err := validateEvents(events, 1); err != nil { t.Fatalf("additional event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding local transactions can push out even higher priced ones ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to append underpriced local transaction: %v", err) } ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() @@ -1869,7 +1781,7 @@ func TestUnderpricing(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("local event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1877,23 +1789,22 @@ func TestUnderpricing(t *testing.T) { // Tests that more expensive transactions push out cheap ones from the pool, but // without producing instability by creating gaps that start jumping transactions // back and forth between queued/pending. 
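// Illustrative sketch (not part of the diff, and not pool internals): the
// eviction rule the stable-underpricing test below exercises. When the pool is
// full, an incoming remote transaction is admitted only if it outbids the
// cheapest pooled remote; the names priceHeap and admit are hypothetical.
package main

import (
	"container/heap"
	"fmt"
	"math/big"
)

// priceHeap orders gas prices ascending, so the cheapest price sits on top.
type priceHeap []*big.Int

func (h priceHeap) Len() int            { return len(h) }
func (h priceHeap) Less(i, j int) bool  { return h[i].Cmp(h[j]) < 0 }
func (h priceHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *priceHeap) Push(x interface{}) { *h = append(*h, x.(*big.Int)) }
func (h *priceHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// admit reports whether a new price may enter a full pool; if it outbids the
// floor, the cheapest entry is evicted in its place, leaving no nonce gap in
// the heap itself.
func admit(pool *priceHeap, price *big.Int) bool {
	cheapest := (*pool)[0]
	if price.Cmp(cheapest) <= 0 {
		return false // underpriced: does not outbid the current floor
	}
	heap.Pop(pool)         // drop the cheapest remote
	heap.Push(pool, price) // slot in the better-paying one
	return true
}

func main() {
	pool := &priceHeap{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
	heap.Init(pool)
	fmt.Println(admit(pool, big.NewInt(1))) // false: a tie loses
	fmt.Println(admit(pool, big.NewInt(5))) // true: evicts the price-1 entry
}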
-func TestStableUnderpricing(t *testing.T) { +func TestTransactionPoolStableUnderpricing(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 128 config.GlobalQueue = 0 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1908,7 +1819,7 @@ func TestStableUnderpricing(t *testing.T) { for i := uint64(0); i < config.GlobalSlots; i++ { txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) } - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending, queued := pool.Stats() if pending != int(config.GlobalSlots) { @@ -1920,7 +1831,7 @@ func TestStableUnderpricing(t *testing.T) { if err := validateEvents(events, int(config.GlobalSlots)); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap @@ -1937,7 +1848,7 @@ func TestStableUnderpricing(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("additional event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1947,17 +1858,17 @@ func TestStableUnderpricing(t *testing.T) { // expensive ones and any gapped pending transactions are moved into the queue. // // Note, local transactions are never allowed to be dropped. 
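// Illustrative sketch (not part of the diff): how "underpriced" is judged for
// the dynamic fee transactions in the test below. What a transaction actually
// pays the block producer is its effective tip, min(GasTipCap, GasFeeCap -
// BaseFee), so both caps matter when comparing against the pool's price floor.
// effectiveTip is a hypothetical helper, not the pool's own function.
package main

import (
	"fmt"
	"math/big"
)

// effectiveTip returns min(tipCap, feeCap-baseFee); a negative result means
// the transaction cannot even cover the base fee.
func effectiveTip(feeCap, tipCap, baseFee *big.Int) *big.Int {
	tip := new(big.Int).Sub(feeCap, baseFee)
	if tip.Cmp(tipCap) > 0 {
		tip.Set(tipCap)
	}
	return tip
}

func main() {
	baseFee := big.NewInt(1)
	// feeCap=4, tipCap=1: the tip cap binds.
	fmt.Println(effectiveTip(big.NewInt(4), big.NewInt(1), baseFee)) // 1
	// feeCap=2, tipCap=3: only feeCap-baseFee=1 is left over for the tip.
	fmt.Println(effectiveTip(big.NewInt(2), big.NewInt(3), baseFee)) // 1
}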
-func TestUnderpricingDynamicFee(t *testing.T) { +func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { t.Parallel() - pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Close() + pool, _ := setupTxPoolWithConfig(eip1559Config) + defer pool.Stop() pool.config.GlobalSlots = 2 pool.config.GlobalQueue = 2 // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1978,8 +1889,8 @@ func TestUnderpricingDynamicFee(t *testing.T) { ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) // Import the batch and that both pending and queued transactions match up - pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 - pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 + pool.AddRemotesSync(txs) // Pend K0:0, K0:1; Que K1:1 + pool.AddLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 pending, queued := pool.Stats() if pending != 3 { @@ -1991,27 +1902,27 @@ func TestUnderpricingDynamicFee(t *testing.T) { if err := validateEvents(events, 3); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction fails tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.addRemoteSync(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) - if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + if err := pool.addRemoteSync(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) } - tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) + tx = pricedTransaction(2, 100000, big.NewInt(3), keys[1]) if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) + tx = dynamicFeeTx(3, 100000, big.NewInt(4), big.NewInt(1), keys[1]) if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } @@ -2022,19 +1933,19 @@ func TestUnderpricingDynamicFee(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 2); err != nil { + if err := validateEvents(events, 1); err != nil { t.Fatalf("additional event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding local transactions can push out even higher priced ones ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) - if err := 
pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to append underpriced local transaction: %v", err) } ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() @@ -2047,7 +1958,7 @@ func TestUnderpricingDynamicFee(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("local event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -2057,8 +1968,8 @@ func TestUnderpricingDynamicFee(t *testing.T) { func TestDualHeapEviction(t *testing.T) { t.Parallel() - pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Close() + pool, _ := setupTxPoolWithConfig(eip1559Config) + defer pool.Stop() pool.config.GlobalSlots = 10 pool.config.GlobalQueue = 10 @@ -2087,7 +1998,7 @@ func TestDualHeapEviction(t *testing.T) { tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key) highCap = tx } - pool.addRemotesSync([]*types.Transaction{tx}) + pool.AddRemotesSync([]*types.Transaction{tx}) } pending, queued := pool.Stats() if pending+queued != 20 { @@ -2106,22 +2017,21 @@ func TestDualHeapEviction(t *testing.T) { check(highTip, "effective tip") } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that the pool rejects duplicate transactions. -func TestDeduplication(t *testing.T) { +func TestTransactionDeduplication(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a test account to add transactions with key, _ := crypto.GenerateKey() @@ -2136,7 +2046,7 @@ func TestDeduplication(t *testing.T) { for i := 0; i < len(txs); i += 2 { firsts = append(firsts, txs[i]) } - errs := pool.addRemotesSync(firsts) + errs := pool.AddRemotesSync(firsts) if len(errs) != len(firsts) { t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) } @@ -2153,7 +2063,7 @@ func TestDeduplication(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) } // Try to add all of them now and ensure previous ones error out as knowns - errs = pool.addRemotesSync(txs) + errs = pool.AddRemotesSync(txs) if len(errs) != len(txs) { t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) } @@ -2172,26 +2082,25 @@ func TestDeduplication(t *testing.T) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err 
!= nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that the pool rejects replacement transactions that don't meet the minimum // price bump required. -func TestReplacement(t *testing.T) { +func TestTransactionReplacement(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -2206,10 +2115,10 @@ func TestReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap pending transaction: %v", err) } if err := validateEvents(events, 2); err != nil { @@ -2219,10 +2128,10 @@ func TestReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper pending transaction: %v", err) } if err := validateEvents(events, 2); err != nil { @@ -2230,46 +2139,46 @@ func TestReplacement(t *testing.T) { } // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { + if err := 
pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap queued transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap queued transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper queued transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper queued transaction: %v", err) } if err := validateEvents(events, 0); err != nil { t.Fatalf("queued replacement event firing failed: %v", err) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that the pool rejects replacement dynamic fee transactions that don't // meet the minimum price bump required. -func TestReplacementDynamicFee(t *testing.T) { +func TestTransactionReplacementDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Close() + pool, key := setupTxPoolWithConfig(eip1559Config) + defer pool.Stop() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) // Keep track of transaction events to ensure all executables get announced - events := make(chan core.NewTxsEvent, 32) + events := make(chan NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -2294,7 +2203,7 @@ func TestReplacementDynamicFee(t *testing.T) { stages := []string{"pending", "queued"} for _, stage := range stages { // Since state is empty, 0 nonce txs are "executable" and can go - // into pending immediately. 2 nonce txs are "gapped" nonce := uint64(0) if stage == "queued" { nonce = 2 } @@ -2307,12 +2216,12 @@ } // 2.
Don't bump tip or feecap => discard tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 3. Bump both more than min => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) } // 4. Check events match expected (2 new executable txs during pending, 0 during queue) @@ -2330,27 +2239,27 @@ func TestReplacementDynamicFee(t *testing.T) { } // 6. Bump tip max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 7. Bump fee cap max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 8. Bump tip min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 9. Bump fee cap min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 10. Check events match expected (3 new executable txs during pending, 0 during queue) tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) } // 11. 
Check events match expected (3 new executable txs during pending, 0 during queue) @@ -2363,17 +2272,17 @@ func TestReplacementDynamicFee(t *testing.T) { } } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that local transactions are journaled to disk, but remote transactions // get discarded between restarts. -func TestJournaling(t *testing.T) { testJournaling(t, false) } -func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } +func TestTransactionJournaling(t *testing.T) { testTransactionJournaling(t, false) } +func TestTransactionJournalingNoLocals(t *testing.T) { testTransactionJournaling(t, true) } -func testJournaling(t *testing.T, nolocals bool) { +func testTransactionJournaling(t *testing.T, nolocals bool) { t.Parallel() // Create a temporary file for the journal @@ -2389,16 +2298,15 @@ func testJournaling(t *testing.T, nolocals bool) { os.Remove(journal) // Create the original pool to inject transaction into the journal - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals config.Journal = journal config.Rejournal = time.Second - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + pool := NewTxPool(config, params.TestChainConfig, blockchain) // Create two test accounts to ensure remotes expire but locals do not local, _ := crypto.GenerateKey() @@ -2408,13 +2316,13 @@ func testJournaling(t *testing.T, nolocals bool) { testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) // Add three local and a remote transactions and ensure they are queued up - if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { @@ -2427,16 +2335,15 @@ func testJournaling(t *testing.T, nolocals bool) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive - pool.Close() + pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain = 
newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool = New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + pool = NewTxPool(config, params.TestChainConfig, blockchain) pending, queued = pool.Stats() if queued != 0 { @@ -2451,19 +2358,18 @@ func testJournaling(t *testing.T, nolocals bool) { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Bump the nonce temporarily and ensure the newly invalidated transaction is removed statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) <-pool.requestReset(nil, nil) time.Sleep(2 * config.Rejournal) - pool.Close() + pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + blockchain = newTestBlockchain(statedb, 1000000, new(event.Feed)) + pool = NewTxPool(config, params.TestChainConfig, blockchain) pending, queued = pool.Stats() if pending != 0 { @@ -2478,24 +2384,23 @@ func testJournaling(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - pool.Close() + pool.Stop() } -// TestStatusCheck tests that the pool can correctly retrieve the +// TestTransactionStatusCheck tests that the pool can correctly retrieve the // pending status of individual transactions. 
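// Illustrative sketch (not part of the diff): the shape of the batched status
// lookup the restored test exercises, where pool.Status takes a slice of
// hashes and returns one TxStatus per hash. statusOf and the two index maps
// are hypothetical stand-ins for the pool's internal pending/queued indices.
package main

import "fmt"

type TxStatus int

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
)

// statusOf resolves each hash against the pending and queued indices in turn,
// defaulting to unknown for hashes the pool has never seen.
func statusOf(hashes []string, pending, queued map[string]bool) []TxStatus {
	out := make([]TxStatus, len(hashes))
	for i, h := range hashes {
		switch {
		case pending[h]:
			out[i] = TxStatusPending
		case queued[h]:
			out[i] = TxStatusQueued
		default:
			out[i] = TxStatusUnknown
		}
	}
	return out
}

func main() {
	pending := map[string]bool{"a": true}
	queued := map[string]bool{"b": true}
	fmt.Println(statusOf([]string{"a", "b", "c"}, pending, queued)) // [2 1 0]
}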
-func TestStatusCheck(t *testing.T) { +func TestTransactionStatusCheck(t *testing.T) { t.Parallel() // Create the pool to test the status retrievals with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Create the test accounts to check various transaction statuses with keys := make([]*ecdsa.PrivateKey, 3) @@ -2512,7 +2417,7 @@ func TestStatusCheck(t *testing.T) { txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only // Import the transaction and ensure they are correctly added - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending, queued := pool.Stats() if pending != 2 { @@ -2521,7 +2426,7 @@ func TestStatusCheck(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validatePoolInternals(pool); err != nil { + if err := validateTxPoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Retrieve the status of each transaction and validate them @@ -2530,17 +2435,19 @@ func TestStatusCheck(t *testing.T) { hashes[i] = tx.Hash() } hashes = append(hashes, common.Hash{}) - expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown} - for i := 0; i < len(hashes); i++ { - if status := pool.Status(hashes[i]); status != expect[i] { - t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i]) + statuses := pool.Status(hashes) + expect := []TxStatus{TxStatusPending, TxStatusPending, TxStatusQueued, TxStatusQueued, TxStatusUnknown} + + for i := 0; i < len(statuses); i++ { + if statuses[i] != expect[i] { + t.Errorf("transaction %d: status mismatch: have %v, want %v", i, statuses[i], expect[i]) } } } // Test the transaction slots consumption is computed correctly -func TestSlotCount(t *testing.T) { +func TestTransactionSlotCount(t *testing.T) { t.Parallel() key, _ := crypto.GenerateKey() @@ -2565,8 +2472,8 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1 func benchmarkPendingDemotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -2590,8 +2497,8 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1 func benchmarkFuturePromotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -2608,21 +2515,21 @@ func benchmarkFuturePromotion(b *testing.B, size int) { } // Benchmarks the speed of batched transaction insertion. 
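// Illustrative sketch (not part of the diff): the measurement pattern the
// benchmarks below follow. All transaction batches are generated up front and
// the timer is reset, so only pool insertion is timed, never the signing and
// allocation work. makeBatch and insert are hypothetical stand-ins for
// building signed transactions and for pool.AddRemotes.
package main

import "testing"

func makeBatch(size int) []int { // stand-in for generating size signed txs
	return make([]int, size)
}

func insert(batch []int) { // stand-in for pool.AddRemotes(batch)
	for range batch {
	}
}

func BenchmarkBatchInsert(b *testing.B) {
	// Pre-generate every input so setup cost stays outside the timed region.
	batches := make([][]int, b.N)
	for i := 0; i < b.N; i++ {
		batches[i] = makeBatch(100)
	}
	b.ResetTimer()
	for _, batch := range batches {
		insert(batch)
	}
}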
-func BenchmarkBatchInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, false) } -func BenchmarkBatchInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, false) } -func BenchmarkBatchInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, false) } +func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, false) } +func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, false) } +func BenchmarkPoolBatchInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, false) } -func BenchmarkBatchLocalInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, true) } -func BenchmarkBatchLocalInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, true) } -func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, true) } +func BenchmarkPoolBatchLocalInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, true) } +func BenchmarkPoolBatchLocalInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, true) } +func BenchmarkPoolBatchLocalInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, true) } -func benchmarkBatchInsert(b *testing.B, size int, local bool) { +func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { // Generate a batch of transactions to enqueue into the pool - pool, key := setupPool() - defer pool.Close() + pool, key := setupTxPool() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000000000000000)) + testAddBalance(pool, account, big.NewInt(1000000)) batches := make([]types.Transactions, b.N) for i := 0; i < b.N; i++ { @@ -2635,9 +2542,9 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) { b.ResetTimer() for _, batch := range batches { if local { - pool.addLocals(batch) + pool.AddLocals(batch) } else { - pool.addRemotes(batch) + pool.AddRemotes(batch) } } } @@ -2662,26 +2569,26 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - pool, _ := setupPool() + pool, _ := setupTxPool() testAddBalance(pool, account, big.NewInt(100000000)) for _, local := range locals { - pool.addLocal(local) + pool.AddLocal(local) } b.StartTimer() // Assign a high enough balance for testing testAddBalance(pool, remoteAddr, big.NewInt(100000000)) for i := 0; i < len(remotes); i++ { - pool.addRemotes([]*types.Transaction{remotes[i]}) + pool.AddRemotes([]*types.Transaction{remotes[i]}) } - pool.Close() + pool.Stop() } } // Benchmarks the speed of batch transaction insertion in case of multiple accounts. 
-func BenchmarkMultiAccountBatchInsert(b *testing.B) { +func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool - pool, _ := setupPool() - defer pool.Close() + pool, _ := setupTxPool() + defer pool.Stop() b.ReportAllocs() batches := make(types.Transactions, b.N) for i := 0; i < b.N; i++ { @@ -2694,6 +2601,6 @@ func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Benchmark importing the transactions into the queue b.ResetTimer() for _, tx := range batches { - pool.addRemotesSync([]*types.Transaction{tx}) + pool.AddRemotesSync([]*types.Transaction{tx}) } } diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go index f544556cdb..0a25da486d 100644 --- a/core/txpool/legacypool/list.go +++ b/core/txpool/legacypool/list.go @@ -35,8 +35,8 @@ import ( "sync/atomic" "time" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/types" ) // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for diff --git a/core/txpool/legacypool/list_test.go b/core/txpool/legacypool/list_test.go index d8aaa31644..c17c0f2b2b 100644 --- a/core/txpool/legacypool/list_test.go +++ b/core/txpool/legacypool/list_test.go @@ -31,8 +31,8 @@ import ( "math/rand" "testing" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/tenderly/coreth/core/types" ) // Tests that transactions can be added to strict lists and list contents and diff --git a/core/txpool/legacypool/noncer.go b/core/txpool/legacypool/noncer.go index dd880a6ba3..4003e59031 100644 --- a/core/txpool/legacypool/noncer.go +++ b/core/txpool/legacypool/noncer.go @@ -29,8 +29,8 @@ package legacypool import ( "sync" - "github.com/ava-labs/coreth/core/state" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/state" ) // noncer is a tiny virtual state database to manage the executable nonces of diff --git a/core/types.go b/core/types.go index 77e6dd4d2b..4d3931e32d 100644 --- a/core/types.go +++ b/core/types.go @@ -27,9 +27,9 @@ package core import ( - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" ) // Validator is an interface which defines the standard for block validation. 
It diff --git a/core/types/block.go b/core/types/block.go index 6038046e31..5748ba794e 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -34,9 +34,18 @@ import ( "reflect" "sync/atomic" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" +) + +var ( + EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + EmptyCodeHash = common.BytesToHash(crypto.Keccak256(nil)) + EmptyUncleHash = rlpHash([]*Header(nil)) + EmptyExtDataHash = rlpHash([]byte(nil)) ) // A BlockNonce is a 64-bit hash which proves (combined with the @@ -67,7 +76,7 @@ func (n *BlockNonce) UnmarshalText(input []byte) error { } //go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go -//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type Header -out gen_header_rlp.go +//go:generate go run github.com/tenderly/coreth/rlp/rlpgen -type Header -out gen_header_rlp.go // Header represents a block header in the Ethereum blockchain. type Header struct { @@ -102,14 +111,8 @@ type Header struct { // headers. BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"` - // BlobGasUsed was added by EIP-4844 and is ignored in legacy headers. - BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` - - // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers. - ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` - - // ParentBeaconRoot was added by EIP-4788 and is ignored in legacy headers. - ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` + // ExtraStateRoot was added by Cortina and is ignored in legacy headers. + ExtraStateRoot common.Hash `json:"extraStateRoot" rlp:"optional"` } // field type overrides for gencodec @@ -124,8 +127,6 @@ type headerMarshaling struct { ExtDataGasUsed *hexutil.Big BlockGasCost *hexutil.Big Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON - BlobGasUsed *hexutil.Uint64 - ExcessBlobGas *hexutil.Uint64 } // Hash returns the block hash of the header, which is simply the keccak256 hash of its @@ -139,22 +140,18 @@ var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size()) // Size returns the approximate memory used by all internal contents. It is used // to approximate and limit the memory consumption of various caches. func (h *Header) Size() common.StorageSize { - var baseFeeBits int - if h.BaseFee != nil { - baseFeeBits = h.BaseFee.BitLen() - } - return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+baseFeeBits)/8) + return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8) } // EmptyBody returns true if there is no additional 'body' to complete the header // that is: no transactions and no uncles. func (h *Header) EmptyBody() bool { - return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash + return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash } // EmptyReceipts returns true if there are no receipts for this header/block. func (h *Header) EmptyReceipts() bool { - return h.ReceiptHash == EmptyReceiptsHash + return h.ReceiptHash == EmptyRootHash } // Body is a simple (mutable, non-safe) data container for storing and moving @@ -166,23 +163,7 @@ type Body struct { ExtData *[]byte `rlp:"nil"` } -// Block represents an Ethereum block.
-// -// Note the Block type tries to be 'immutable', and contains certain caches that rely -// on that. The rules around block immutability are as follows: -// -// - We copy all data when the block is constructed. This makes references held inside -// the block independent of whatever value was passed in. -// -// - We copy all header data on access. This is because any change to the header would mess -// up the cached hash and size values in the block. Calling code is expected to take -// advantage of this to avoid over-allocating! -// -// - When new body data is attached to the block, a shallow copy of the block is returned. -// This ensures block modifications are race-free. -// -// - We do not copy body data on access because it does not affect the caches, and also -// because it would be too expensive. +// Block represents an entire block in the Ethereum blockchain. type Block struct { header *Header uncles []*Header @@ -206,20 +187,22 @@ type extblock struct { ExtData *[]byte `rlp:"nil"` } -// NewBlock creates a new block. The input data is copied, changes to header and to the -// field values will not affect the block. +// NewBlock creates a new block. The input data is copied, +// changes to header and to the field values will not affect the +// block. // // The values of TxHash, UncleHash, ReceiptHash and Bloom in header // are ignored and set to values derived from the given txs, uncles // and receipts. func NewBlock( - header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher, + header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, + hasher TrieHasher, extdata []byte, recalc bool, ) *Block { b := &Block{header: CopyHeader(header)} // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { - b.header.TxHash = EmptyTxsHash + b.header.TxHash = EmptyRootHash } else { b.header.TxHash = DeriveSha(Transactions(txs), hasher) b.transactions = make(Transactions, len(txs)) @@ -227,7 +210,7 @@ func NewBlock( } if len(receipts) == 0 { - b.header.ReceiptHash = EmptyReceiptsHash + b.header.ReceiptHash = EmptyRootHash } else { b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher) b.header.Bloom = CreateBloom(receipts) @@ -243,10 +226,19 @@ func NewBlock( } } + b.setExtData(extdata, recalc) return b } -// CopyHeader creates a deep copy of a block header. +// NewBlockWithHeader creates a block with the given header data. The +// header data is copied, changes to header and to the field values +// will not affect the block. +func NewBlockWithHeader(header *Header) *Block { + return &Block{header: CopyHeader(header)} +} + +// CopyHeader creates a deep copy of a block header to prevent side effects from +// modifying a header variable. func CopyHeader(h *Header) *Header { cpy := *h if cpy.Difficulty = new(big.Int); h.Difficulty != nil { @@ -268,22 +260,10 @@ func CopyHeader(h *Header) *Header { cpy.Extra = make([]byte, len(h.Extra)) copy(cpy.Extra, h.Extra) } - if h.ExcessBlobGas != nil { - cpy.ExcessBlobGas = new(uint64) - *cpy.ExcessBlobGas = *h.ExcessBlobGas - } - if h.BlobGasUsed != nil { - cpy.BlobGasUsed = new(uint64) - *cpy.BlobGasUsed = *h.BlobGasUsed - } - if h.ParentBeaconRoot != nil { - cpy.ParentBeaconRoot = new(common.Hash) - *cpy.ParentBeaconRoot = *h.ParentBeaconRoot - } return &cpy } -// DecodeRLP decodes a block from RLP. 
+// DecodeRLP decodes the Ethereum func (b *Block) DecodeRLP(s *rlp.Stream) error { var eb extblock _, size, _ := s.Kind() @@ -291,13 +271,45 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error { return err } b.header, b.uncles, b.transactions, b.version, b.extdata = eb.Header, eb.Uncles, eb.Txs, eb.Version, eb.ExtData - b.size.Store(rlp.ListSize(size)) + b.size.Store(common.StorageSize(rlp.ListSize(size))) return nil } -// EncodeRLP serializes a block as RLP. +func (b *Block) setExtDataHelper(data *[]byte, recalc bool) { + if data == nil { + b.setExtData(nil, recalc) + return + } + b.setExtData(*data, recalc) +} + +func (b *Block) setExtData(data []byte, recalc bool) { + _data := make([]byte, len(data)) + b.extdata = &_data + copy(*b.extdata, data) + if recalc { + b.header.ExtDataHash = CalcExtDataHash(*b.extdata) + } +} + +func (b *Block) ExtData() []byte { + if b.extdata == nil { + return nil + } + return *b.extdata +} + +func (b *Block) SetVersion(ver uint32) { + b.version = ver +} + +func (b *Block) Version() uint32 { + return b.version +} + +// EncodeRLP serializes b into the Ethereum RLP block format. func (b *Block) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &extblock{ + return rlp.Encode(w, extblock{ Header: b.header, Txs: b.transactions, Uncles: b.uncles, @@ -306,14 +318,7 @@ func (b *Block) EncodeRLP(w io.Writer) error { }) } -// Body returns the non-header content of the block. -// Note the returned data is not an independent copy. -func (b *Block) Body() *Body { - return &Body{b.transactions, b.uncles, b.version, b.extdata} -} - -// Accessors for body data. These do not return a copy because the content -// of the body slices does not affect the cached hash/size in block. +// TODO: copies func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Transactions() Transactions { return b.transactions } @@ -327,19 +332,12 @@ func (b *Block) Transaction(hash common.Hash) *Transaction { return nil } -// Header returns the block header (as a copy). -func (b *Block) Header() *Header { - return CopyHeader(b.header) -} - -// Header value accessors. These do copy! 
- func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) } func (b *Block) GasLimit() uint64 { return b.header.GasLimit } func (b *Block) GasUsed() uint64 { return b.header.GasUsed } func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) } func (b *Block) Time() uint64 { return b.header.Time } -func (b *Block) Timestamp() uint64 { return b.header.Time } +func (b *Block) Timestamp() *big.Int { return new(big.Int).SetUint64(b.header.Time) } func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() } func (b *Block) MixDigest() common.Hash { return b.header.MixDigest } @@ -360,24 +358,11 @@ func (b *Block) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) } -func (b *Block) BeaconRoot() *common.Hash { return b.header.ParentBeaconRoot } - -func (b *Block) ExcessBlobGas() *uint64 { - var excessBlobGas *uint64 - if b.header.ExcessBlobGas != nil { - excessBlobGas = new(uint64) - *excessBlobGas = *b.header.ExcessBlobGas - } - return excessBlobGas -} - -func (b *Block) BlobGasUsed() *uint64 { - var blobGasUsed *uint64 - if b.header.BlobGasUsed != nil { - blobGasUsed = new(uint64) - *blobGasUsed = *b.header.BlobGasUsed +func (b *Block) ExtDataGasUsed() *big.Int { + if b.header.ExtDataGasUsed == nil { + return nil } - return blobGasUsed + return new(big.Int).Set(b.header.ExtDataGasUsed) } func (b *Block) BlockGasCost() *big.Int { @@ -387,25 +372,37 @@ func (b *Block) BlockGasCost() *big.Int { return new(big.Int).Set(b.header.BlockGasCost) } +func (b *Block) Header() *Header { return CopyHeader(b.header) } + +// Body returns the non-header content of the block. +func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.version, b.extdata} } + // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previously cached value. -func (b *Block) Size() uint64 { +func (b *Block) Size() common.StorageSize { if size := b.size.Load(); size != nil { - return size.(uint64) + return size.(common.StorageSize) } c := writeCounter(0) rlp.Encode(&c, b) - b.size.Store(uint64(c)) - return uint64(c) + b.size.Store(common.StorageSize(c)) + return common.StorageSize(c) } -type writeCounter uint64 +type writeCounter common.StorageSize func (c *writeCounter) Write(b []byte) (int, error) { *c += writeCounter(len(b)) return len(b), nil } +func CalcExtDataHash(extdata []byte) common.Hash { + if len(extdata) == 0 { + return EmptyExtDataHash + } + return rlpHash(extdata) +} + func CalcUncleHash(uncles []*Header) common.Hash { if len(uncles) == 0 { return EmptyUncleHash @@ -413,34 +410,31 @@ func CalcUncleHash(uncles []*Header) common.Hash { return rlpHash(uncles) } -// NewBlockWithHeader creates a block with the given header data. The -// header data is copied, changes to header and to the field values -// will not affect the block. -func NewBlockWithHeader(header *Header) *Block { - return &Block{header: CopyHeader(header)} -} - // WithSeal returns a new block with the data from b but the header replaced with // the sealed one. func (b *Block) WithSeal(header *Header) *Block { + cpy := *header + return &Block{ - header: CopyHeader(header), + header: &cpy, transactions: b.transactions, uncles: b.uncles, } } -// WithBody returns a copy of the block with the given transaction and uncle contents. -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { +// WithBody returns a new block with the given transaction and uncle contents. 
+func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, version uint32, extdata *[]byte) *Block { block := &Block{ - header: b.header, + header: CopyHeader(b.header), transactions: make([]*Transaction, len(transactions)), uncles: make([]*Header, len(uncles)), + version: version, } copy(block.transactions, transactions) for i := range uncles { block.uncles[i] = CopyHeader(uncles[i]) } + block.setExtDataHelper(extdata, false) return block } diff --git a/core/types/block_test.go b/core/types/block_test.go index 6da8f441f9..1be0d747de 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -28,16 +28,17 @@ package types import ( "bytes" + "hash" "math/big" "reflect" "testing" - "github.com/ava-labs/coreth/internal/blocktest" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" + "golang.org/x/crypto/sha3" ) // This test has been modified from https://github.com/ethereum/go-ethereum/blob/v1.9.21/core/types/block_test.go#L35 to fit @@ -74,7 +75,7 @@ func TestBlockEncoding(t *testing.T) { check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) check("BlockGasCost", block.BlockGasCost(), (*big.Int)(nil)) - check("Size", block.Size(), uint64(len(blockEnc))) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) check("BlockHash", block.Hash(), common.HexToHash("0608e5d5e13c337f226b621a0b08b3d50470f1961329826fd59f5a241d1df49e")) txHash := common.HexToHash("f5a60149da2ea4e97061a9f47c66036ee843fa76cd1f9ce5a71eb55ff90b2e0e") @@ -111,7 +112,7 @@ func TestEIP1559BlockEncoding(t *testing.T) { check("Hash", block.Hash(), common.HexToHash("2aefaa81ae43541bf2d608e2bb26a157212394abad4d219c06163be0d5d010f8")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) check("Time", block.Time(), uint64(1426516743)) - check("Size", block.Size(), uint64(len(blockEnc))) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) check("BaseFee", block.BaseFee(), new(big.Int).SetUint64(1_000_000_000)) check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) check("BlockGasCost", block.BlockGasCost(), (*big.Int)(nil)) @@ -176,7 +177,7 @@ func TestEIP2718BlockEncoding(t *testing.T) { check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) check("Time", block.Time(), uint64(1426516743)) - check("Size", block.Size(), uint64(len(blockEnc))) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) check("ExtDataHash", block.header.ExtDataHash, common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")) check("BaseFee", block.BaseFee(), (*big.Int)(nil)) check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) @@ -206,11 +207,11 @@ func TestEIP2718BlockEncoding(t *testing.T) { }) sig2 := common.Hex2Bytes("3dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef1401") tx2, _ = tx2.WithSignature(NewEIP2930Signer(big.NewInt(1)), sig2) + check("Transactions[1].Type()", block.Transactions()[1].Type(), uint8(AccessListTxType)) check("len(Transactions)", len(block.Transactions()), 2) check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash()) check("Transactions[1].Hash", block.Transactions()[1].Hash(), 
tx2.Hash()) - check("Transactions[1].Type()", block.Transactions()[1].Type(), uint8(AccessListTxType)) if !bytes.Equal(block.ExtData(), []byte{}) { t.Errorf("Block ExtraData field mismatch, expected empty byte array, but found 0x%x", block.ExtData()) @@ -257,7 +258,7 @@ func TestBlockEncodingWithExtraData(t *testing.T) { check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) check("BlockGasCost", block.BlockGasCost(), (*big.Int)(nil)) - check("Size", block.Size(), uint64(len(blockEnc))) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) check("BlockHash", block.Hash(), common.HexToHash("4504ee98a94d16dbd70a35370501a3cb00c2965b012672085fbd328a72962902")) check("len(Transactions)", len(block.Transactions()), 0) @@ -299,6 +300,30 @@ func BenchmarkEncodeBlock(b *testing.B) { } } +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. +type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) { + h.hasher.Write(key) + h.hasher.Write(val) +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + func makeBenchBlock() *Block { var ( key, _ = crypto.GenerateKey() @@ -337,7 +362,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher()) + return NewBlock(header, txs, uncles, receipts, newHasher(), nil, true) } func TestAP4BlockEncoding(t *testing.T) { @@ -363,7 +388,7 @@ func TestAP4BlockEncoding(t *testing.T) { check("Hash", block.Hash(), common.HexToHash("0xc41340f5d2af79a12373bc8d6f0f05f9f98b240834608f428da171449e8a1468")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) check("Time", block.Time(), uint64(1426516743)) - check("Size", block.Size(), uint64(len(blockEnc))) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) check("BaseFee", block.BaseFee(), big.NewInt(1_000_000_000)) check("ExtDataGasUsed", block.ExtDataGasUsed(), big.NewInt(25_000)) check("BlockGasCost", block.BlockGasCost(), big.NewInt(1_000_000)) diff --git a/core/types/gen_account_rlp.go b/core/types/gen_account_rlp.go index cf0e572336..c49d23a9ea 100644 --- a/core/types/gen_account_rlp.go +++ b/core/types/gen_account_rlp.go @@ -1,11 +1,14 @@ // Code generated by rlpgen. DO NOT EDIT. +//go:build !norlpgen +// +build !norlpgen + package types import ( "io" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) func (obj *StateAccount) EncodeRLP(_w io.Writer) error { diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go index 632a6ed48e..7036c7b51c 100644 --- a/core/types/gen_header_json.go +++ b/core/types/gen_header_json.go @@ -16,29 +16,27 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
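The testHasher added to block_test.go exists purely to break an import cycle: NewBlock needs something that can hash transaction and receipt lists, but the real implementation lives in the trie package. Any type with Reset/Update/Hash will do; a hedged usage sketch of that shape (keccakPairHasher is an invented name, and unlike the trie it folds pairs into a single running digest):

package main

import (
	"fmt"
	"hash"

	"github.com/ethereum/go-ethereum/common"
	"golang.org/x/crypto/sha3"
)

// keccakPairHasher folds (key, value) pairs into one Keccak-256 digest: the
// same minimal interface as the test helper, sufficient for tests but not a
// real Merkle trie.
type keccakPairHasher struct{ h hash.Hash }

func (k *keccakPairHasher) Reset()                 { k.h.Reset() }
func (k *keccakPairHasher) Update(key, val []byte) { k.h.Write(key); k.h.Write(val) }
func (k *keccakPairHasher) Hash() common.Hash      { return common.BytesToHash(k.h.Sum(nil)) }

func main() {
	k := &keccakPairHasher{h: sha3.NewLegacyKeccak256()}
	k.Update([]byte{0x80}, []byte("tx-0-rlp")) // index key, encoded list item
	fmt.Println(k.Hash().Hex())
}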
func (h Header) MarshalJSON() ([]byte, error) { type Header struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase common.Address `json:"miner" gencodec:"required"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash"` - Nonce BlockNonce `json:"nonce"` - ExtDataHash common.Hash `json:"extDataHash" gencodec:"required"` - BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` - ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` - BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` - ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` - Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase common.Address `json:"miner" gencodec:"required"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` + Number *hexutil.Big `json:"number" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData" gencodec:"required"` + MixDigest common.Hash `json:"mixHash"` + Nonce BlockNonce `json:"nonce"` + ExtDataHash common.Hash `json:"extDataHash" gencodec:"required"` + BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` + ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` + BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` + ExtraStateRoot common.Hash `json:"extraStateRoot" rlp:"optional"` + Hash common.Hash `json:"hash"` } var enc Header enc.ParentHash = h.ParentHash @@ -60,9 +58,7 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.BaseFee = (*hexutil.Big)(h.BaseFee) enc.ExtDataGasUsed = (*hexutil.Big)(h.ExtDataGasUsed) enc.BlockGasCost = (*hexutil.Big)(h.BlockGasCost) - enc.BlobGasUsed = (*hexutil.Uint64)(h.BlobGasUsed) - enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas) - enc.ParentBeaconRoot = h.ParentBeaconRoot + enc.ExtraStateRoot = h.ExtraStateRoot enc.Hash = h.Hash() return json.Marshal(&enc) } @@ -70,28 +66,26 @@ func (h Header) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. 
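The regenerated MarshalJSON relies on gencodec's convention of shadowing numeric header fields with hexutil wrapper types so they serialize as 0x-prefixed quantities. A small demonstration of that mapping (miniHeader is an invented stand-in for the generated shadow struct):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// miniHeader mirrors the tagging style of the generated shadow struct.
type miniHeader struct {
	Number   *hexutil.Big   `json:"number"`
	GasLimit hexutil.Uint64 `json:"gasLimit"`
}

func main() {
	h := miniHeader{Number: (*hexutil.Big)(big.NewInt(1_000_000)), GasLimit: 8_000_000}
	out, _ := json.Marshal(h)
	fmt.Println(string(out)) // {"number":"0xf4240","gasLimit":"0x7a1200"}
}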
func (h *Header) UnmarshalJSON(input []byte) error { type Header struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase *common.Address `json:"miner" gencodec:"required"` - Root *common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom *Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest *common.Hash `json:"mixHash"` - Nonce *BlockNonce `json:"nonce"` - ExtDataHash *common.Hash `json:"extDataHash" gencodec:"required"` - BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` - ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` - BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` - ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase *common.Address `json:"miner" gencodec:"required"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom *Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` + Number *hexutil.Big `json:"number" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *BlockNonce `json:"nonce"` + ExtDataHash *common.Hash `json:"extDataHash" gencodec:"required"` + BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` + ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` + BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` + ExtraStateRoot *common.Hash `json:"extraStateRoot" rlp:"optional"` } var dec Header if err := json.Unmarshal(input, &dec); err != nil { @@ -168,14 +162,8 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.BlockGasCost != nil { h.BlockGasCost = (*big.Int)(dec.BlockGasCost) } - if dec.BlobGasUsed != nil { - h.BlobGasUsed = (*uint64)(dec.BlobGasUsed) - } - if dec.ExcessBlobGas != nil { - h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) - } - if dec.ParentBeaconRoot != nil { - h.ParentBeaconRoot = dec.ParentBeaconRoot + if dec.ExtraStateRoot != nil { + h.ExtraStateRoot = *dec.ExtraStateRoot } return nil } diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go index 711a33b8ca..ba542d2199 100644 --- a/core/types/gen_header_rlp.go +++ b/core/types/gen_header_rlp.go @@ -1,8 +1,12 @@ // Code generated by rlpgen. DO NOT EDIT. 
+//go:build !norlpgen +// +build !norlpgen + package types -import "github.com/ethereum/go-ethereum/rlp" +import "github.com/ethereum/go-ethereum/common" +import "github.com/tenderly/coreth/rlp" import "io" func (obj *Header) EncodeRLP(_w io.Writer) error { @@ -41,10 +45,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { _tmp1 := obj.BaseFee != nil _tmp2 := obj.ExtDataGasUsed != nil _tmp3 := obj.BlockGasCost != nil - _tmp4 := obj.BlobGasUsed != nil - _tmp5 := obj.ExcessBlobGas != nil - _tmp6 := obj.ParentBeaconRoot != nil - if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 { + _tmp4 := obj.ExtraStateRoot != (common.Hash{}) + if _tmp1 || _tmp2 || _tmp3 || _tmp4 { if obj.BaseFee == nil { w.Write(rlp.EmptyString) } else { @@ -54,7 +56,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BaseFee) } } - if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 { + if _tmp2 || _tmp3 || _tmp4 { if obj.ExtDataGasUsed == nil { w.Write(rlp.EmptyString) } else { @@ -64,7 +66,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.ExtDataGasUsed) } } - if _tmp3 || _tmp4 || _tmp5 || _tmp6 { + if _tmp3 || _tmp4 { if obj.BlockGasCost == nil { w.Write(rlp.EmptyString) } else { @@ -74,26 +76,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BlockGasCost) } } - if _tmp4 || _tmp5 || _tmp6 { - if obj.BlobGasUsed == nil { - w.Write([]byte{0x80}) - } else { - w.WriteUint64((*obj.BlobGasUsed)) - } - } - if _tmp5 || _tmp6 { - if obj.ExcessBlobGas == nil { - w.Write([]byte{0x80}) - } else { - w.WriteUint64((*obj.ExcessBlobGas)) - } - } - if _tmp6 { - if obj.ParentBeaconRoot == nil { - w.Write([]byte{0x80}) - } else { - w.WriteBytes(obj.ParentBeaconRoot[:]) - } + if _tmp4 { + w.WriteBytes(obj.ExtraStateRoot[:]) } w.ListEnd(_tmp0) return w.Flush() diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go index 89f2083d4b..62d7e83c6f 100644 --- a/core/types/gen_log_rlp.go +++ b/core/types/gen_log_rlp.go @@ -1,14 +1,17 @@ // Code generated by rlpgen. DO NOT EDIT. 
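The cascading _tmp conditions in the regenerated header encoder above implement the rlp "optional" rule: a trailing field may be dropped entirely, but once any later optional field is present, every earlier one must still be written (a nil big.Int becomes an empty-string placeholder). The reflection-based encoder applies the same rule via struct tags; a sketch, assuming geth's rlp package behavior for `rlp:"optional"`:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// tail mimics a header's optional trailing fields.
type tail struct {
	N uint64
	A *big.Int `rlp:"optional"`
	B *big.Int `rlp:"optional"`
}

func main() {
	short, _ := rlp.EncodeToBytes(&tail{N: 1})                  // tail omitted entirely
	long, _ := rlp.EncodeToBytes(&tail{N: 1, B: big.NewInt(7)}) // nil A forced to a 0x80 placeholder
	fmt.Printf("no tail: %x\nwith B:  %x\n", short, long)       // c101 vs c3018007
}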
+//go:build !norlpgen +// +build !norlpgen + package types import ( "io" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) -func (obj *Log) EncodeRLP(_w io.Writer) error { +func (obj *rlpLog) EncodeRLP(_w io.Writer) error { w := rlp.NewEncoderBuffer(_w) _tmp0 := w.List() w.WriteBytes(obj.Address[:]) diff --git a/core/types/hashing.go b/core/types/hashing.go index 40291f5406..10dd260e88 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -34,7 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" "golang.org/x/crypto/sha3" ) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index 17874cf747..df40439792 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -34,13 +34,13 @@ import ( mrand "math/rand" "testing" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) func TestDeriveSha(t *testing.T) { @@ -49,7 +49,7 @@ func TestDeriveSha(t *testing.T) { t.Fatal(err) } for len(txs) < 1000 { - exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) got := types.DeriveSha(txs, trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) @@ -96,7 +96,7 @@ func BenchmarkDeriveSha200(b *testing.B) { b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) } }) @@ -117,7 +117,7 @@ func TestFuzzDeriveSha(t *testing.T) { rndSeed := mrand.Int() for i := 0; i < 10; i++ { seed := rndSeed + i - exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { printList(newDummy(seed)) @@ -145,7 +145,7 @@ func TestDerivableList(t *testing.T) { }, } for i, tc := range tcs[1:] { - exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) + exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("case %d: got %x exp %x", i, got, exp) @@ -229,10 +229,9 @@ func (d *hashToHumanReadable) Reset() { d.data = make([]byte, 0) } -func (d *hashToHumanReadable) Update(i []byte, i2 []byte) error { +func (d *hashToHumanReadable) Update(i []byte, i2 []byte) { l := fmt.Sprintf("%x %x\n", i, i2) d.data = append(d.data, []byte(l)...) 
- return nil } func (d *hashToHumanReadable) Hash() common.Hash { diff --git a/core/types/log.go b/core/types/log.go index fc7e42f295..f173f2cc54 100644 --- a/core/types/log.go +++ b/core/types/log.go @@ -27,11 +27,13 @@ package types import ( + "io" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/tenderly/coreth/rlp" ) -//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type Log -out gen_log_rlp.go //go:generate go run github.com/fjl/gencodec -type Log -field-override logMarshaling -out gen_log_json.go // Log represents a contract log event. These events are generated by the LOG opcode and @@ -48,19 +50,19 @@ type Log struct { // Derived fields. These fields are filled in by the node // but not secured by consensus. // block in which the transaction was included - BlockNumber uint64 `json:"blockNumber" rlp:"-"` + BlockNumber uint64 `json:"blockNumber"` // hash of the transaction - TxHash common.Hash `json:"transactionHash" gencodec:"required" rlp:"-"` + TxHash common.Hash `json:"transactionHash" gencodec:"required"` // index of the transaction in the block - TxIndex uint `json:"transactionIndex" rlp:"-"` + TxIndex uint `json:"transactionIndex"` // hash of the block in which the transaction was included - BlockHash common.Hash `json:"blockHash" rlp:"-"` + BlockHash common.Hash `json:"blockHash"` // index of the log in the block - Index uint `json:"logIndex" rlp:"-"` + Index uint `json:"logIndex"` // The Removed field is true if this log was reverted due to a chain reorganisation. // You must pay attention to this field if you receive logs through a filter query. - Removed bool `json:"removed" rlp:"-"` + Removed bool `json:"removed"` } type logMarshaling struct { @@ -70,11 +72,79 @@ type logMarshaling struct { Index hexutil.Uint } -// FlattenLogs converts a nested array of logs to a single array of logs. -func FlattenLogs(list [][]*Log) []*Log { - var flat []*Log - for _, logs := range list { - flat = append(flat, logs...) +//go:generate go run github.com/tenderly/coreth/rlp/rlpgen -type rlpLog -out gen_log_rlp.go + +type rlpLog struct { + Address common.Address + Topics []common.Hash + Data []byte +} + +// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields. +type legacyRlpStorageLog struct { + Address common.Address + Topics []common.Hash + Data []byte + BlockNumber uint64 + TxHash common.Hash + TxIndex uint + BlockHash common.Hash + Index uint +} + +// EncodeRLP implements rlp.Encoder. +func (l *Log) EncodeRLP(w io.Writer) error { + rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data} + return rlp.Encode(w, &rl) +} + +// DecodeRLP implements rlp.Decoder. +func (l *Log) DecodeRLP(s *rlp.Stream) error { + var dec rlpLog + err := s.Decode(&dec) + if err == nil { + l.Address, l.Topics, l.Data = dec.Address, dec.Topics, dec.Data + } + return err +} + +// LogForStorage is a wrapper around a Log that handles +// backward compatibility with prior storage formats. +type LogForStorage Log + +// EncodeRLP implements rlp.Encoder. +func (l *LogForStorage) EncodeRLP(w io.Writer) error { + rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data} + return rlp.Encode(w, &rl) +} + +// DecodeRLP implements rlp.Decoder. +// +// Note some redundant fields(e.g. block number, tx hash etc) will be assembled later. 
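With the rlpLog wrapper above, a log's RLP form carries only the three consensus fields; everything derived (block number, tx hash and index, log index, removed flag) is dropped on encode and reattached by the node on read. A standalone round-trip showing what survives (slimLog is a local copy for the example, not the package's type):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// slimLog mirrors rlpLog: the consensus-only view of a log.
type slimLog struct {
	Address common.Address
	Topics  []common.Hash
	Data    []byte
}

func main() {
	in := slimLog{
		Address: common.HexToAddress("0x0100000000000000000000000000000000000000"),
		Topics:  []common.Hash{common.HexToHash("0x02")},
		Data:    []byte{0xca, 0xfe},
	}
	blob, _ := rlp.EncodeToBytes(&in)

	var out slimLog
	if err := rlp.DecodeBytes(blob, &out); err != nil {
		panic(err)
	}
	// Address, Topics and Data round-trip; block/tx positions never existed
	// in the encoding and must be filled in from chain context later.
	fmt.Println(out.Address == in.Address, len(out.Topics), out.Data)
}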
+func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { + blob, err := s.Raw() + if err != nil { + return err + } + var dec rlpLog + err = rlp.DecodeBytes(blob, &dec) + if err == nil { + *l = LogForStorage{ + Address: dec.Address, + Topics: dec.Topics, + Data: dec.Data, + } + } else { + // Try to decode log with previous definition. + var dec legacyRlpStorageLog + err = rlp.DecodeBytes(blob, &dec) + if err == nil { + *l = LogForStorage{ + Address: dec.Address, + Topics: dec.Topics, + Data: dec.Data, + } + } } - return flat + return err } diff --git a/core/types/receipt.go b/core/types/receipt.go index 1686b8d217..659fa82ff1 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -34,11 +34,11 @@ import ( "math/big" "unsafe" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" ) //go:generate go run github.com/fjl/gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go @@ -69,12 +69,10 @@ type Receipt struct { Logs []*Log `json:"logs" gencodec:"required"` // Implementation fields: These fields are added by geth when processing a transaction. - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress common.Address `json:"contractAddress"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` - EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility - BlobGasUsed uint64 `json:"blobGasUsed,omitempty"` - BlobGasPrice *big.Int `json:"blobGasPrice,omitempty"` + // They are stored in the chain database. + TxHash common.Hash `json:"transactionHash" gencodec:"required"` + ContractAddress common.Address `json:"contractAddress"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. @@ -89,9 +87,6 @@ type receiptMarshaling struct { Status hexutil.Uint64 CumulativeGasUsed hexutil.Uint64 GasUsed hexutil.Uint64 - EffectiveGasPrice *hexutil.Big - BlobGasUsed hexutil.Uint64 - BlobGasPrice *hexutil.Big BlockNumber *hexutil.Big TransactionIndex hexutil.Uint } @@ -108,7 +103,28 @@ type receiptRLP struct { type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 - Logs []*Log + Logs []*LogForStorage +} + +// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. +type v4StoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + TxHash common.Hash + ContractAddress common.Address + Logs []*LogForStorage + GasUsed uint64 +} + +// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. +type v3StoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + Bloom Bloom + TxHash common.Hash + ContractAddress common.Address + Logs []*LogForStorage + GasUsed uint64 } // NewReceipt creates a barebone transaction receipt, copying the init fields. @@ -163,7 +179,7 @@ func (r *Receipt) MarshalBinary() ([]byte, error) { // DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt // from an RLP stream. 
func (r *Receipt) DecodeRLP(s *rlp.Stream) error { - kind, size, err := s.Kind() + kind, _, err := s.Kind() switch { case err != nil: return err @@ -175,18 +191,12 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error { } r.Type = LegacyTxType return r.setFromRLP(dec) - case kind == rlp.Byte: - return errShortTypedReceipt default: // It's an EIP-2718 typed tx receipt. - b, buf, err := getPooledBuffer(size) + b, err := s.Bytes() if err != nil { return err } - defer encodeBufferPool.Put(buf) - if err := s.ReadBytes(b); err != nil { - return err - } return r.decodeTyped(b) } } @@ -214,7 +224,7 @@ func (r *Receipt) decodeTyped(b []byte) error { return errShortTypedReceipt } switch b[0] { - case DynamicFeeTxType, AccessListTxType, BlobTxType: + case DynamicFeeTxType, AccessListTxType: var data receiptRLP err := rlp.DecodeBytes(b[1:], &data) if err != nil { @@ -280,7 +290,7 @@ func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error { w.WriteUint64(r.CumulativeGasUsed) logList := w.List() for _, log := range r.Logs { - if err := log.EncodeRLP(w); err != nil { + if err := rlp.Encode(w, log); err != nil { return err } } @@ -292,20 +302,82 @@ func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error { // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation // fields of a receipt from an RLP stream. func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { + // Retrieve the entire receipt blob as we need to try multiple decoders + blob, err := s.Raw() + if err != nil { + return err + } + // Try decoding from the newest format for future proofness, then the older one + // for old nodes that just upgraded. V4 was an intermediate unreleased format so + // we do need to decode it, but it's not common (try last). + if err := decodeStoredReceiptRLP(r, blob); err == nil { + return nil + } + if err := decodeV3StoredReceiptRLP(r, blob); err == nil { + return nil + } + return decodeV4StoredReceiptRLP(r, blob) +} + +func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { var stored storedReceiptRLP - if err := s.Decode(&stored); err != nil { + if err := rlp.DecodeBytes(blob, &stored); err != nil { return err } if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { return err } r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Logs = stored.Logs + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) return nil } +func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored v4StoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.TxHash = stored.TxHash + r.ContractAddress = stored.ContractAddress + r.GasUsed = stored.GasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) + + return nil +} + +func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored v3StoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.Bloom = stored.Bloom + r.TxHash = stored.TxHash + r.ContractAddress = stored.ContractAddress + r.GasUsed = stored.GasUsed + r.Logs = 
make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + return nil +} + // Receipts implements DerivableList for receipts. type Receipts []*Receipt @@ -316,13 +388,14 @@ func (rs Receipts) Len() int { return len(rs) } func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { r := rs[i] data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} - if r.Type == LegacyTxType { - rlp.Encode(w, data) - return - } - w.WriteByte(r.Type) switch r.Type { - case AccessListTxType, DynamicFeeTxType, BlobTxType: + case LegacyTxType: + rlp.Encode(w, data) + case AccessListTxType: + w.WriteByte(AccessListTxType) + rlp.Encode(w, data) + case DynamicFeeTxType: + w.WriteByte(DynamicFeeTxType) rlp.Encode(w, data) default: // For unsupported types, write nothing. Since this is for @@ -333,8 +406,8 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { // DeriveFields fills the receipts with their computed fields based on consensus // data and contextual infos like containing block and transactions. -func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs []*Transaction) error { - signer := MakeSigner(config, new(big.Int).SetUint64(number), time) +func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, txs Transactions) error { + signer := MakeSigner(config, new(big.Int).SetUint64(number), new(big.Int).SetUint64(time)) logIndex := uint(0) if len(txs) != len(rs) { @@ -344,13 +417,6 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu // The transaction type and hash can be retrieved from the transaction itself rs[i].Type = txs[i].Type() rs[i].TxHash = txs[i].Hash() - rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee) - - // EIP-4844 blob transaction fields - if txs[i].Type() == BlobTxType { - rs[i].BlobGasUsed = txs[i].BlobGas() - rs[i].BlobGasPrice = blobGasPrice - } // block location fields rs[i].BlockHash = hash @@ -362,17 +428,13 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu // Deriving the signer is expensive, only do if it's actually needed from, _ := Sender(signer, txs[i]) rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) - } else { - rs[i].ContractAddress = common.Address{} } - // The used gas can be calculated based on previous r if i == 0 { rs[i].GasUsed = rs[i].CumulativeGasUsed } else { rs[i].GasUsed = rs[i].CumulativeGasUsed - rs[i-1].CumulativeGasUsed } - // The derived log fields can simply be set from the block and transaction for j := 0; j < len(rs[i].Logs); j++ { rs[i].Logs[j].BlockNumber = number diff --git a/core/types/state_account.go b/core/types/state_account.go index 54d22c8ba9..1b91cbabbe 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -27,14 +27,12 @@ package types import ( - "bytes" "math/big" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" ) -//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type StateAccount -out gen_account_rlp.go +//go:generate go run github.com/tenderly/coreth/rlp/rlpgen -type StateAccount -out gen_account_rlp.go // StateAccount is the Ethereum consensus representation of accounts. // These objects are stored in the main account trie. 
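ReceiptForStorage.DecodeRLP above reads the raw blob once with s.Raw() and then tries each historical schema in turn, keeping the first that decodes cleanly. The same try-newest-then-fall-back pattern in isolation (both formats below are invented for the example):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

type currentFormat struct{ Value uint64 }

type legacyFormat struct {
	Value uint64
	Extra []byte // redundant field dropped from the current schema
}

// decodeStored tries the newest encoding first; on a shape mismatch it
// retries with the legacy layout, mirroring the receipt storage decoder.
func decodeStored(blob []byte) (uint64, error) {
	var cur currentFormat
	if err := rlp.DecodeBytes(blob, &cur); err == nil {
		return cur.Value, nil
	}
	var old legacyFormat
	if err := rlp.DecodeBytes(blob, &old); err != nil {
		return 0, err
	}
	return old.Value, nil
}

func main() {
	blob, _ := rlp.EncodeToBytes(&legacyFormat{Value: 42, Extra: []byte{1}})
	v, err := decodeStored(blob)
	fmt.Println(v, err) // 42 <nil>
}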
@@ -45,91 +43,3 @@ type StateAccount struct { CodeHash []byte IsMultiCoin bool } - -// NewEmptyStateAccount constructs an empty state account. -func NewEmptyStateAccount() *StateAccount { - return &StateAccount{ - Balance: new(big.Int), - Root: EmptyRootHash, - CodeHash: EmptyCodeHash.Bytes(), - } -} - -// Copy returns a deep-copied state account object. -func (acct *StateAccount) Copy() *StateAccount { - var balance *big.Int - if acct.Balance != nil { - balance = new(big.Int).Set(acct.Balance) - } - return &StateAccount{ - Nonce: acct.Nonce, - Balance: balance, - Root: acct.Root, - CodeHash: common.CopyBytes(acct.CodeHash), - IsMultiCoin: acct.IsMultiCoin, - } -} - -// SlimAccount is a modified version of an Account, where the root is replaced -// with a byte slice. This format can be used to represent full-consensus format -// or slim format which replaces the empty root and code hash as nil byte slice. -type SlimAccount struct { - Nonce uint64 - Balance *big.Int - Root []byte // Nil if root equals to types.EmptyRootHash - CodeHash []byte // Nil if hash equals to types.EmptyCodeHash - IsMultiCoin bool -} - -// SlimAccountRLP encodes the state account in 'slim RLP' format. -func SlimAccountRLP(account StateAccount) []byte { - slim := SlimAccount{ - Nonce: account.Nonce, - Balance: account.Balance, - IsMultiCoin: account.IsMultiCoin, - } - if account.Root != EmptyRootHash { - slim.Root = account.Root[:] - } - if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) { - slim.CodeHash = account.CodeHash - } - data, err := rlp.EncodeToBytes(slim) - if err != nil { - panic(err) - } - return data -} - -// FullAccount decodes the data on the 'slim RLP' format and returns -// the consensus format account. -func FullAccount(data []byte) (*StateAccount, error) { - var slim SlimAccount - if err := rlp.DecodeBytes(data, &slim); err != nil { - return nil, err - } - var account StateAccount - account.Nonce, account.Balance, account.IsMultiCoin = slim.Nonce, slim.Balance, slim.IsMultiCoin - - // Interpret the storage root and code hash in slim format. - if len(slim.Root) == 0 { - account.Root = EmptyRootHash - } else { - account.Root = common.BytesToHash(slim.Root) - } - if len(slim.CodeHash) == 0 { - account.CodeHash = EmptyCodeHash[:] - } else { - account.CodeHash = slim.CodeHash - } - return &account, nil -} - -// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format. -func FullAccountRLP(data []byte) ([]byte, error) { - account, err := FullAccount(data) - if err != nil { - return nil, err - } - return rlp.EncodeToBytes(account) -} diff --git a/core/types/transaction.go b/core/types/transaction.go index bf33165a77..2e3837b4f4 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -28,6 +28,7 @@ package types import ( "bytes" + "container/heap" "errors" "io" "math/big" @@ -37,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) var ( @@ -47,17 +48,13 @@ var ( ErrTxTypeNotSupported = errors.New("transaction type not supported") ErrGasFeeCapTooLow = errors.New("fee cap less than base fee") errShortTypedTx = errors.New("typed transaction too short") - errInvalidYParity = errors.New("'yParity' field must be 0 or 1") - errVYParityMismatch = errors.New("'v' and 'yParity' fields do not match") - errVYParityMissing = errors.New("missing 'yParity' or 'v' field in transaction") ) // Transaction types. 
const ( - LegacyTxType = 0x00 - AccessListTxType = 0x01 - DynamicFeeTxType = 0x02 - BlobTxType = 0x03 + LegacyTxType = iota + AccessListTxType + DynamicFeeTxType ) // Transaction is an Ethereum transaction. @@ -98,17 +95,6 @@ type TxData interface { rawSignatureValues() (v, r, s *big.Int) setSignatureValues(chainID, v, r, s *big.Int) - - // effectiveGasPrice computes the gas price paid by the transaction, given - // the inclusion block baseFee. - // - // Unlike other TxData methods, the returned *big.Int should be an independent - // copy of the computed value, i.e. callers are allowed to mutate the result. - // Method implementations can use 'dst' to store the result. - effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int - - encode(*bytes.Buffer) error - decode([]byte) error } // EncodeRLP implements rlp.Encoder @@ -129,7 +115,7 @@ func (tx *Transaction) EncodeRLP(w io.Writer) error { // encodeTyped writes the canonical encoding of a typed transaction to w. func (tx *Transaction) encodeTyped(w *bytes.Buffer) error { w.WriteByte(tx.Type()) - return tx.inner.encode(w) + return rlp.Encode(w, tx.inner) } // MarshalBinary returns the canonical encoding of the transaction. @@ -155,33 +141,25 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { var inner LegacyTx err := s.Decode(&inner) if err == nil { - tx.setDecoded(&inner, rlp.ListSize(size)) + tx.setDecoded(&inner, int(rlp.ListSize(size))) } return err - case kind == rlp.Byte: - return errShortTypedTx default: // It's an EIP-2718 typed TX envelope. - // First read the tx payload bytes into a temporary buffer. - b, buf, err := getPooledBuffer(size) - if err != nil { + var b []byte + if b, err = s.Bytes(); err != nil { return err } - defer encodeBufferPool.Put(buf) - if err := s.ReadBytes(b); err != nil { - return err - } - // Now decode the inner transaction. inner, err := tx.decodeTyped(b) if err == nil { - tx.setDecoded(inner, size) + tx.setDecoded(inner, len(b)) } return err } } // UnmarshalBinary decodes the canonical encoding of transactions. -// It supports legacy RLP transactions and EIP-2718 typed transactions. +// It supports legacy RLP transactions and EIP2718 typed transactions. func (tx *Transaction) UnmarshalBinary(b []byte) error { if len(b) > 0 && b[0] > 0x7f { // It's a legacy transaction. @@ -190,15 +168,15 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error { if err != nil { return err } - tx.setDecoded(&data, uint64(len(b))) + tx.setDecoded(&data, len(b)) return nil } - // It's an EIP-2718 typed transaction envelope. + // It's an EIP2718 typed transaction envelope. inner, err := tx.decodeTyped(b) if err != nil { return err } - tx.setDecoded(inner, uint64(len(b))) + tx.setDecoded(inner, len(b)) return nil } @@ -207,27 +185,26 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { if len(b) <= 1 { return nil, errShortTypedTx } - var inner TxData switch b[0] { case AccessListTxType: - inner = new(AccessListTx) + var inner AccessListTx + err := rlp.DecodeBytes(b[1:], &inner) + return &inner, err case DynamicFeeTxType: - inner = new(DynamicFeeTx) - case BlobTxType: - inner = new(BlobTx) + var inner DynamicFeeTx + err := rlp.DecodeBytes(b[1:], &inner) + return &inner, err default: return nil, ErrTxTypeNotSupported } - err := inner.decode(b[1:]) - return inner, err } // setDecoded sets the inner transaction and size after decoding. 
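UnmarshalBinary above branches on the first byte: a canonical legacy transaction is an RLP list, so its first byte is always above 0x7f, while an EIP-2718 typed envelope begins with a one-byte type identifier in 0x00..0x7f followed by the type-specific payload. A minimal classifier following that rule (the labels are descriptive only):

package main

import "fmt"

// txKind classifies a canonical transaction encoding per EIP-2718.
func txKind(b []byte) string {
	switch {
	case len(b) == 0:
		return "empty"
	case b[0] > 0x7f:
		return "legacy RLP transaction" // first byte of an RLP list header
	case b[0] == 0x01:
		return "EIP-2930 access-list transaction"
	case b[0] == 0x02:
		return "EIP-1559 dynamic-fee transaction"
	default:
		return "unknown typed transaction"
	}
}

func main() {
	fmt.Println(txKind([]byte{0xf8, 0x6b})) // legacy RLP transaction
	fmt.Println(txKind([]byte{0x02, 0xf8})) // EIP-1559 dynamic-fee transaction
}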
-func (tx *Transaction) setDecoded(inner TxData, size uint64) { +func (tx *Transaction) setDecoded(inner TxData, size int) { tx.inner = inner tx.time = time.Now() if size > 0 { - tx.size.Store(size) + tx.size.Store(common.StorageSize(size)) } } @@ -318,12 +295,9 @@ func (tx *Transaction) To() *common.Address { return copyAddressPtr(tx.inner.to()) } -// Cost returns (gas * gasPrice) + (blobGas * blobGasPrice) + value. +// Cost returns gas * gasPrice + value. func (tx *Transaction) Cost() *big.Int { total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) - if tx.Type() == BlobTxType { - total.Add(total, new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas()))) - } total.Add(total, tx.Value()) return total } @@ -392,81 +366,6 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i return tx.EffectiveGasTipValue(baseFee).Cmp(other) } -// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise. -func (tx *Transaction) BlobGas() uint64 { - if blobtx, ok := tx.inner.(*BlobTx); ok { - return blobtx.blobGas() - } - return 0 -} - -// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise. -func (tx *Transaction) BlobGasFeeCap() *big.Int { - if blobtx, ok := tx.inner.(*BlobTx); ok { - return blobtx.BlobFeeCap.ToBig() - } - return nil -} - -// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise. -func (tx *Transaction) BlobHashes() []common.Hash { - if blobtx, ok := tx.inner.(*BlobTx); ok { - return blobtx.BlobHashes - } - return nil -} - -// BlobTxSidecar returns the sidecar of a blob transaction, nil otherwise. -func (tx *Transaction) BlobTxSidecar() *BlobTxSidecar { - if blobtx, ok := tx.inner.(*BlobTx); ok { - return blobtx.Sidecar - } - return nil -} - -// BlobGasFeeCapCmp compares the blob fee cap of two transactions. -func (tx *Transaction) BlobGasFeeCapCmp(other *Transaction) int { - return tx.BlobGasFeeCap().Cmp(other.BlobGasFeeCap()) -} - -// BlobGasFeeCapIntCmp compares the blob fee cap of the transaction against the given blob fee cap. -func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int { - return tx.BlobGasFeeCap().Cmp(other) -} - -// WithoutBlobTxSidecar returns a copy of tx with the blob sidecar removed. -func (tx *Transaction) WithoutBlobTxSidecar() *Transaction { - blobtx, ok := tx.inner.(*BlobTx) - if !ok { - return tx - } - cpy := &Transaction{ - inner: blobtx.withoutSidecar(), - time: tx.time, - } - // Note: tx.size cache not carried over because the sidecar is included in size! - if h := tx.hash.Load(); h != nil { - cpy.hash.Store(h) - } - if f := tx.from.Load(); f != nil { - cpy.from.Store(f) - } - return cpy -} - -// SetTime sets the decoding time of a transaction. This is used by tests to set -// arbitrary times and by persistent transaction pools when loading old txs from -// disk. -func (tx *Transaction) SetTime(t time.Time) { - tx.time = t -} - -// Time returns the time when the transaction was first seen on the network. It -// is a heuristic to prefer mining older txs vs new all other things equal. -func (tx *Transaction) Time() time.Time { - return tx.time -} - // Hash returns the transaction hash. 
func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { @@ -483,32 +382,16 @@ func (tx *Transaction) Hash() common.Hash { return h } -// Size returns the true encoded storage size of the transaction, either by encoding -// and returning it, or returning a previously cached value. -func (tx *Transaction) Size() uint64 { +// Size returns the true RLP encoded storage size of the transaction, either by +// encoding and returning it, or returning a previously cached value. +func (tx *Transaction) Size() common.StorageSize { if size := tx.size.Load(); size != nil { - return size.(uint64) + return size.(common.StorageSize) } - - // Cache miss, encode and cache. - // Note we rely on the assumption that all tx.inner values are RLP-encoded! c := writeCounter(0) rlp.Encode(&c, &tx.inner) - size := uint64(c) - - // For blob transactions, add the size of the blob content and the outer list of the - // tx + sidecar encoding. - if sc := tx.BlobTxSidecar(); sc != nil { - size += rlp.ListSize(sc.encodedSize()) - } - - // For typed transactions, the encoding also includes the leading type byte. - if tx.Type() != LegacyTxType { - size += 1 - } - - tx.size.Store(size) - return size + tx.size.Store(common.StorageSize(c)) + return common.StorageSize(c) } // WithSignature returns a new transaction with the given signature. @@ -523,6 +406,16 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e return &Transaction{inner: cpy, time: tx.time}, nil } +// FirstSeen returns the time a transaction was first seen. +func (tx *Transaction) FirstSeen() time.Time { + return tx.time +} + +// SetFirstSeen overwrites the time a transaction was first seen. +func (tx *Transaction) SetFirstSeen(t time.Time) { + tx.time = t +} + // Transactions implements DerivableList for transactions. type Transactions []*Transaction @@ -586,6 +479,190 @@ func (s TxByNonce) Len() int { return len(s) } func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() } func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap. +type TxWithMinerFee struct { + Tx *Transaction + minerFee *big.Int +} + +// NewTxWithMinerFee creates a wrapped transaction, calculating the effective +// miner gasTipCap if a base fee is provided. +// Returns an error in case of a negative effective miner gasTipCap. +func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { + minerFee, err := tx.EffectiveGasTip(baseFee) + if err != nil { + return nil, err + } + return &TxWithMinerFee{ + Tx: tx, + minerFee: minerFee, + }, nil +} + +// TxByPriceAndTime implements both the sort and the heap interface, making it useful +// for all at once sorting as well as individually adding and removing elements.
+type TxByPriceAndTime []*TxWithMinerFee + +func (s TxByPriceAndTime) Len() int { return len(s) } +func (s TxByPriceAndTime) Less(i, j int) bool { + // If the prices are equal, use the time the transaction was first seen for + // deterministic sorting. + cmp := s[i].minerFee.Cmp(s[j].minerFee) + if cmp == 0 { + return s[i].Tx.time.Before(s[j].Tx.time) + } + return cmp > 0 +} +func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s *TxByPriceAndTime) Push(x interface{}) { + *s = append(*s, x.(*TxWithMinerFee)) +} + +func (s *TxByPriceAndTime) Pop() interface{} { + old := *s + n := len(old) + x := old[n-1] + *s = old[0 : n-1] + return x +} + +// TransactionsByPriceAndNonce represents a set of transactions that can return +// transactions in a profit-maximizing sorted order, while supporting removing +// entire batches of transactions for non-executable accounts. +type TransactionsByPriceAndNonce struct { + txs map[common.Address]Transactions // Per account nonce-sorted list of transactions + heads TxByPriceAndTime // Next transaction for each unique account (price heap) + signer Signer // Signer for the set of transactions + baseFee *big.Int // Current base fee +} + +// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve +// price-sorted transactions in a nonce-honouring way. +// +// Note, the input map is reowned, so the caller should not interact with it +// anymore after providing it to the constructor. +func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { + // Initialize a price and received time based heap with the head transactions + heads := make(TxByPriceAndTime, 0, len(txs)) + for from, accTxs := range txs { + acc, _ := Sender(signer, accTxs[0]) + wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) + // Remove transaction if sender doesn't match from, or if wrapping fails. + if acc != from || err != nil { + delete(txs, from) + continue + } + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + heap.Init(&heads) + + // Assemble and return the transaction set + return &TransactionsByPriceAndNonce{ + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFee, + } +} + +// Peek returns the next transaction by price. +func (t *TransactionsByPriceAndNonce) Peek() *Transaction { + if len(t.heads) == 0 { + return nil + } + return t.heads[0].Tx +} + +// Shift replaces the current best head with the next one from the same account. +func (t *TransactionsByPriceAndNonce) Shift() { + acc, _ := Sender(t.signer, t.heads[0].Tx) + if txs, ok := t.txs[acc]; ok && len(txs) > 0 { + if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil { + t.heads[0], t.txs[acc] = wrapped, txs[1:] + heap.Fix(&t.heads, 0) + return + } + } + heap.Pop(&t.heads) +} + +// Pop removes the best transaction, *not* replacing it with the next one from +// the same account. This should be used when a transaction cannot be executed +// and hence all subsequent ones should be discarded from the same account. +func (t *TransactionsByPriceAndNonce) Pop() { + heap.Pop(&t.heads) +} + +// Message is a fully derived transaction and implements core.Message. +// +// NOTE: In a future PR this will be removed.
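Block builders consume this structure as a loop over Peek, Shift and Pop: take the current best head, attempt it, Shift on success to surface the sender's next nonce, and Pop on failure so the sender's later, now unexecutable nonces are skipped too. A hedged sketch of that consumption pattern; buildBlock and applyTx are illustrative names, not part of the package:

// buildBlock drains a price-and-nonce ordered set. applyTx stands in for the
// caller's execution logic (gas accounting, state transition, and so on).
func buildBlock(set *TransactionsByPriceAndNonce, applyTx func(*Transaction) error) {
	for {
		tx := set.Peek()
		if tx == nil {
			return // nothing executable remains
		}
		if err := applyTx(tx); err != nil {
			set.Pop() // drop this sender entirely: later nonces cannot execute
			continue
		}
		set.Shift() // success: advance to the same sender's next transaction
	}
}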
+type Message struct { + to *common.Address + from common.Address + nonce uint64 + amount *big.Int + gasLimit uint64 + gasPrice *big.Int + gasFeeCap *big.Int + gasTipCap *big.Int + data []byte + accessList AccessList + isFake bool +} + +func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice, gasFeeCap, gasTipCap *big.Int, data []byte, accessList AccessList, isFake bool) Message { + return Message{ + from: from, + to: to, + nonce: nonce, + amount: amount, + gasLimit: gasLimit, + gasPrice: gasPrice, + gasFeeCap: gasFeeCap, + gasTipCap: gasTipCap, + data: data, + accessList: accessList, + isFake: isFake, + } +} + +// AsMessage returns the transaction as a core.Message. +func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) { + msg := Message{ + nonce: tx.Nonce(), + gasLimit: tx.Gas(), + gasPrice: new(big.Int).Set(tx.GasPrice()), + gasFeeCap: new(big.Int).Set(tx.GasFeeCap()), + gasTipCap: new(big.Int).Set(tx.GasTipCap()), + to: tx.To(), + amount: tx.Value(), + data: tx.Data(), + accessList: tx.AccessList(), + isFake: false, + } + // If baseFee provided, set gasPrice to effectiveGasPrice. + if baseFee != nil { + msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.gasTipCap, baseFee), msg.gasFeeCap) + } + var err error + msg.from, err = Sender(s, tx) + return msg, err +} + +func (m Message) From() common.Address { return m.from } +func (m Message) To() *common.Address { return m.to } +func (m Message) GasPrice() *big.Int { return m.gasPrice } +func (m Message) GasFeeCap() *big.Int { return m.gasFeeCap } +func (m Message) GasTipCap() *big.Int { return m.gasTipCap } +func (m Message) Value() *big.Int { return m.amount } +func (m Message) Gas() uint64 { return m.gasLimit } +func (m Message) Nonce() uint64 { return m.nonce } +func (m Message) Data() []byte { return m.data } +func (m Message) AccessList() AccessList { return m.accessList } +func (m Message) IsFake() bool { return m.isFake } + // copyAddressPtr copies an address. 
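The base-fee branch in AsMessage is the EIP-1559 effective-price rule: the price actually charged is min(gasTipCap + baseFee, gasFeeCap). A worked standalone example of that arithmetic using the same math.BigMin helper:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

// effectiveGasPrice reproduces the rule applied in AsMessage.
func effectiveGasPrice(gasTipCap, gasFeeCap, baseFee *big.Int) *big.Int {
	return math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
}

func main() {
	tip := big.NewInt(2_000_000_000)    // 2 gwei tip
	feeCap := big.NewInt(50_000_000_000) // 50 gwei fee cap
	base := big.NewInt(30_000_000_000)  // 30 gwei base fee
	fmt.Println(effectiveGasPrice(tip, feeCap, base)) // 32000000000: tip+base stays under the cap
}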
func copyAddressPtr(a *common.Address) *common.Address { if a == nil { diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index ac6e432893..60a897b3f7 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -32,9 +32,9 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/tenderly/coreth/params" ) var ErrInvalidChainId = errors.New("invalid chain id for signer") diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go index 0976f59751..eebcae3a88 100644 --- a/core/types/transaction_signing_test.go +++ b/core/types/transaction_signing_test.go @@ -27,13 +27,12 @@ package types import ( - "errors" "math/big" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) func TestEIP155Signing(t *testing.T) { @@ -122,9 +121,6 @@ func TestEIP155SigningVitalik(t *testing.T) { if from != addr { t.Errorf("%d: expected %x got %x", i, addr, from) } - if !tx.Protected() { - t.Errorf("%d: expected to be protected", i) - } } } @@ -140,8 +136,8 @@ func TestChainId(t *testing.T) { } _, err = Sender(NewEIP155Signer(big.NewInt(2)), tx) - if !errors.Is(err, ErrInvalidChainId) { - t.Error("expected error:", ErrInvalidChainId, err) + if err != ErrInvalidChainId { + t.Error("expected error:", ErrInvalidChainId) } _, err = Sender(NewEIP155Signer(big.NewInt(1)), tx) diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 4471a6ba45..a57553700f 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -38,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) // The values in those tests are from the Transaction Tests diff --git a/core/types/types_test.go b/core/types/types_test.go index 7b68db9b4b..73443e90eb 100644 --- a/core/types/types_test.go +++ b/core/types/types_test.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) type devnull struct{ len int } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 77d0ecc808..52c30bc2c5 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -33,17 +33,16 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile/contract" - "github.com/ava-labs/coreth/precompile/modules" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" - "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/tenderly/coreth/constants" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/precompile" + "github.com/tenderly/coreth/vmerrs" "golang.org/x/crypto/ripemd160" ) @@ -57,7 +56,7 @@ type PrecompiledContract interface { // PrecompiledContractsHomestead contains the default set of pre-compiled Ethereum // contracts used in the Frontier and Homestead releases. 
-var PrecompiledContractsHomestead = map[common.Address]contract.StatefulPrecompiledContract{ +var PrecompiledContractsHomestead = map[common.Address]precompile.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -66,7 +65,7 @@ var PrecompiledContractsHomestead = map[common.Address]contract.StatefulPrecompi // PrecompiledContractsByzantium contains the default set of pre-compiled Ethereum // contracts used in the Byzantium release. -var PrecompiledContractsByzantium = map[common.Address]contract.StatefulPrecompiledContract{ +var PrecompiledContractsByzantium = map[common.Address]precompile.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -79,7 +78,7 @@ var PrecompiledContractsByzantium = map[common.Address]contract.StatefulPrecompi // PrecompiledContractsIstanbul contains the default set of pre-compiled Ethereum // contracts used in the Istanbul release. -var PrecompiledContractsIstanbul = map[common.Address]contract.StatefulPrecompiledContract{ +var PrecompiledContractsIstanbul = map[common.Address]precompile.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -93,41 +92,7 @@ var PrecompiledContractsIstanbul = map[common.Address]contract.StatefulPrecompil // PrecompiledContractsApricotPhase2 contains the default set of pre-compiled Ethereum // contracts used in the Apricot Phase 2 release. -var PrecompiledContractsApricotPhase2 = map[common.Address]contract.StatefulPrecompiledContract{ - common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), - common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), - common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), - common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}), - common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}), - common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}), - common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}), - common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}), - common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), - genesisContractAddr: &deprecatedContract{}, - NativeAssetBalanceAddr: &nativeAssetBalance{gasCost: params.AssetBalanceApricot}, - NativeAssetCallAddr: &nativeAssetCall{gasCost: params.AssetCallApricot}, -} - -// PrecompiledContractsApricotPhasePre6 contains the default set of pre-compiled Ethereum -// contracts used in the PrecompiledContractsApricotPhasePre6 release. 
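Each of these per-release maps keys a fixed address to a stateful precompile, so dispatch at call time is a single map lookup on the callee. A toy version of that lookup; the precompile interface below is an invented stand-in for StatefulPrecompiledContract:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// precompile stands in for the stateful precompiled contract interface.
type precompile interface{ Name() string }

type sha256hash struct{}

func (sha256hash) Name() string { return "sha256" }

// activePrecompiles mirrors the per-release maps: a handful of fixed low
// addresses resolving to contract implementations.
var activePrecompiles = map[common.Address]precompile{
	common.BytesToAddress([]byte{2}): sha256hash{},
}

func main() {
	callee := common.BytesToAddress([]byte{2})
	if p, ok := activePrecompiles[callee]; ok {
		fmt.Println("dispatching to precompile:", p.Name())
		return
	}
	fmt.Println("regular contract call")
}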
-var PrecompiledContractsApricotPhasePre6 = map[common.Address]contract.StatefulPrecompiledContract{ - common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), - common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), - common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), - common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}), - common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}), - common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}), - common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}), - common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}), - common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), - genesisContractAddr: &deprecatedContract{}, - NativeAssetBalanceAddr: &deprecatedContract{}, - NativeAssetCallAddr: &deprecatedContract{}, -} - -// PrecompiledContractsApricotPhase6 contains the default set of pre-compiled Ethereum -// contracts used in the Apricot Phase 6 release. -var PrecompiledContractsApricotPhase6 = map[common.Address]contract.StatefulPrecompiledContract{ +var PrecompiledContractsApricotPhase2 = map[common.Address]precompile.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -144,7 +109,7 @@ var PrecompiledContractsApricotPhase6 = map[common.Address]contract.StatefulPrec // PrecompiledContractsBanff contains the default set of pre-compiled Ethereum // contracts used in the Banff release. -var PrecompiledContractsBanff = map[common.Address]contract.StatefulPrecompiledContract{ +var PrecompiledContractsBanff = map[common.Address]precompile.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -159,46 +124,13 @@ var PrecompiledContractsBanff = map[common.Address]contract.StatefulPrecompiledC NativeAssetCallAddr: &deprecatedContract{}, } -// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum -// contracts used in the Cancun release. 
-var PrecompiledContractsCancun = map[common.Address]contract.StatefulPrecompiledContract{ - common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), - common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), - common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), - common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}), - common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}), - common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}), - common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}), - common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}), - common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), - common.BytesToAddress([]byte{0x0a}): newWrappedPrecompiledContract(&kzgPointEvaluation{}), -} - -// PrecompiledContractsBLS contains the set of pre-compiled Ethereum -// contracts specified in EIP-2537. These are exported for testing purposes. -var PrecompiledContractsBLS = map[common.Address]contract.StatefulPrecompiledContract{ - common.BytesToAddress([]byte{10}): newWrappedPrecompiledContract(&bls12381G1Add{}), - common.BytesToAddress([]byte{11}): newWrappedPrecompiledContract(&bls12381G1Mul{}), - common.BytesToAddress([]byte{12}): newWrappedPrecompiledContract(&bls12381G1MultiExp{}), - common.BytesToAddress([]byte{13}): newWrappedPrecompiledContract(&bls12381G2Add{}), - common.BytesToAddress([]byte{14}): newWrappedPrecompiledContract(&bls12381G2Mul{}), - common.BytesToAddress([]byte{15}): newWrappedPrecompiledContract(&bls12381G2MultiExp{}), - common.BytesToAddress([]byte{16}): newWrappedPrecompiledContract(&bls12381Pairing{}), - common.BytesToAddress([]byte{17}): newWrappedPrecompiledContract(&bls12381MapG1{}), - common.BytesToAddress([]byte{18}): newWrappedPrecompiledContract(&bls12381MapG2{}), -} - var ( - PrecompiledAddressesCancun []common.Address - PrecompiledAddressesBanff []common.Address - PrecompiledAddressesApricotPhase6 []common.Address - PrecompiledAddressesApricotPhasePre6 []common.Address - PrecompiledAddressesApricotPhase2 []common.Address - PrecompiledAddressesIstanbul []common.Address - PrecompiledAddressesByzantium []common.Address - PrecompiledAddressesHomestead []common.Address - PrecompiledAddressesBLS []common.Address - PrecompileAllNativeAddresses map[common.Address]struct{} + PrecompiledAddressesBanff []common.Address + PrecompiledAddressesApricotPhase2 []common.Address + PrecompiledAddressesIstanbul []common.Address + PrecompiledAddressesByzantium []common.Address + PrecompiledAddressesHomestead []common.Address + PrecompileAllNativeAddresses map[common.Address]struct{} ) func init() { @@ -214,21 +146,9 @@ func init() { for k := range PrecompiledContractsApricotPhase2 { PrecompiledAddressesApricotPhase2 = append(PrecompiledAddressesApricotPhase2, k) } - for k := range PrecompiledContractsApricotPhasePre6 { - PrecompiledAddressesApricotPhasePre6 = append(PrecompiledAddressesApricotPhasePre6, k) - } - for k := range PrecompiledContractsApricotPhase6 { - PrecompiledAddressesApricotPhase6 = append(PrecompiledAddressesApricotPhase6, k) - } for k := range PrecompiledContractsBanff { PrecompiledAddressesBanff = append(PrecompiledAddressesBanff, k) } - for k := range PrecompiledContractsCancun { - PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k) - } - for k := range PrecompiledContractsBLS { - 
PrecompiledAddressesBLS = append(PrecompiledAddressesBLS, k) - } // Set of all native precompile addresses that are in use // Note: this will repeat some addresses, but this is cheap and makes the code clearer. @@ -236,21 +156,31 @@ func init() { addrsList := append(PrecompiledAddressesHomestead, PrecompiledAddressesByzantium...) addrsList = append(addrsList, PrecompiledAddressesIstanbul...) addrsList = append(addrsList, PrecompiledAddressesApricotPhase2...) - addrsList = append(addrsList, PrecompiledAddressesApricotPhasePre6...) - addrsList = append(addrsList, PrecompiledAddressesApricotPhase6...) addrsList = append(addrsList, PrecompiledAddressesBanff...) - addrsList = append(addrsList, PrecompiledAddressesCancun...) - addrsList = append(addrsList, PrecompiledAddressesBLS...) for _, k := range addrsList { PrecompileAllNativeAddresses[k] = struct{}{} } // Ensure that this package will panic during init if there is a conflict present with the declared // precompile addresses. - for _, module := range modules.RegisteredModules() { - address := module.Address - if _, ok := PrecompileAllNativeAddresses[address]; ok { - panic(fmt.Errorf("precompile address collides with existing native address: %s", address)) + for _, k := range precompile.UsedAddresses { + if _, ok := PrecompileAllNativeAddresses[k]; ok { + panic(fmt.Errorf("precompile address collides with existing native address: %s", k)) + } + if k == constants.BlackholeAddr { + panic(fmt.Errorf("cannot use address %s for stateful precompile - overlaps with blackhole address", k)) + } + + // check that [k] belongs to at least one ReservedRange + found := false + for _, reservedRange := range precompile.ReservedRanges { + if reservedRange.Contains(k) { + found = true + break + } + } + if !found { + panic(fmt.Errorf("address %s used for stateful precompile but not specified in any reserved range", k)) } } } @@ -258,8 +188,6 @@ func init() { // ActivePrecompiles returns the precompiles enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { - case rules.IsCancun: - return PrecompiledAddressesCancun case rules.IsBanff: return PrecompiledAddressesBanff case rules.IsApricotPhase2: @@ -368,7 +296,7 @@ func (c *dataCopy) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.IdentityPerWordGas + params.IdentityBaseGas } func (c *dataCopy) Run(in []byte) ([]byte, error) { - return common.CopyBytes(in), nil + return in, nil } // bigModExp implements a native big integer exponential modular operation. 
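// A minimal sketch of the reserved-range validation performed in the init()
// above. The addressRange type and its contains method are assumptions
// modeled on how precompile.ReservedRanges and Contains are used in this
// hunk; only common.HexToAddress is a real go-ethereum API here.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// addressRange is a closed interval of 20-byte addresses, compared
// lexicographically, mirroring the reserved precompile ranges.
type addressRange struct{ start, end common.Address }

func (r addressRange) contains(a common.Address) bool {
	return bytes.Compare(a.Bytes(), r.start.Bytes()) >= 0 &&
		bytes.Compare(a.Bytes(), r.end.Bytes()) <= 0
}

func main() {
	reserved := addressRange{
		start: common.HexToAddress("0x0100000000000000000000000000000000000000"),
		end:   common.HexToAddress("0x01000000000000000000000000000000000000ff"),
	}
	fmt.Println(reserved.contains(common.HexToAddress("0x0100000000000000000000000000000000000001"))) // true
	fmt.Println(reserved.contains(common.HexToAddress("0x0100000000000000000000000000000000000100"))) // false: past the range end
}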
@@ -396,10 +324,10 @@ var ( // modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198 // -// def mult_complexity(x): -// if x <= 64: return x ** 2 -// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 -// else: return x ** 2 // 16 + 480 * x - 199680 +// def mult_complexity(x): +// if x <= 64: return x ** 2 +// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 +// else: return x ** 2 // 16 + 480 * x - 199680 // // where x is max(length_of_MODULUS, length_of_BASE) func modexpMultComplexity(x *big.Int) *big.Int { @@ -513,19 +441,12 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { base = new(big.Int).SetBytes(getData(input, 0, baseLen)) exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) - v []byte ) - switch { - case mod.BitLen() == 0: + if mod.BitLen() == 0 { // Modulo 0 is undefined, return zero return common.LeftPadBytes([]byte{}, int(modLen)), nil - case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1). - //If base == 1, then we can just return base % mod (if mod >= 1, which it is) - v = base.Mod(base, mod).Bytes() - default: - v = base.Exp(base, exp, mod).Bytes() } - return common.LeftPadBytes(v, int(modLen)), nil + return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil } // newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point, @@ -1075,7 +996,7 @@ func (c *bls12381Pairing) Run(input []byte) ([]byte, error) { return nil, errBLS12381G2PointSubgroup } - // Update pairing engine with G1 and G2 points + // Update pairing engine with G1 and G2 points e.AddPair(p1, p2) } // Prepare 32 byte output @@ -1115,7 +1036,7 @@ func (c *bls12381MapG1) RequiredGas(input []byte) uint64 { func (c *bls12381MapG1) Run(input []byte) ([]byte, error) { // Implements EIP-2537 Map_To_G1 precompile. - // > Field-to-curve call expects `64` bytes as an input that is interpreted as a an element of the base field. + // > Field-to-curve call expects `64` bytes as an input that is interpreted as an element of the base field. // > Output of this call is `128` bytes and is G1 point following respective encoding rules. if len(input) != 64 { return nil, errBLS12381InvalidInputLength @@ -1150,7 +1071,7 @@ func (c *bls12381MapG2) RequiredGas(input []byte) uint64 { func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Implements EIP-2537 Map_FP2_TO_G2 precompile logic. - // > Field-to-curve call expects `128` bytes as an input that is interpreted as a an element of the quadratic extension field. + // > Field-to-curve call expects `128` bytes as an input that is interpreted as an element of the quadratic extension field. // > Output of this call is `256` bytes and is G2 point following respective encoding rules. if len(input) != 128 { return nil, errBLS12381InvalidInputLength @@ -1181,67 +1102,3 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Encode the G2 point to 256 bytes return g.EncodePoint(r), nil } - -// kzgPointEvaluation implements the EIP-4844 point evaluation precompile. -type kzgPointEvaluation struct{} - -// RequiredGas estimates the gas required for running the point evaluation precompile. -func (b *kzgPointEvaluation) RequiredGas(input []byte) uint64 { - return params.BlobTxPointEvaluationPrecompileGas -} - -const ( - blobVerifyInputLength = 192 // Max input length for the point evaluation precompile. - blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile.
- blobPrecompileReturnValue = "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001" -) - -var ( - errBlobVerifyInvalidInputLength = errors.New("invalid input length") - errBlobVerifyMismatchedVersion = errors.New("mismatched versioned hash") - errBlobVerifyKZGProof = errors.New("error verifying kzg proof") -) - -// Run executes the point evaluation precompile. -func (b *kzgPointEvaluation) Run(input []byte) ([]byte, error) { - if len(input) != blobVerifyInputLength { - return nil, errBlobVerifyInvalidInputLength - } - // versioned hash: first 32 bytes - var versionedHash common.Hash - copy(versionedHash[:], input[:]) - - var ( - point kzg4844.Point - claim kzg4844.Claim - ) - // Evaluation point: next 32 bytes - copy(point[:], input[32:]) - // Expected output: next 32 bytes - copy(claim[:], input[64:]) - - // input kzg point: next 48 bytes - var commitment kzg4844.Commitment - copy(commitment[:], input[96:]) - if kZGToVersionedHash(commitment) != versionedHash { - return nil, errBlobVerifyMismatchedVersion - } - - // Proof: next 48 bytes - var proof kzg4844.Proof - copy(proof[:], input[144:]) - - if err := kzg4844.VerifyProof(commitment, point, claim, proof); err != nil { - return nil, fmt.Errorf("%w: %v", errBlobVerifyKZGProof, err) - } - - return common.Hex2Bytes(blobPrecompileReturnValue), nil -} - -// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844 -func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { - h := sha256.Sum256(kzg[:]) - h[0] = blobCommitmentVersionKZG - - return h -} diff --git a/core/vm/contracts_fuzz_test.go b/core/vm/contracts_fuzz_test.go index e955287ba3..6a3c8e0062 100644 --- a/core/vm/contracts_fuzz_test.go +++ b/core/vm/contracts_fuzz_test.go @@ -1,4 +1,4 @@ -// (c) 2019-2024, Ava Labs, Inc. +// (c) 2021-2022, Ava Labs, Inc. // // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********** -// Copyright 2023 The go-ethereum Authors +// Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,31 +24,27 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
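// The modexp hunk earlier in core/vm/contracts.go quotes the EIP-198
// mult_complexity formula as Python pseudocode. A direct Go transcription for
// int64-sized inputs, a sketch for clarity only; the real bigModExp gas code
// operates on *big.Int values:
package main

import "fmt"

// multComplexity mirrors the quoted formula, where x is
// max(length_of_MODULUS, length_of_BASE).
func multComplexity(x int64) int64 {
	switch {
	case x <= 64:
		return x * x
	case x <= 1024:
		return x*x/4 + 96*x - 3072
	default:
		return x*x/16 + 480*x - 199680
	}
}

func main() {
	for _, x := range []int64{32, 64, 128, 1024, 2048} {
		fmt.Printf("mult_complexity(%d) = %d\n", x, multComplexity(x))
	}
}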
-package vm +package leveldb import ( "testing" - "github.com/ethereum/go-ethereum/common" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/ethdb/dbtest" ) -func FuzzPrecompiledContracts(f *testing.F) { - // Create list of addresses - var addrs []common.Address - for k := range allPrecompiles { - addrs = append(addrs, k) - } - f.Fuzz(func(t *testing.T, addr uint8, input []byte) { - a := addrs[int(addr)%len(addrs)] - p := allPrecompiles[a] - gas := p.RequiredGas(input) - if gas > 10_000_000 { - return - } - inWant := string(input) - RunPrecompiledContract(p, input, gas) - if inHave := string(input); inWant != inHave { - t.Errorf("Precompiled %v modified input data", a) - } +func TestLevelDB(t *testing.T) { + t.Run("DatabaseSuite", func(t *testing.T) { + dbtest.TestDatabaseSuite(t, func() ethdb.KeyValueStore { + db, err := leveldb.Open(storage.NewMemStorage(), nil) + if err != nil { + t.Fatal(err) + } + return &Database{ + db: db, + } + }) }) } diff --git a/core/vm/contracts_stateful.go b/core/vm/contracts_stateful.go index 64a34ffcf5..4b9b1b5aae 100644 --- a/core/vm/contracts_stateful.go +++ b/core/vm/contracts_stateful.go @@ -4,8 +4,25 @@ package vm import ( - "github.com/ava-labs/coreth/precompile/contract" + "fmt" + "math/big" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/tenderly/coreth/precompile" + "github.com/tenderly/coreth/vmerrs" +) + +// PrecompiledContractsApricot contains the default set of pre-compiled Ethereum +// contracts used in the Istanbul release and the stateful precompiled contracts +// added for the Avalanche Apricot release. +// Apricot is incompatible with the YoloV3 Release since it does not include the +// BLS12-381 Curve Operations added to the set of precompiled contracts + +var ( + genesisContractAddr = common.HexToAddress("0x0100000000000000000000000000000000000000") + NativeAssetBalanceAddr = common.HexToAddress("0x0100000000000000000000000000000000000001") + NativeAssetCallAddr = common.HexToAddress("0x0100000000000000000000000000000000000002") ) // wrappedPrecompiledContract implements StatefulPrecompiledContract by wrapping stateless native precompiled contracts @@ -14,18 +31,103 @@ type wrappedPrecompiledContract struct { p PrecompiledContract } -// newWrappedPrecompiledContract returns a wrapped version of [PrecompiledContract] to be executed according to the StatefulPrecompiledContract -// interface. -func newWrappedPrecompiledContract(p PrecompiledContract) contract.StatefulPrecompiledContract { +func newWrappedPrecompiledContract(p PrecompiledContract) precompile.StatefulPrecompiledContract { return &wrappedPrecompiledContract{p: p} } // Run implements the StatefulPrecompiledContract interface -func (w *wrappedPrecompiledContract) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { +func (w *wrappedPrecompiledContract) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { return RunPrecompiledContract(w.p, input, suppliedGas) } // RunStatefulPrecompiledContract runs [precompile] with the specified parameters.
-func RunStatefulPrecompiledContract(precompile contract.StatefulPrecompiledContract, accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { +func RunStatefulPrecompiledContract(precompile precompile.StatefulPrecompiledContract, accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { return precompile.Run(accessibleState, caller, addr, input, suppliedGas, readOnly) } + +// nativeAssetBalance is a precompiled contract used to retrieve the native asset balance +type nativeAssetBalance struct { + gasCost uint64 +} + +// PackNativeAssetBalanceInput packs the arguments into the required input data for a transaction to be passed into +// the native asset balance precompile. +func PackNativeAssetBalanceInput(address common.Address, assetID common.Hash) []byte { + input := make([]byte, 52) + copy(input, address.Bytes()) + copy(input[20:], assetID.Bytes()) + return input +} + +// UnpackNativeAssetBalanceInput attempts to unpack [input] into the arguments to the native asset balance precompile +func UnpackNativeAssetBalanceInput(input []byte) (common.Address, common.Hash, error) { + if len(input) != 52 { + return common.Address{}, common.Hash{}, fmt.Errorf("native asset balance input had unexpected length %d", len(input)) + } + address := common.BytesToAddress(input[:20]) + assetID := common.Hash{} + assetID.SetBytes(input[20:52]) + return address, assetID, nil +} + +// Run implements StatefulPrecompiledContract +func (b *nativeAssetBalance) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + // input: encodePacked(address 20 bytes, assetID 32 bytes) + if suppliedGas < b.gasCost { + return nil, 0, vmerrs.ErrOutOfGas + } + remainingGas = suppliedGas - b.gasCost + + address, assetID, err := UnpackNativeAssetBalanceInput(input) + if err != nil { + return nil, remainingGas, vmerrs.ErrExecutionReverted + } + + res, overflow := uint256.FromBig(accessibleState.GetStateDB().GetBalanceMultiCoin(address, assetID)) + if overflow { + return nil, remainingGas, vmerrs.ErrExecutionReverted + } + return common.LeftPadBytes(res.Bytes(), 32), remainingGas, nil +} + +// nativeAssetCall atomically transfers a native asset to a recipient address as well as calling that +// address +type nativeAssetCall struct { + gasCost uint64 +} + +// PackNativeAssetCallInput packs the arguments into the required input data for a transaction to be passed into +// the native asset precompile. +// Assumes that [assetAmount] is non-nil.
+func PackNativeAssetCallInput(address common.Address, assetID common.Hash, assetAmount *big.Int, callData []byte) []byte { + input := make([]byte, 84+len(callData)) + copy(input[0:20], address.Bytes()) + copy(input[20:52], assetID.Bytes()) + assetAmount.FillBytes(input[52:84]) + copy(input[84:], callData) + return input +} + +// UnpackNativeAssetCallInput attempts to unpack [input] into the arguments to the native asset call precompile +func UnpackNativeAssetCallInput(input []byte) (common.Address, common.Hash, *big.Int, []byte, error) { + if len(input) < 84 { + return common.Address{}, common.Hash{}, nil, nil, fmt.Errorf("native asset call input had unexpected length %d", len(input)) + } + to := common.BytesToAddress(input[:20]) + assetID := common.BytesToHash(input[20:52]) + assetAmount := new(big.Int).SetBytes(input[52:84]) + callData := input[84:] + return to, assetID, assetAmount, callData, nil +} + +// Run implements StatefulPrecompiledContract +func (c *nativeAssetCall) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + // input: encodePacked(address 20 bytes, assetID 32 bytes, assetAmount 32 bytes, callData variable length bytes) + return accessibleState.NativeAssetCall(caller, input, suppliedGas, c.gasCost, readOnly) +} + +type deprecatedContract struct{} + +func (*deprecatedContract) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + return nil, suppliedGas, vmerrs.ErrExecutionReverted +} diff --git a/core/vm/contracts_stateful_test.go b/core/vm/contracts_stateful_test.go index ef3eb24a8e..c4167ffdd0 100644 --- a/core/vm/contracts_stateful_test.go +++ b/core/vm/contracts_stateful_test.go @@ -7,13 +7,13 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/assert" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" ) func TestPrecompiledContractSpendsGas(t *testing.T) { @@ -73,7 +73,7 @@ func TestPackNativeAssetCallInput(t *testing.T) { func TestStatefulPrecompile(t *testing.T) { vmCtx := BlockContext{ BlockNumber: big.NewInt(0), - Time: 0, + Time: big.NewInt(0), CanTransfer: CanTransfer, CanTransferMC: CanTransferMC, Transfer: Transfer, diff --git a/core/vm/eips.go b/core/vm/eips.go index de98b10850..426286b9d3 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -30,23 +30,17 @@ import ( "fmt" "sort" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" - "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" + "github.com/tenderly/coreth/params" ) var activators = map[int]func(*JumpTable){ - 5656: enable5656, - 6780: enable6780, 3855: enable3855, - 3860: enable3860, 3198: enable3198, 2929: enable2929, 2200: enable2200, 1884: enable1884, 1344: enable1344, - 1153: enable1153, } // EnableEIP enables the given EIP on the config. 
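// A self-contained sketch of the 84-byte input layout consumed by the
// nativeAssetCall precompile above: to (20) || assetID (32) || amount (32) ||
// callData. The pack and unpack helpers below mirror PackNativeAssetCallInput
// and UnpackNativeAssetCallInput from this diff; the main function is only an
// illustrative round-trip.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func pack(to common.Address, assetID common.Hash, amount *big.Int, callData []byte) []byte {
	input := make([]byte, 84+len(callData))
	copy(input[0:20], to.Bytes())
	copy(input[20:52], assetID.Bytes())
	amount.FillBytes(input[52:84]) // panics if amount does not fit in 32 bytes, so callers must bound it
	copy(input[84:], callData)
	return input
}

func unpack(input []byte) (common.Address, common.Hash, *big.Int, []byte, error) {
	if len(input) < 84 {
		return common.Address{}, common.Hash{}, nil, nil, fmt.Errorf("input had unexpected length %d", len(input))
	}
	return common.BytesToAddress(input[:20]),
		common.BytesToHash(input[20:52]),
		new(big.Int).SetBytes(input[52:84]),
		input[84:], nil
}

func main() {
	in := pack(common.HexToAddress("0x0100000000000000000000000000000000000002"),
		common.HexToHash("0x01"), big.NewInt(1_000_000), []byte{0xde, 0xad})
	to, assetID, amount, data, _ := unpack(in)
	fmt.Println(to.Hex(), assetID.Hex(), amount, len(data)) // round-trips the packed fields
}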
@@ -189,45 +183,6 @@ func enable3198(jt *JumpTable) { } } -// enable1153 applies EIP-1153 "Transient Storage" -// - Adds TLOAD that reads from transient storage -// - Adds TSTORE that writes to transient storage -func enable1153(jt *JumpTable) { - jt[TLOAD] = &operation{ - execute: opTload, - constantGas: params.WarmStorageReadCostEIP2929, - minStack: minStack(1, 1), - maxStack: maxStack(1, 1), - } - - jt[TSTORE] = &operation{ - execute: opTstore, - constantGas: params.WarmStorageReadCostEIP2929, - minStack: minStack(2, 0), - maxStack: maxStack(2, 0), - } -} - -// opTload implements TLOAD opcode -func opTload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - loc := scope.Stack.peek() - hash := common.Hash(loc.Bytes32()) - val := interpreter.evm.StateDB.GetTransientState(scope.Contract.Address(), hash) - loc.SetBytes(val.Bytes()) - return nil, nil -} - -// opTstore implements TSTORE opcode -func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection - } - loc := scope.Stack.pop() - val := scope.Stack.pop() - interpreter.evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) - return nil, nil -} - // opBaseFee implements BASEFEE opcode func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { baseFee, _ := uint256.FromBig(interpreter.evm.Context.BaseFee) @@ -251,86 +206,3 @@ func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by scope.Stack.push(new(uint256.Int)) return nil, nil } - -// enable3860 enables "EIP-3860: Limit and meter initcode" -// https://eips.ethereum.org/EIPS/eip-3860 -func enable3860(jt *JumpTable) { - jt[CREATE].dynamicGas = gasCreateEip3860 - jt[CREATE2].dynamicGas = gasCreate2Eip3860 -} - -// enable5656 enables EIP-5656 (MCOPY opcode) -// https://eips.ethereum.org/EIPS/eip-5656 -func enable5656(jt *JumpTable) { - jt[MCOPY] = &operation{ - execute: opMcopy, - constantGas: GasFastestStep, - dynamicGas: gasMcopy, - minStack: minStack(3, 0), - maxStack: maxStack(3, 0), - memorySize: memoryMcopy, - } -} - -// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656) -func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - var ( - dst = scope.Stack.pop() - src = scope.Stack.pop() - length = scope.Stack.pop() - ) - // These values are checked for overflow during memory expansion calculation - // (the memorySize function on the opcode). 
- scope.Memory.Copy(dst.Uint64(), src.Uint64(), length.Uint64()) - return nil, nil -} - -// opBlobHash implements the BLOBHASH opcode -func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - index := scope.Stack.peek() - if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) { - blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()] - index.SetBytes32(blobHash[:]) - } else { - index.Clear() - } - return nil, nil -} - -// opBlobBaseFee implements BLOBBASEFEE opcode -func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee) - scope.Stack.push(blobBaseFee) - return nil, nil -} - -// enable4844 applies EIP-4844 (BLOBHASH opcode) -func enable4844(jt *JumpTable) { - jt[BLOBHASH] = &operation{ - execute: opBlobHash, - constantGas: GasFastestStep, - minStack: minStack(1, 1), - maxStack: maxStack(1, 1), - } -} - -// enable7516 applies EIP-7516 (BLOBBASEFEE opcode) -func enable7516(jt *JumpTable) { - jt[BLOBBASEFEE] = &operation{ - execute: opBlobBaseFee, - constantGas: GasQuickStep, - minStack: minStack(0, 1), - maxStack: maxStack(0, 1), - } -} - -// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) -func enable6780(jt *JumpTable) { - jt[SELFDESTRUCT] = &operation{ - execute: opSelfdestruct6780, - dynamicGas: gasSelfdestructEIP3529, - constantGas: params.SelfdestructGasEIP150, - minStack: minStack(1, 0), - maxStack: maxStack(1, 0), - } -} diff --git a/core/vm/evm.go b/core/vm/evm.go index 9cfc38413d..428f92d942 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -31,35 +31,59 @@ import ( "sync/atomic" "time" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/coreth/constants" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile/contract" - "github.com/ava-labs/coreth/precompile/modules" - "github.com/ava-labs/coreth/precompile/precompileconfig" - "github.com/ava-labs/coreth/predicate" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" + "github.com/tenderly/coreth/constants" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/precompile" + "github.com/tenderly/coreth/vmerrs" ) var ( - _ contract.AccessibleState = &EVM{} - _ contract.BlockContext = &BlockContext{} + _ precompile.PrecompileAccessibleState = &EVM{} + _ precompile.BlockContext = &BlockContext{} ) -// IsProhibited returns true if [addr] is in the prohibited list of addresses which should -// not be allowed as an EOA or newly created contract address. +// IsProhibited returns true if [addr] is the blackhole address or is +// within a range reserved for precompiled contracts. func IsProhibited(addr common.Address) bool { if addr == constants.BlackholeAddr { return true } + for _, reservedRange := range precompile.ReservedRanges { + if reservedRange.Contains(addr) { + return true + } + } + return false +} + +// TODO: deprecate after Banff activation.
+func (evm *EVM) isProhibitedWithTimestamp(addr common.Address) error { + if addr != NativeAssetCallAddr { + return nil + } - return modules.ReservedAddress(addr) + // Return error depending on the phase + switch { + case evm.chainRules.IsBanff: // Disable the soft fork as of Banff + return nil + case evm.chainRules.IsApricotPhasePost6: // If we are in the soft fork, return the soft error + return vmerrs.ErrToAddrProhibitedSoft + case evm.chainRules.IsApricotPhase6: // If we are in Phase6, return nil + return nil + case evm.chainRules.IsApricotPhasePre6: // If we are in PrePhase6, return Prohibited6 + return vmerrs.ErrToAddrProhibited6 + default: // Prior to Pre6, don't alter behavior at all + return nil + } } +// emptyCodeHash is used by create to ensure deployment is disallowed to already +// deployed contract addresses (relevant after the account abstraction). +var emptyCodeHash = crypto.Keccak256Hash(nil) + type ( // CanTransferFunc is the signature of a transfer guard function CanTransferFunc func(StateDB, common.Address, *big.Int) bool @@ -72,17 +96,11 @@ type ( GetHashFunc func(uint64) common.Hash ) -func (evm *EVM) precompile(addr common.Address) (contract.StatefulPrecompiledContract, bool) { - var precompiles map[common.Address]contract.StatefulPrecompiledContract +func (evm *EVM) precompile(addr common.Address) (precompile.StatefulPrecompiledContract, bool) { + var precompiles map[common.Address]precompile.StatefulPrecompiledContract switch { - case evm.chainRules.IsCancun: - precompiles = PrecompiledContractsCancun case evm.chainRules.IsBanff: precompiles = PrecompiledContractsBanff - case evm.chainRules.IsApricotPhase6: - precompiles = PrecompiledContractsApricotPhase6 - case evm.chainRules.IsApricotPhasePre6: - precompiles = PrecompiledContractsApricotPhasePre6 case evm.chainRules.IsApricotPhase2: precompiles = PrecompiledContractsApricotPhase2 case evm.chainRules.IsIstanbul: @@ -100,12 +118,8 @@ func (evm *EVM) precompile(addr common.Address) (contract.StatefulPrecompiledCon } // Otherwise, check the chain rules for the additionally configured precompiles. - if _, ok = evm.chainRules.ActivePrecompiles[addr]; ok { - module, ok := modules.GetPrecompileModuleByAddress(addr) - return module.Contract, ok - } - - return nil, false + p, ok = evm.chainRules.Precompiles[addr] + return p, ok } // BlockContext provides the EVM with auxiliary information. Once provided @@ -123,43 +137,30 @@ type BlockContext struct { TransferMultiCoin TransferMCFunc // GetHash returns the hash corresponding to n GetHash GetHashFunc - // PredicateResults are the results of predicate verification available throughout the EVM's execution. - // PredicateResults may be nil if it is not encoded in the block's header. 
- PredicateResults *predicate.Results // Block information Coinbase common.Address // Provides information for COINBASE GasLimit uint64 // Provides information for GASLIMIT BlockNumber *big.Int // Provides information for NUMBER - Time uint64 // Provides information for TIME + Time *big.Int // Provides information for TIME Difficulty *big.Int // Provides information for DIFFICULTY BaseFee *big.Int // Provides information for BASEFEE - BlobBaseFee *big.Int // Provides information for BLOBBASEFEE (0 if vm runs with NoBaseFee flag and 0 blob gas price) } func (b *BlockContext) Number() *big.Int { return b.BlockNumber } -func (b *BlockContext) Timestamp() uint64 { +func (b *BlockContext) Timestamp() *big.Int { return b.Time } -func (b *BlockContext) GetPredicateResults(txHash common.Hash, address common.Address) []byte { - if b.PredicateResults == nil { - return nil - } - return b.PredicateResults.GetResults(txHash, address) -} - // TxContext provides the EVM with information about a transaction. // All fields can change between transactions. type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set) - BlobHashes []common.Hash // Provides information for BLOBHASH - BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE } // EVM is the Ethereum Virtual Machine base object and provides @@ -191,7 +192,8 @@ type EVM struct { // used throughout the execution of the tx. interpreter *EVMInterpreter // abort is used to abort the EVM calling operations - abort atomic.Bool + // NOTE: must be set atomically + abort int32 // callGasTemp holds the gas available for the current call. This is needed because the // available gas is calculated in gasCall* according to the 63/64 rule and later // applied in opCall*. @@ -201,26 +203,15 @@ type EVM struct { // NewEVM returns a new EVM. The returned EVM is not thread safe and should // only ever be used *once*. func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM { - // If basefee tracking is disabled (eth_call, eth_estimateGas, etc), and no - // gas prices were specified, lower the basefee to 0 to avoid breaking EVM - // invariants (basefee < feecap) - if config.NoBaseFee { - if txCtx.GasPrice.BitLen() == 0 { - blockCtx.BaseFee = new(big.Int) - } - if txCtx.BlobFeeCap != nil && txCtx.BlobFeeCap.BitLen() == 0 { - blockCtx.BlobBaseFee = new(big.Int) - } - } evm := &EVM{ Context: blockCtx, TxContext: txCtx, StateDB: statedb, Config: config, chainConfig: chainConfig, - chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Time), + chainRules: chainConfig.AvalancheRules(blockCtx.BlockNumber, blockCtx.Time), } - evm.interpreter = NewEVMInterpreter(evm) + evm.interpreter = NewEVMInterpreter(evm, config) return evm } @@ -234,26 +225,21 @@ func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { // Cancel cancels any running EVM operation. This may be called concurrently and // it's safe to be called multiple times. func (evm *EVM) Cancel() { - evm.abort.Store(true) + atomic.StoreInt32(&evm.abort, 1) } // Cancelled returns true if Cancel has been called func (evm *EVM) Cancelled() bool { - return evm.abort.Load() -} - -// GetSnowContext returns the evm's snow.Context. 
-func (evm *EVM) GetSnowContext() *snow.Context { - return evm.chainConfig.SnowCtx + return atomic.LoadInt32(&evm.abort) == 1 } // GetStateDB returns the evm's StateDB -func (evm *EVM) GetStateDB() contract.StateDB { +func (evm *EVM) GetStateDB() precompile.StateDB { return evm.StateDB } // GetBlockContext returns the evm's BlockContext -func (evm *EVM) GetBlockContext() contract.BlockContext { +func (evm *EVM) GetBlockContext() precompile.BlockContext { return &evm.Context } @@ -267,6 +253,9 @@ func (evm *EVM) Interpreter() *EVMInterpreter { // the necessary steps to create accounts and reverses the state in case of an // execution error or failed value transfer. func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { + if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { + return nil, gas, prohibitErr + } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -280,15 +269,14 @@ } snapshot := evm.StateDB.Snapshot() p, isPrecompile := evm.precompile(addr) - debug := evm.Config.Tracer != nil if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer - if debug { + if evm.Config.Debug { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - evm.Config.Tracer.CaptureEnd(ret, 0, nil) + evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil) } else { evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) evm.Config.Tracer.CaptureExit(ret, 0, nil) @@ -301,12 +289,12 @@ evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) // Capture the tracer start/end events in debug mode - if debug { + if evm.Config.Debug { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - defer func(startGas uint64) { // Lazy evaluation of the parameters - evm.Config.Tracer.CaptureEnd(ret, startGas-gas, err) - }(gas) + defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters + evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) + }(gas, time.Now()) } else { // Handle tracer events for entering and exiting a call frame evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) @@ -351,6 +339,9 @@ // This allows the user to transfer balance of a specified coinId in addition to a normal Call().
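// A sketch of the NativeAssetCallAddr gating implemented by
// isProhibitedWithTimestamp earlier in this file's diff. forkRules is an
// illustrative stand-in for the params.Rules flags consulted in the switch,
// and the error values stand in for vmerrs.ErrToAddrProhibitedSoft and
// vmerrs.ErrToAddrProhibited6; none of these names come from the patch.
package main

import (
	"errors"
	"fmt"
)

type forkRules struct {
	isBanff, isApricotPhasePost6, isApricotPhase6, isApricotPhasePre6 bool
}

var (
	errProhibitedSoft = errors.New("prohibited address cannot be called (soft)")
	errProhibited6    = errors.New("prohibited address cannot be called")
)

// gateNativeAssetCall mirrors the switch ordering above: later forks are
// checked first, so Banff lifts the restriction, Post6 soft-disables the
// address, Phase6 allows it, and Pre6 hard-disables it.
func gateNativeAssetCall(r forkRules) error {
	switch {
	case r.isBanff:
		return nil
	case r.isApricotPhasePost6:
		return errProhibitedSoft
	case r.isApricotPhase6:
		return nil
	case r.isApricotPhasePre6:
		return errProhibited6
	default:
		return nil
	}
}

func main() {
	fmt.Println(gateNativeAssetCall(forkRules{isApricotPhasePost6: true, isApricotPhase6: true})) // soft error
	fmt.Println(gateNativeAssetCall(forkRules{isBanff: true, isApricotPhasePost6: true}))         // <nil>
}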
func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int, coinID common.Hash, value2 *big.Int) (ret []byte, leftOverGas uint64, err error) { + if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { + return nil, gas, prohibitErr + } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -386,11 +377,10 @@ func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte evm.Context.TransferMultiCoin(evm.StateDB, caller.Address(), addr, coinID, value2) // Capture the tracer start/end events in debug mode - debug := evm.Config.Tracer != nil - if debug && evm.depth == 0 { + if evm.Config.Debug && evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters - evm.Config.Tracer.CaptureEnd(ret, startGas-gas, err) + evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) }(gas, time.Now()) } @@ -435,6 +425,9 @@ func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte // CallCode differs from Call in the sense that it executes the given address' // code with the caller as context. func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { + if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { + return nil, gas, prohibitErr + } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -452,7 +445,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, var snapshot = evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame - if evm.Config.Tracer != nil { + if evm.Config.Debug { evm.Config.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) @@ -486,6 +479,9 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, // DelegateCall differs from CallCode in the sense that it executes the given address' // code with the caller as context and the caller is set to the caller of the caller. func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) { + if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { + return nil, gas, prohibitErr + } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -493,12 +489,8 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by var snapshot = evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame - if evm.Config.Tracer != nil { - // NOTE: caller must, at all times be a contract. It should never happen - // that caller is something other than a Contract. 
- parent := caller.(*Contract) - // DELEGATECALL inherits value from parent call - evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, parent.value) + if evm.Config.Debug { + evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, nil) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) }(gas) @@ -529,6 +521,9 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by // Opcodes that attempt to perform such modifications will result in exceptions // instead of performing the modifications. func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) { + if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { + return nil, gas, prohibitErr + } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -547,7 +542,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte evm.StateDB.AddBalance(addr, big0) // Invoke tracer hooks that signal entering/exiting a call frame - if evm.Config.Tracer != nil { + if evm.Config.Debug { evm.Config.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) @@ -622,7 +617,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } // Ensure there's no existing contract already at the designated address contractHash := evm.StateDB.GetCodeHash(address) - if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) { + if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) { return nil, common.Address{}, 0, vmerrs.ErrContractAddressCollision } // Create a new account on the state @@ -638,7 +633,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) - if evm.Config.Tracer != nil { + if evm.Config.Debug { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) } else { @@ -646,6 +641,8 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } + start := time.Now() + ret, err := evm.interpreter.Run(contract, nil, false) // Check whether the max code size has been exceeded, assign err if the case. 
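// The collision check in create() above compares against emptyCodeHash,
// defined earlier in this diff as crypto.Keccak256Hash(nil). A one-line
// confirmation of the well-known constant:
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Prints 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470,
	// the keccak256 hash of empty input that marks an account with no code.
	fmt.Println(crypto.Keccak256Hash(nil).Hex())
}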
@@ -681,9 +678,9 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - if evm.Config.Tracer != nil { + if evm.Config.Debug { if evm.depth == 0 { - evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err) + evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) } else { evm.Config.Tracer.CaptureExit(ret, gas-contract.Gas, err) } @@ -710,9 +707,6 @@ func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment * // ChainConfig returns the environment's chain configuration func (evm *EVM) ChainConfig() *params.ChainConfig { return evm.chainConfig } -// GetChainConfig implements AccessibleState -func (evm *EVM) GetChainConfig() precompileconfig.ChainConfig { return evm.chainConfig } - func (evm *EVM) NativeAssetCall(caller common.Address, input []byte, suppliedGas uint64, gasCost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { if suppliedGas < gasCost { return nil, 0, vmerrs.ErrOutOfGas diff --git a/core/vm/evm_test.go b/core/vm/evm_test.go index 5a7e72f3f4..021dc5700c 100644 --- a/core/vm/evm_test.go +++ b/core/vm/evm_test.go @@ -16,20 +16,9 @@ func TestIsProhibited(t *testing.T) { assert.True(t, IsProhibited(common.HexToAddress("0x0100000000000000000000000000000000000010"))) assert.True(t, IsProhibited(common.HexToAddress("0x01000000000000000000000000000000000000f0"))) assert.True(t, IsProhibited(common.HexToAddress("0x01000000000000000000000000000000000000ff"))) - assert.True(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000000"))) - assert.True(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000010"))) - assert.True(t, IsProhibited(common.HexToAddress("0x02000000000000000000000000000000000000f0"))) - assert.True(t, IsProhibited(common.HexToAddress("0x02000000000000000000000000000000000000ff"))) - // reserved addresses (custom precompiles) - assert.True(t, IsProhibited(common.HexToAddress("0x0300000000000000000000000000000000000000"))) - assert.True(t, IsProhibited(common.HexToAddress("0x0300000000000000000000000000000000000010"))) - assert.True(t, IsProhibited(common.HexToAddress("0x03000000000000000000000000000000000000f0"))) - assert.True(t, IsProhibited(common.HexToAddress("0x03000000000000000000000000000000000000ff"))) // allowed for use assert.False(t, IsProhibited(common.HexToAddress("0x00000000000000000000000000000000000000ff"))) - assert.False(t, IsProhibited(common.HexToAddress("0x00ffffffffffffffffffffffffffffffffffffff"))) assert.False(t, IsProhibited(common.HexToAddress("0x0100000000000000000000000000000000000100"))) - assert.False(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000100"))) - assert.False(t, IsProhibited(common.HexToAddress("0x0300000000000000000000000000000000000100"))) + assert.False(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000000"))) } diff --git a/core/vm/gas.go b/core/vm/gas.go index 1a195acf2a..b265ea838a 100644 --- a/core/vm/gas.go +++ b/core/vm/gas.go @@ -29,6 +29,7 @@ package vm import ( "github.com/ava-labs/coreth/vmerrs" "github.com/holiman/uint256" + "github.com/tenderly/coreth/vmerrs" ) // Gas costs diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index f97d0fd1ba..babfc2c95b 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -29,10 +29,10 @@ package vm import ( "errors" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/common/math" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" ) // memoryGasCost calculates the quadratic gas for memory expansion. It does so @@ -71,7 +71,6 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) { // as argument: // CALLDATACOPY (stack position 2) // CODECOPY (stack position 2) -// MCOPY (stack position 2) // EXTCODECOPY (stack position 3) // RETURNDATACOPY (stack position 2) func memoryCopierGas(stackpos int) gasFunc { @@ -101,7 +100,6 @@ func memoryCopierGas(stackpos int) gasFunc { var ( gasCallDataCopy = memoryCopierGas(2) gasCodeCopy = memoryCopierGas(2) - gasMcopy = memoryCopierGas(2) gasExtCodeCopy = memoryCopierGas(3) gasReturnDataCopy = memoryCopierGas(2) ) @@ -115,7 +113,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi // Legacy rules should be applied if we are in Petersburg (removal of EIP-1283) // OR Constantinople is not active if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople { - // This checks for 3 scenarios and calculates gas accordingly: + // This checks for 3 scenario's and calculates gas accordingly: // // 1. From a zero-value address to a non-zero value (NEW VALUE) // 2. From a non-zero value address to a zero-value address (DELETE) @@ -130,21 +128,20 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi return params.SstoreResetGas, nil } } - // The new gas metering is based on net gas costs (EIP-1283): // - // (1.) If current value equals new value (this is a no-op), 200 gas is deducted. - // (2.) If current value does not equal new value - // (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context) - // (2.1.1.) If original value is 0, 20000 gas is deducted. - // (2.1.2.) Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. - // (2.2.) If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. - // (2.2.1.) If original value is not 0 - // (2.2.1.1.) If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. - // (2.2.1.2.) If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. - // (2.2.2.) If original value equals new value (this storage slot is reset) - // (2.2.2.1.) If original value is 0, add 19800 gas to refund counter. - // (2.2.2.2.) Otherwise, add 4800 gas to refund counter. + // 1. If current value equals new value (this is a no-op), 200 gas is deducted. + // 2. If current value does not equal new value + // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) + // 2.1.1. If original value is 0, 20000 gas is deducted. + // 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. + // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. + // 2.2.1. If original value is not 0 + // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. + // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. + // 2.2.2. 
If original value equals new value (this storage slot is reset) + // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. + // 2.2.2.2. Otherwise, add 4800 gas to refund counter. value := common.Hash(y.Bytes32()) if current == value { // noop (1) return params.NetSstoreNoopGas, nil @@ -176,21 +173,19 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi return params.NetSstoreDirtyGas, nil } -// Here come the EIP2200 rules: -// -// (0.) If *gasleft* is less than or equal to 2300, fail the current call. -// (1.) If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// (2.) If current value does not equal new value: -// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context): -// (2.1.1.) If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. -// (2.1.2.) Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// (2.2.) If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: -// (2.2.1.) If original value is not 0: -// (2.2.1.1.) If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. -// (2.2.1.2.) If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. -// (2.2.2.) If original value equals new value (this storage slot is reset): -// (2.2.2.1.) If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. -// (2.2.2.2.) Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. +// 0. If *gasleft* is less than or equal to 2300, fail the current call. +// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// 2. If current value does not equal new value: +// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): +// 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. +// 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. +// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// 2.2.1. If original value is not 0: +// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. +// 2.2.1.2. If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. +// 2.2.2. If original value equals new value (this storage slot is reset): +// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. +// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { @@ -235,13 +230,13 @@ func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m // gasSStoreAP1 simplifies the dynamic gas cost of SSTORE by removing all refund logic // -// 0. If *gasleft* is less than or equal to 2300, fail the current call. -// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// 2. 
If current value does not equal new value: -// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): +// 0. If *gasleft* is less than or equal to 2300, fail the current call. +// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// 2. If current value does not equal new value: +// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): // 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. // 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: func gasSStoreAP1(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { @@ -350,39 +345,6 @@ func gasCreate2(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memoryS return gas, nil } -func gasCreateEip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - gas, err := memoryGasCost(mem, memorySize) - if err != nil { - return 0, err - } - size, overflow := stack.Back(2).Uint64WithOverflow() - if overflow || size > params.MaxInitCodeSize { - return 0, vmerrs.ErrGasUintOverflow - } - // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow - moreGas := params.InitCodeWordGas * ((size + 31) / 32) - if gas, overflow = math.SafeAdd(gas, moreGas); overflow { - return 0, vmerrs.ErrGasUintOverflow - } - return gas, nil -} -func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - gas, err := memoryGasCost(mem, memorySize) - if err != nil { - return 0, err - } - size, overflow := stack.Back(2).Uint64WithOverflow() - if overflow || size > params.MaxInitCodeSize { - return 0, vmerrs.ErrGasUintOverflow - } - // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow - moreGas := (params.InitCodeWordGas + params.Keccak256WordGas) * ((size + 31) / 32) - if gas, overflow = math.SafeAdd(gas, moreGas); overflow { - return 0, vmerrs.ErrGasUintOverflow - } - return gas, nil -} - func gasExpFrontier(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { expByteLen := uint64((stack.data[stack.len()-2].BitLen() + 7) / 8) @@ -557,7 +519,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } - if !evm.StateDB.HasSelfDestructed(contract.Address()) { + if !evm.StateDB.HasSuicided(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } return gas, nil diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index a6ae777ddc..3238a20ee0 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -27,19 +27,16 @@ package vm import ( - "bytes" "math" "math/big" - "sort" "testing" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/common/hexutil" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" ) func TestMemoryGasCost(t *testing.T) { @@ -95,7 +92,7 @@ func TestEIP2200(t *testing.T) { for i, tt := range eip2200Tests { address := common.BytesToAddress([]byte("contract")) - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.CreateAccount(address) statedb.SetCode(address, hexutil.MustDecode(tt.input)) statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original})) @@ -119,75 +116,3 @@ func TestEIP2200(t *testing.T) { } } } - -var createGasTests = []struct { - code string - eip3860 bool - gasUsed uint64 - minimumGas uint64 -}{ - // legacy create(0, 0, 0xc000) without 3860 used - {"0x61C00060006000f0" + "600052" + "60206000F3", false, 41237, 41237}, - // legacy create(0, 0, 0xc000) _with_ 3860 - {"0x61C00060006000f0" + "600052" + "60206000F3", true, 44309, 44309}, - // create2(0, 0, 0xc001, 0) without 3860 - {"0x600061C00160006000f5" + "600052" + "60206000F3", false, 50471, 50471}, - // create2(0, 0, 0xc001, 0) (too large), with 3860 - {"0x600061C00160006000f5" + "600052" + "60206000F3", true, 32012, 100_000}, - // create2(0, 0, 0xc000, 0) - // This case is trying to deploy code at (within) the limit - {"0x600061C00060006000f5" + "600052" + "60206000F3", true, 53528, 53528}, - // create2(0, 0, 0xc001, 0) - // This case is trying to deploy code exceeding the limit - {"0x600061C00160006000f5" + "600052" + "60206000F3", true, 32024, 100000}, -} - -func TestCreateGas(t *testing.T) { - for i, tt := range createGasTests { - var gasUsed = uint64(0) - doCheck := func(testGas int) bool { - address := common.BytesToAddress([]byte("contract")) - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - statedb.CreateAccount(address) - statedb.SetCode(address, hexutil.MustDecode(tt.code)) - statedb.Finalise(true) - vmctx := BlockContext{ - CanTransfer: func(StateDB, common.Address, *big.Int) bool { return true }, - Transfer: func(StateDB, common.Address, common.Address, *big.Int) {}, - BlockNumber: big.NewInt(0), - } - config := Config{} - if tt.eip3860 { - config.ExtraEips = []int{3860} - } - - // Note: we use Cortina instead of AllEthashProtocolChanges (upstream) - // because it is the last fork before the activation of EIP-3860 - vmenv := NewEVM(vmctx, TxContext{}, statedb, params.TestCortinaChainConfig, config) - var startGas = uint64(testGas) - ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(big.Int)) - if err != nil { - return false - } - gasUsed = startGas - gas - if len(ret) != 32 { - t.Fatalf("test %d: expected 32 bytes returned, have %d", i, len(ret)) - } - if bytes.Equal(ret, make([]byte, 32)) { - // Failure - return false - } - return true - } - minGas := sort.Search(100_000, doCheck) - if uint64(minGas) != tt.minimumGas { - t.Fatalf("test %d: min gas error, want %d, have %d", i, tt.minimumGas, minGas) - } - // If the deployment succeeded, we also check the gas used - if minGas < 100_000 { - if gasUsed != tt.gasUsed { - t.Errorf("test %d: gas used mismatch: have %v, want %v", i, gasUsed, tt.gasUsed) - } - } - } -} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index e31282dc2d..13e6db4655 100644 --- a/core/vm/instructions.go 
+++ b/core/vm/instructions.go @@ -28,12 +28,14 @@ package vm import ( "errors" + "sync/atomic" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" + "golang.org/x/crypto/sha3" ) func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { @@ -248,7 +250,7 @@ func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) if interpreter.hasher == nil { - interpreter.hasher = crypto.NewKeccakState() + interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState) } else { interpreter.hasher.Reset() } @@ -263,7 +265,6 @@ func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( size.SetBytes(interpreter.hasherBuf[:]) return nil, nil } - func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) return nil, nil @@ -291,7 +292,6 @@ func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) return nil, nil } - func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) return nil, nil @@ -415,29 +415,29 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // opExtCodeHash returns the code hash of a specified account. // There are several cases when the function is called, while we can relay everything // to `state.GetCodeHash` function to ensure the correctness. +// (1) Caller tries to get the code hash of a normal contract account, state +// should return the relative code hash and set it as the result. // -// 1. Caller tries to get the code hash of a normal contract account, state -// should return the relative code hash and set it as the result. -// -// 2. Caller tries to get the code hash of a non-existent account, state should -// return common.Hash{} and zero will be set as the result. +// (2) Caller tries to get the code hash of a non-existent account, state should +// return common.Hash{} and zero will be set as the result. // -// 3. Caller tries to get the code hash for an account without contract code, state -// should return emptyCodeHash(0xc5d246...) as the result. +// (3) Caller tries to get the code hash for an account without contract code, +// state should return emptyCodeHash(0xc5d246...) as the result. // -// 4. Caller tries to get the code hash of a precompiled account, the result should be -// zero or emptyCodeHash. +// (4) Caller tries to get the code hash of a precompiled account, the result +// should be zero or emptyCodeHash. // -// It is worth noting that in order to avoid unnecessary create and clean, all precompile -// accounts on mainnet have been transferred 1 wei, so the return here should be -// emptyCodeHash. If the precompile account is not transferred any amount on a private or +// It is worth noting that in order to avoid unnecessary create and clean, +// all precompile accounts on mainnet have been transferred 1 wei, so the return +// here should be emptyCodeHash. 
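The opKeccak256 hunk above swaps crypto.NewKeccakState for a direct type assertion on the legacy Keccak-256 from golang.org/x/crypto/sha3. The assertion works because the sha3 state exposes a Read method that squeezes the sponge directly; Read is cheaper than Sum (no copy of the internal state) but mutates the hasher, which is fine here since the interpreter Resets it per use. A minimal sketch:

package main

import (
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

// keccakState mirrors the interface asserted in the hunk above.
type keccakState interface {
	hash.Hash
	Read([]byte) (int, error)
}

func main() {
	h := sha3.NewLegacyKeccak256().(keccakState)
	h.Write([]byte("hello"))
	var buf [32]byte
	h.Read(buf[:]) // squeeze the digest; no state copy, but the sponge is drained
	fmt.Printf("%x\n", buf)
}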
+// If the precompile account is not transferred any amount on a private or // customized chain, the return value will be zero. // -// 5. Caller tries to get the code hash for an account which is marked as self-destructed -// in the current transaction, the code hash of this account should be returned. +// (5) Caller tries to get the code hash for an account which is marked as suicided +// in the current transaction, the code hash of this account should be returned. // -// 6. Caller tries to get the code hash for an account which is marked as deleted, this -// account should be regarded as a non-existent account and zero should be returned. +// (6) Caller tries to get the code hash for an account which is marked as deleted, +// this account should be regarded as a non-existent account and zero should be returned. func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) @@ -483,7 +483,8 @@ func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.Time)) + v, _ := uint256.FromBig(interpreter.evm.Context.Time) + scope.Stack.push(v) return nil, nil } @@ -543,12 +544,13 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } loc := scope.Stack.pop() val := scope.Stack.pop() - interpreter.evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) + interpreter.evm.StateDB.SetState(scope.Contract.Address(), + loc.Bytes32(), val.Bytes32()) return nil, nil } func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if interpreter.evm.abort.Load() { + if atomic.LoadInt32(&interpreter.evm.abort) != 0 { return nil, errStopToken } pos := scope.Stack.pop() @@ -560,7 +562,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if interpreter.evm.abort.Load() { + if atomic.LoadInt32(&interpreter.evm.abort) != 0 { return nil, errStopToken } pos, cond := scope.Stack.pop(), scope.Stack.pop() @@ -616,6 +618,10 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract, input, gas, bigVal) + // Special case the error in the op code. TODO remove. + if errors.Is(suberr, vmerrs.ErrToAddrProhibitedSoft) { + return nil, suberr + } // Push item on the stack based on the returned error. If the ruleset is // homestead we must check for CodeStoreOutOfGasError (homestead only // rule) and treat as an error, if the ruleset is frontier we must @@ -649,6 +655,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas) @@ -661,6 +668,10 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] } res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract, input, gas, bigEndowment, &salt) + // Special case the error in the op code. TODO remove. + if errors.Is(suberr, vmerrs.ErrToAddrProhibitedSoft) { + return nil, suberr + } // Push item on the stack based on the returned error. 
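The opJump/opJumpi hunks above return to polling a plain int32 abort flag via sync/atomic instead of the newer atomic.Bool wrapper; both jump opcodes re-check the flag so a concurrent cancel can stop a contract spinning in a loop. A self-contained sketch of the idiom, with the goroutine standing in for whatever drives the EVM's cancel path:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var abort int32 // 0 = run, non-zero = stop (the evm.abort field in this diff)
	go func() {
		time.Sleep(10 * time.Millisecond)
		atomic.StoreInt32(&abort, 1) // e.g. the EVM's Cancel path
	}()
	for atomic.LoadInt32(&abort) == 0 {
		// interpreter loop body; JUMP/JUMPI re-check the flag on each jump
	}
	fmt.Println("aborted")
}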
if suberr != nil { stackvalue.Clear() @@ -703,6 +714,10 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } ret, returnGas, err := interpreter.evm.Call(scope.Contract, toAddr, args, gas, bigVal) + // Special case the error in the op code. TODO remove. + if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { + return nil, err + } if err != nil { temp.Clear() } else { @@ -710,6 +725,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { + ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -718,7 +734,6 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt return ret, nil } -// Note: opCallExpert was de-activated in ApricotPhase2. func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { stack := scope.Stack // Pop gas. The actual gas in interpreter.evm.callGasTemp. @@ -732,8 +747,6 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // Get the arguments from the memory. args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) - // Note: this code fails to check that value2 is zero, which was a bug when CALLEX was active. - // The CALLEX opcode was de-activated in ApricotPhase2 resolving this issue. if interpreter.readOnly && !value.IsZero() { return nil, vmerrs.ErrWriteProtection } @@ -755,6 +768,10 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } ret, returnGas, err := interpreter.evm.CallExpert(scope.Contract, toAddr, args, gas, bigVal, coinID, bigVal2) + // Special case the error in the op code. TODO remove. + if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { + return nil, err + } if err != nil { temp.Clear() } else { @@ -762,6 +779,7 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { + ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -789,6 +807,10 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } ret, returnGas, err := interpreter.evm.CallCode(scope.Contract, toAddr, args, gas, bigVal) + // Special case the error in the op code. TODO remove. + if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { + return nil, err + } if err != nil { temp.Clear() } else { @@ -796,6 +818,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { + ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -817,6 +840,10 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract, toAddr, args, gas) + // Special case the error in the op code. TODO remove. 
+ if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { + return nil, err + } if err != nil { temp.Clear() } else { @@ -824,6 +851,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { + ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -845,6 +873,10 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract, toAddr, args, gas) + // Special case the error in the op code. TODO remove. + if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { + return nil, err + } if err != nil { temp.Clear() } else { @@ -852,6 +884,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { + ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -890,26 +923,10 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) - interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address()) - if tracer := interpreter.evm.Config.Tracer; tracer != nil { - tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) - tracer.CaptureExit([]byte{}, 0, nil) - } - return nil, errStopToken -} - -func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection - } - beneficiary := scope.Stack.pop() - balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) - interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance) - interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) - interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address()) - if tracer := interpreter.evm.Config.Tracer; tracer != nil { - tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) - tracer.CaptureExit([]byte{}, 0, nil) + interpreter.evm.StateDB.Suicide(scope.Contract.Address()) + if interpreter.cfg.Debug { + interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) + interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) } return nil, errStopToken } @@ -931,14 +948,14 @@ func makeLog(size int) executionFunc { } d := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) - interpreter.evm.StateDB.AddLog( - scope.Contract.Address(), - topics, - d, + interpreter.evm.StateDB.AddLog(&types.Log{ + Address: scope.Contract.Address(), + Topics: topics, + Data: d, // This is a non-consensus field, but assigned here because // core/state doesn't know the current block number. 
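One pattern repeats across the call-family hunks above: ret = common.CopyBytes(ret) before Memory.Set. The callee's return slice can alias a buffer the interpreter will reuse, so storing it uncopied lets a later write corrupt what was placed in memory. A standalone sketch of the aliasing hazard (copyBytes mirrors the shape of common.CopyBytes):

package main

import "fmt"

func copyBytes(b []byte) []byte {
	if b == nil {
		return nil
	}
	c := make([]byte, len(b))
	copy(c, b)
	return c
}

func main() {
	shared := []byte{1, 2, 3} // stands in for a reused return-data buffer
	stored := shared          // aliases the buffer, like storing ret directly
	safe := copyBytes(shared) // independent copy, like the hunks above
	shared[0] = 9             // the buffer gets reused
	fmt.Println(stored[0], safe[0]) // 9 1
}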
- interpreter.evm.Context.BlockNumber.Uint64(), - ) + BlockNumber: interpreter.evm.Context.BlockNumber.Uint64(), + }) return nil, nil } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 1c6c874c37..7e7becc15c 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -32,18 +32,12 @@ import ( "fmt" "math/big" "os" - "strings" "testing" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" + "github.com/tenderly/coreth/params" ) type TwoOperandTestcase struct { @@ -61,14 +55,6 @@ var alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffff var commonParams []*twoOperandParams var twoOpMethods map[string]executionFunc -type contractRef struct { - addr common.Address -} - -func (c contractRef) Address() common.Address { - return c.addr -} - func init() { // Params is a list of common edgecases that should be used for some common tests params := []string{ @@ -217,7 +203,7 @@ func TestAddMod(t *testing.T) { var ( env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() - evmInterpreter = NewEVMInterpreter(env) + evmInterpreter = NewEVMInterpreter(env, env.Config) pc = uint64(0) ) tests := []struct { @@ -307,7 +293,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) { env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() scope = &ScopeContext{nil, stack, nil} - evmInterpreter = NewEVMInterpreter(env) + evmInterpreter = NewEVMInterpreter(env, env.Config) ) env.interpreter = evmInterpreter @@ -548,7 +534,7 @@ func TestOpMstore(t *testing.T) { env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env) + evmInterpreter = NewEVMInterpreter(env, env.Config) ) env.interpreter = evmInterpreter @@ -574,7 +560,7 @@ func BenchmarkOpMstore(bench *testing.B) { env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env) + evmInterpreter = NewEVMInterpreter(env, env.Config) ) env.interpreter = evmInterpreter @@ -591,55 +577,12 @@ func BenchmarkOpMstore(bench *testing.B) { } } -func TestOpTstore(t *testing.T) { - var ( - statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - env = NewEVM(BlockContext{}, TxContext{}, statedb, params.TestChainConfig, Config{}) - stack = newstack() - mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env) - caller = common.Address{} - to = common.Address{1} - contractRef = contractRef{caller} - contract = NewContract(contractRef, AccountRef(to), new(big.Int), 0) - scopeContext = ScopeContext{mem, stack, contract} - value = common.Hex2Bytes("abcdef00000000000000abba000000000deaf000000c0de00100000000133700") - ) - - // Add a stateObject for the caller and the contract being called - statedb.CreateAccount(caller) - statedb.CreateAccount(to) - - env.interpreter = evmInterpreter - pc := uint64(0) - // push the value to the stack - stack.push(new(uint256.Int).SetBytes(value)) - // push the location to the stack - stack.push(new(uint256.Int)) - opTstore(&pc, 
evmInterpreter, &scopeContext) - // there should be no elements on the stack after TSTORE - if stack.len() != 0 { - t.Fatal("stack wrong size") - } - // push the location to the stack - stack.push(new(uint256.Int)) - opTload(&pc, evmInterpreter, &scopeContext) - // there should be one element on the stack after TLOAD - if stack.len() != 1 { - t.Fatal("stack wrong size") - } - val := stack.peek() - if !bytes.Equal(val.Bytes(), value) { - t.Fatal("incorrect element read from transient storage") - } -} - func BenchmarkOpKeccak256(bench *testing.B) { var ( env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env) + evmInterpreter = NewEVMInterpreter(env, env.Config) ) env.interpreter = evmInterpreter mem.Resize(32) @@ -739,7 +682,7 @@ func TestRandom(t *testing.T) { for _, tt := range []testcase{ {name: "empty hash", random: common.Hash{}}, {name: "1", random: common.Hash{0}}, - {name: "emptyCodeHash", random: types.EmptyCodeHash}, + {name: "emptyCodeHash", random: emptyCodeHash}, {name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})}, } { var ( @@ -763,183 +706,3 @@ func TestRandom(t *testing.T) { } } } - -func TestBlobHash(t *testing.T) { - type testcase struct { - name string - idx uint64 - expect common.Hash - hashes []common.Hash - } - var ( - zero = common.Hash{0} - one = common.Hash{1} - two = common.Hash{2} - three = common.Hash{3} - ) - for _, tt := range []testcase{ - {name: "[{1}]", idx: 0, expect: one, hashes: []common.Hash{one}}, - {name: "[1,{2},3]", idx: 2, expect: three, hashes: []common.Hash{one, two, three}}, - {name: "out-of-bounds (empty)", idx: 10, expect: zero, hashes: []common.Hash{}}, - {name: "out-of-bounds", idx: 25, expect: zero, hashes: []common.Hash{one, two, three}}, - {name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil}, - } { - var ( - env = NewEVM(BlockContext{}, TxContext{BlobHashes: tt.hashes}, nil, params.TestChainConfig, Config{}) - stack = newstack() - pc = uint64(0) - evmInterpreter = env.interpreter - ) - stack.push(uint256.NewInt(tt.idx)) - opBlobHash(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) - if len(stack.data) != 1 { - t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) - } - actual := stack.pop() - expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.expect.Bytes())) - if overflow { - t.Errorf("Testcase %v: invalid overflow", tt.name) - } - if actual.Cmp(expected) != 0 { - t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual) - } - } -} - -func TestOpMCopy(t *testing.T) { - // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases - for i, tc := range []struct { - dst, src, len string - pre string - want string - wantGas uint64 - }{ - { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0. - dst: "0x0", src: "0x20", len: "0x20", - pre: "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", - want: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", - wantGas: 6, - }, - - { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0. 
- dst: "0x0", src: "0x0", len: "0x20", - pre: "0101010101010101010101010101010101010101010101010101010101010101", - want: "0101010101010101010101010101010101010101010101010101010101010101", - wantGas: 6, - }, - { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping). - dst: "0x0", src: "0x1", len: "0x8", - pre: "000102030405060708 000000000000000000000000000000000000000000000000", - want: "010203040506070808 000000000000000000000000000000000000000000000000", - wantGas: 6, - }, - { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping). - dst: "0x1", src: "0x0", len: "0x8", - pre: "000102030405060708 000000000000000000000000000000000000000000000000", - want: "000001020304050607 000000000000000000000000000000000000000000000000", - wantGas: 6, - }, - // Tests below are not in the EIP, but maybe should be added - { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping). - dst: "0xFFFFFFFFFFFF", src: "0xFFFFFFFFFFFF", len: "0x0", - pre: "11", - want: "11", - wantGas: 3, - }, - { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds. - dst: "0xFFFFFFFFFFFF", src: "0x0", len: "0x0", - pre: "11", - want: "11", - wantGas: 3, - }, - { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem - dst: "0x0", src: "0xFFFFFFFFFFFF", len: "0x0", - pre: "11", - want: "11", - wantGas: 3, - }, - { // MCOPY - copy 1 from space outside of uint64 space - dst: "0x0", src: "0x10000000000000000", len: "0x1", - pre: "0", - }, - { // MCOPY - copy 1 from 0 to space outside of uint64 - dst: "0x10000000000000000", src: "0x0", len: "0x1", - pre: "0", - }, - { // MCOPY - copy nothing from 0 to space outside of uint64 - dst: "0x10000000000000000", src: "0x0", len: "0x0", - pre: "", - want: "", - wantGas: 3, - }, - { // MCOPY - copy 1 from 0x20 to 0x10, with no prior allocated mem - dst: "0x10", src: "0x20", len: "0x1", - pre: "", - // 64 bytes - want: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - wantGas: 12, - }, - { // MCOPY - copy 1 from 0x19 to 0x10, with no prior allocated mem - dst: "0x10", src: "0x19", len: "0x1", - pre: "", - // 32 bytes - want: "0x0000000000000000000000000000000000000000000000000000000000000000", - wantGas: 9, - }, - } { - var ( - env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) - stack = newstack() - pc = uint64(0) - evmInterpreter = env.interpreter - ) - data := common.FromHex(strings.ReplaceAll(tc.pre, " ", "")) - // Set pre - mem := NewMemory() - mem.Resize(uint64(len(data))) - mem.Set(0, uint64(len(data)), data) - // Push stack args - len, _ := uint256.FromHex(tc.len) - src, _ := uint256.FromHex(tc.src) - dst, _ := uint256.FromHex(tc.dst) - - stack.push(len) - stack.push(src) - stack.push(dst) - wantErr := (tc.wantGas == 0) - // Calc mem expansion - var memorySize uint64 - if memSize, overflow := memoryMcopy(stack); overflow { - if wantErr { - continue - } - t.Errorf("overflow") - } else { - var overflow bool - if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow { - t.Error(vmerrs.ErrGasUintOverflow) - } - } - // and the dynamic cost - var haveGas uint64 - if dynamicCost, err := gasMcopy(env, nil, stack, mem, memorySize); err != nil { - t.Error(err) - } else { - haveGas = GasFastestStep + dynamicCost - } - // Expand mem - if memorySize > 0 { - mem.Resize(memorySize) - } - // Do the copy - opMcopy(&pc, evmInterpreter, 
&ScopeContext{mem, stack, nil}) - want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) - if have := mem.store; !bytes.Equal(want, have) { - t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have) - } - wantGas := tc.wantGas - if haveGas != wantGas { - t.Errorf("case %d: gas wrong, want %d have %d\n", i, wantGas, haveGas) - } - } -} diff --git a/core/vm/interface.go b/core/vm/interface.go index b044b74482..cc7efcc1a2 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -29,9 +29,8 @@ package vm import ( "math/big" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/types" ) // StateDB is an EVM database for full state querying. @@ -63,21 +62,17 @@ type StateDB interface { GetState(common.Address, common.Hash) common.Hash SetState(common.Address, common.Hash, common.Hash) - GetTransientState(addr common.Address, key common.Hash) common.Hash - SetTransientState(addr common.Address, key, value common.Hash) - - SelfDestruct(common.Address) - HasSelfDestructed(common.Address) bool - - Selfdestruct6780(common.Address) + Suicide(common.Address) bool + HasSuicided(common.Address) bool // Exist reports whether the given account exists in state. - // Notably this should also return true for self-destructed accounts. + // Notably this should also return true for suicided accounts. Exist(common.Address) bool // Empty returns whether the given account is empty. Empty // is defined according to EIP161 (balance = nonce = code = 0). Empty(common.Address) bool + PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) AddressInAccessList(addr common.Address) bool SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform @@ -86,30 +81,26 @@ type StateDB interface { // AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform // even if the feature/fork is not active yet AddSlotToAccessList(addr common.Address, slot common.Hash) - Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) RevertToSnapshot(int) Snapshot() int - AddLog(addr common.Address, topics []common.Hash, data []byte, blockNumber uint64) - GetLogData() (topics [][]common.Hash, data [][]byte) - GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) - SetPredicateStorageSlots(address common.Address, predicates [][]byte) - - GetTxHash() common.Hash - + AddLog(*types.Log) AddPreimage(common.Hash, []byte) + + ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error } // CallContext provides a basic interface for the EVM calling conventions. The EVM // depends on this context being implemented for doing subcalls and initialising new EVM contracts. type CallContext interface { - // Call calls another contract. 
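The StateDB interface above funnels EIP-2929 warm/cold bookkeeping through PrepareAccessList, AddressInAccessList, SlotInAccessList and AddSlotToAccessList. A minimal sketch of the structure those methods imply: a set of addresses, each holding a set of warm slots. Strings stand in for common.Address and common.Hash to keep it dependency-free; this is an illustration, not the coreth data structure.

package main

import "fmt"

type accessList map[string]map[string]struct{}

func (al accessList) addAddress(addr string) {
	if _, ok := al[addr]; !ok {
		al[addr] = make(map[string]struct{})
	}
}

func (al accessList) addSlot(addr, slot string) {
	al.addAddress(addr)
	al[addr][slot] = struct{}{}
}

// slotInList mirrors SlotInAccessList: whether the address is warm, and
// whether the (address, slot) pair is warm.
func (al accessList) slotInList(addr, slot string) (addrOk, slotOk bool) {
	slots, ok := al[addr]
	if !ok {
		return false, false
	}
	_, slotOk = slots[slot]
	return true, slotOk
}

func main() {
	al := make(accessList)
	al.addSlot("0xcontract", "0x01")
	fmt.Println(al.slotInList("0xcontract", "0x01")) // true true
	fmt.Println(al.slotInList("0xcontract", "0x02")) // true false
}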
+ // Call another contract Call(env *EVM, me ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) - // CallCode takes another contracts code and execute within our own context + CallExpert(env *EVM, me ContractRef, addr common.Address, data []byte, gas, value *big.Int, coinID *common.Hash, value2 *big.Int) ([]byte, error) + // Take another contract's code and execute it within our own context CallCode(env *EVM, me ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) - // DelegateCall is same as CallCode except sender and value is propagated from parent to child scope + // Same as CallCode except sender and value are propagated from parent to child scope DelegateCall(env *EVM, me ContractRef, addr common.Address, data []byte, gas *big.Int) ([]byte, error) - // Create creates a new contract + // Create a new contract Create(env *EVM, me ContractRef, data []byte, gas, value *big.Int) ([]byte, common.Address, error) } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index bbcac5b6a4..f3e3fe534e 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -27,24 +27,34 @@ package vm import ( - "github.com/ava-labs/coreth/vmerrs" + "hash" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/vmerrs" ) -var BuiltinAddr = common.Address{ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} +var ( + BuiltinAddr = common.Address{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } +) // Config are the configuration options for the Interpreter type Config struct { + Debug bool // Enables debugging Tracer EVMLogger // Opcode logger NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages - ExtraEips []int // Additional EIPS that are to be enabled + + JumpTable *JumpTable // EVM instruction table, automatically populated if unset + + ExtraEips []int // Additional EIPS that are to be enabled + + // AllowUnfinalizedQueries allows unfinalized queries + AllowUnfinalizedQueries bool } // ScopeContext contains the things that are per-call, such as stack and memory, @@ -55,63 +65,67 @@ type ScopeContext struct { Contract *Contract } +// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports +// Read to get a variable amount of data from the hash state. Read is faster than Sum +// because it doesn't copy the internal state, but also modifies the internal state. +type keccakState interface { + hash.Hash + Read([]byte) (int, error) +} + // EVMInterpreter represents an EVM interpreter type EVMInterpreter struct { - evm *EVM - table *JumpTable + evm *EVM + cfg Config - hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes - hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes + hasher keccakState // Keccak256 hasher instance shared across opcodes + hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes readOnly bool // Whether to throw on stateful modifications returnData []byte // Last CALL's return data for subsequent reuse } // NewEVMInterpreter returns a new instance of the Interpreter. -func NewEVMInterpreter(evm *EVM) *EVMInterpreter { +func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { // If jump table was not initialised we set the default one.
- var table *JumpTable - switch { - case evm.chainRules.IsCancun: - table = &cancunInstructionSet - case evm.chainRules.IsDurango: - table = &durangoInstructionSet - case evm.chainRules.IsApricotPhase3: - table = &apricotPhase3InstructionSet - case evm.chainRules.IsApricotPhase2: - table = &apricotPhase2InstructionSet - case evm.chainRules.IsApricotPhase1: - table = &apricotPhase1InstructionSet - case evm.chainRules.IsIstanbul: - table = &istanbulInstructionSet - case evm.chainRules.IsConstantinople: - table = &constantinopleInstructionSet - case evm.chainRules.IsByzantium: - table = &byzantiumInstructionSet - case evm.chainRules.IsEIP158: - table = &spuriousDragonInstructionSet - case evm.chainRules.IsEIP150: - table = &tangerineWhistleInstructionSet - case evm.chainRules.IsHomestead: - table = &homesteadInstructionSet - default: - table = &frontierInstructionSet - } - var extraEips []int - if len(evm.Config.ExtraEips) > 0 { - // Deep-copy jumptable to prevent modification of opcodes in other tables - table = copyJumpTable(table) - } - for _, eip := range evm.Config.ExtraEips { - if err := EnableEIP(eip, table); err != nil { - // Disable it, so caller can check if it's activated or not - log.Error("EIP activation failed", "eip", eip, "error", err) - } else { - extraEips = append(extraEips, eip) + if cfg.JumpTable == nil { + switch { + case evm.chainRules.IsApricotPhase3: + cfg.JumpTable = &apricotPhase3InstructionSet + case evm.chainRules.IsApricotPhase2: + cfg.JumpTable = &apricotPhase2InstructionSet + case evm.chainRules.IsApricotPhase1: + cfg.JumpTable = &apricotPhase1InstructionSet + case evm.chainRules.IsIstanbul: + cfg.JumpTable = &istanbulInstructionSet + case evm.chainRules.IsConstantinople: + cfg.JumpTable = &constantinopleInstructionSet + case evm.chainRules.IsByzantium: + cfg.JumpTable = &byzantiumInstructionSet + case evm.chainRules.IsEIP158: + cfg.JumpTable = &spuriousDragonInstructionSet + case evm.chainRules.IsEIP150: + cfg.JumpTable = &tangerineWhistleInstructionSet + case evm.chainRules.IsHomestead: + cfg.JumpTable = &homesteadInstructionSet + default: + cfg.JumpTable = &frontierInstructionSet } + for i, eip := range cfg.ExtraEips { + copy := *cfg.JumpTable + if err := EnableEIP(eip, ©); err != nil { + // Disable it, so caller can check if it's activated or not + cfg.ExtraEips = append(cfg.ExtraEips[:i], cfg.ExtraEips[i+1:]...) 
+ log.Error("EIP activation failed", "eip", eip, "error", err) + } + cfg.JumpTable = © + } + } + + return &EVMInterpreter{ + evm: evm, + cfg: cfg, } - evm.Config.ExtraEips = extraEips - return &EVMInterpreter{evm: evm, table: table} } // Run loops and evaluates the contract's code with the given input data and returns @@ -174,7 +188,6 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( gasCopy uint64 // for EVMLogger to log gas remaining before execution logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function - debug = in.evm.Config.Tracer != nil ) // Don't move this deferred function, it's placed before the capturestate-deferred method, @@ -185,13 +198,13 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( }() contract.Input = input - if debug { + if in.cfg.Debug { defer func() { if err != nil { if !logged { - in.evm.Config.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) } else { - in.evm.Config.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) + in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) } } }() @@ -201,14 +214,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // the execution of one of the operations or until the done flag is set by the // parent context. for { - if debug { + if in.cfg.Debug { // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. 
op = contract.GetOp(pc) - operation := in.table[op] + operation := in.cfg.JumpTable[op] cost = operation.constantGas // For tracing // Validate stack if sLen := stack.len(); sLen < operation.minStack { @@ -247,15 +260,15 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( return nil, vmerrs.ErrOutOfGas } // Do tracing before memory expansion - if debug { - in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + if in.cfg.Debug { + in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } if memorySize > 0 { mem.Resize(memorySize) } - } else if debug { - in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + } else if in.cfg.Debug { + in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go index 018d7af188..4923bf89a4 100644 --- a/core/vm/interpreter_test.go +++ b/core/vm/interpreter_test.go @@ -31,12 +31,11 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" ) var loopInterruptTests = []string{ @@ -53,7 +52,7 @@ func TestLoopInterrupt(t *testing.T) { } for i, tt := range loopInterruptTests { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.CreateAccount(address) statedb.SetCode(address, common.Hex2Bytes(tt)) statedb.Finalise(true) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index c2cbf8c668..73bc44b0e3 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -29,7 +29,7 @@ package vm import ( "fmt" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/params" ) type ( diff --git a/core/vm/jump_table_test.go b/core/vm/jump_table_test.go index 6e838337c1..1db0383ccc 100644 --- a/core/vm/jump_table_test.go +++ b/core/vm/jump_table_test.go @@ -1,4 +1,4 @@ -// (c) 2023, Ava Labs, Inc. +// (c) 2020-2021, Ava Labs, Inc. // // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********** -// Copyright 2022 The go-ethereum Authors +// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,22 +24,19 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package vm +package memorydb import ( "testing" - "github.com/stretchr/testify/require" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/ethdb/dbtest" ) -// TestJumpTableCopy tests that deep copy is necessery to prevent modify shared jump table -func TestJumpTableCopy(t *testing.T) { - tbl := newDurangoInstructionSet() - require.Equal(t, uint64(0), tbl[SLOAD].constantGas) - - // a deep copy won't modify the shared jump table - deepCopy := copyJumpTable(&tbl) - deepCopy[SLOAD].constantGas = 100 - require.Equal(t, uint64(100), deepCopy[SLOAD].constantGas) - require.Equal(t, uint64(0), tbl[SLOAD].constantGas) +func TestMemoryDB(t *testing.T) { + t.Run("DatabaseSuite", func(t *testing.T) { + dbtest.TestDatabaseSuite(t, func() ethdb.KeyValueStore { + return New() + }) + }) } diff --git a/core/vm/memory.go b/core/vm/memory.go index 259b7bf463..eb6bc89078 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -113,14 +113,3 @@ func (m *Memory) Len() int { func (m *Memory) Data() []byte { return m.store } - -// Copy copies data from the src position slice into the dst position. -// The source and destination may overlap. -// OBS: This operation assumes that any necessary memory expansion has already been performed, -// and this method may panic otherwise. -func (m *Memory) Copy(dst, src, len uint64) { - if len == 0 { - return - } - copy(m.store[dst:], m.store[src:src+len]) -} diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index a32119a0f4..82edbf3866 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -35,7 +35,11 @@ type OpCode byte // IsPush specifies if an opcode is a PUSH opcode. func (op OpCode) IsPush() bool { - return PUSH0 <= op && op <= PUSH32 + switch op { + case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: + return true + } + return false } // 0x0 range - arithmetic ops. @@ -108,8 +112,6 @@ const ( CHAINID OpCode = 0x46 SELFBALANCE OpCode = 0x47 BASEFEE OpCode = 0x48 - BLOBHASH OpCode = 0x49 - BLOBBASEFEE OpCode = 0x4a ) // 0x50 range - 'storage' and execution. @@ -126,9 +128,6 @@ const ( MSIZE OpCode = 0x59 GAS OpCode = 0x5a JUMPDEST OpCode = 0x5b - TLOAD OpCode = 0x5c - TSTORE OpCode = 0x5d - MCOPY OpCode = 0x5e PUSH0 OpCode = 0x5f ) @@ -237,7 +236,8 @@ const ( SELFDESTRUCT OpCode = 0xff ) -var opCodeToString = [256]string{ +// Since the opcodes aren't all in order we can't use a regular slice. +var opCodeToString = map[OpCode]string{ // 0x0 range - arithmetic ops. STOP: "STOP", ADD: "ADD", @@ -300,11 +300,11 @@ var opCodeToString = [256]string{ CHAINID: "CHAINID", SELFBALANCE: "SELFBALANCE", BASEFEE: "BASEFEE", - BLOBHASH: "BLOBHASH", - BLOBBASEFEE: "BLOBBASEFEE", // 0x50 range - 'storage' and execution. - POP: "POP", + POP: "POP", + //DUP: "DUP", + //SWAP: "SWAP", MLOAD: "MLOAD", MSTORE: "MSTORE", MSTORE8: "MSTORE8", @@ -316,12 +316,9 @@ var opCodeToString = [256]string{ MSIZE: "MSIZE", GAS: "GAS", JUMPDEST: "JUMPDEST", - TLOAD: "TLOAD", - TSTORE: "TSTORE", - MCOPY: "MCOPY", PUSH0: "PUSH0", - // 0x60 range - pushes. + // 0x60 range - push. PUSH1: "PUSH1", PUSH2: "PUSH2", PUSH3: "PUSH3", @@ -355,7 +352,6 @@ var opCodeToString = [256]string{ PUSH31: "PUSH31", PUSH32: "PUSH32", - // 0x80 - dups. DUP1: "DUP1", DUP2: "DUP2", DUP3: "DUP3", @@ -373,7 +369,6 @@ var opCodeToString = [256]string{ DUP15: "DUP15", DUP16: "DUP16", - // 0x90 - swaps. 
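The Memory.Copy method deleted above relied on Go's built-in copy, which is specified to handle overlapping source and destination, which is exactly what MCOPY needs. A tiny sketch reproducing one overlap case from the deleted EIP-5656 tests (copy 8 bytes from offset 1 to offset 0):

package main

import "fmt"

func main() {
	m := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	copy(m[0:], m[1:1+8]) // overlapping regions are handled like memmove
	fmt.Printf("%x\n", m) // 010203040506070808
}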
SWAP1: "SWAP1", SWAP2: "SWAP2", SWAP3: "SWAP3", @@ -390,15 +385,13 @@ var opCodeToString = [256]string{ SWAP14: "SWAP14", SWAP15: "SWAP15", SWAP16: "SWAP16", + LOG0: "LOG0", + LOG1: "LOG1", + LOG2: "LOG2", + LOG3: "LOG3", + LOG4: "LOG4", - // 0xa0 range - logging ops. - LOG0: "LOG0", - LOG1: "LOG1", - LOG2: "LOG2", - LOG3: "LOG3", - LOG4: "LOG4", - - // 0xf0 range - closures. + // 0xf0 range. CREATE: "CREATE", CALL: "CALL", CALLEX: "CALLEX", @@ -413,10 +406,12 @@ var opCodeToString = [256]string{ } func (op OpCode) String() string { - if s := opCodeToString[op]; s != "" { - return s + str := opCodeToString[op] + if len(str) == 0 { + return fmt.Sprintf("opcode %#x not defined", int(op)) } - return fmt.Sprintf("opcode %#x not defined", int(op)) + + return str } var stringToOp = map[string]OpCode{ @@ -458,8 +453,6 @@ var stringToOp = map[string]OpCode{ "CALLDATACOPY": CALLDATACOPY, "CHAINID": CHAINID, "BASEFEE": BASEFEE, - "BLOBHASH": BLOBHASH, - "BLOBBASEFEE": BLOBBASEFEE, "DELEGATECALL": DELEGATECALL, "STATICCALL": STATICCALL, "CODESIZE": CODESIZE, @@ -489,9 +482,6 @@ var stringToOp = map[string]OpCode{ "MSIZE": MSIZE, "GAS": GAS, "JUMPDEST": JUMPDEST, - "TLOAD": TLOAD, - "TSTORE": TSTORE, - "MCOPY": MCOPY, "PUSH0": PUSH0, "PUSH1": PUSH1, "PUSH2": PUSH2, diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 5e946590d5..9db77be88f 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -29,58 +29,68 @@ package vm import ( "errors" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" ) -func makeGasSStoreFunc() gasFunc { - return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - // If we fail the minimum gas availability invariant, fail (0) - if contract.Gas <= params.SstoreSentryGasEIP2200 { - return 0, errors.New("not enough gas for reentrancy sentry") - } - // Gas sentry honoured, do the actual gas calculation based on the stored value - var ( - y, x = stack.Back(1), stack.peek() - slot = common.Hash(x.Bytes32()) - current = evm.StateDB.GetState(contract.Address(), slot) - cost = uint64(0) - ) - // Check slot presence in the access list - if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { - cost = params.ColdSloadCostEIP2929 - // If the caller cannot afford the cost, this change will be rolled back - evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - if !addrPresent { - // Once we're done with YOLOv2 and schedule this for mainnet, might - // be good to remove this panic here, which is just really a - // canary to have during testing - panic("impossible case: address was not present in access list during sstore op") - } +// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929 +// +// When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys. +// If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys. +// Additionally, modify the parameters defined in EIP 2200 as follows: +// +// Parameter Old value New value +// SLOAD_GAS 800 = WARM_STORAGE_READ_COST +// SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST +// +//The other parameters defined in EIP 2200 are unchanged. +// see gasSStoreEIP2200(...) 
in core/vm/gas_table.go for more info about how EIP 2200 is specified +func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // If we fail the minimum gas availability invariant, fail (0) + if contract.Gas <= params.SstoreSentryGasEIP2200 { + return 0, errors.New("not enough gas for reentrancy sentry") + } + // Gas sentry honoured, do the actual gas calculation based on the stored value + var ( + y, x = stack.Back(1), stack.peek() + slot = common.Hash(x.Bytes32()) + current = evm.StateDB.GetState(contract.Address(), slot) + cost = uint64(0) + ) + // Check slot presence in the access list + if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { + cost = params.ColdSloadCostEIP2929 + // If the caller cannot afford the cost, this change will be rolled back + evm.StateDB.AddSlotToAccessList(contract.Address(), slot) + if !addrPresent { + // Once we're done with YOLOv2 and schedule this for mainnet, might + // be good to remove this panic here, which is just really a + // canary to have during testing + panic("impossible case: address was not present in access list during sstore op") } - value := common.Hash(y.Bytes32()) + } + value := common.Hash(y.Bytes32()) - if current == value { // noop (1) - // EIP 2200 original clause: - // return params.SloadGasEIP2200, nil - return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS - } - original := evm.StateDB.GetCommittedStateAP1(contract.Address(), x.Bytes32()) - if original == current { - if original == (common.Hash{}) { // create slot (2.1.1) - return cost + params.SstoreSetGasEIP2200, nil - } - // EIP-2200 original clause: - // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2) - return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2) + if current == value { // noop (1) + // EIP 2200 original clause: + // return params.SloadGasEIP2200, nil + return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS + } + original := evm.StateDB.GetCommittedStateAP1(contract.Address(), x.Bytes32()) + if original == current { + if original == (common.Hash{}) { // create slot (2.1.1) + return cost + params.SstoreSetGasEIP2200, nil } - // EIP-2200 original clause: - //return params.SloadGasEIP2200, nil // dirty update (2.2) - return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) + // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2) + return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2) } + + // EIP-2200 original clause: + //return params.SloadGasEIP2200, nil // dirty update (2.2) + return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) } // gasSLoadEIP2929 calculates dynamic gas for SLOAD according to EIP-2929 @@ -184,44 +194,22 @@ var ( gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall) gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall) gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode) - gasSelfdestructEIP2929 = makeSelfdestructGasFn(false) // Note: refunds were never enabled on Avalanche - // gasSelfdestructEIP3529 implements the changes in EIP-2539 (no refunds) - gasSelfdestructEIP3529 = makeSelfdestructGasFn(false) - // gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929 - // - // When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys. 
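Concrete numbers for the EIP-2929 parameter table quoted above, using the constants published in the EIP (COLD_SLOAD_COST=2100, WARM_STORAGE_READ_COST=100, SSTORE_SET_GAS=20000, SSTORE_RESET_GAS=5000). The cold surcharge is paid only on the first touch of the slot in a transaction, matching the cost variable in the function above:

package main

import "fmt"

const (
	coldSloadCost = 2100  // COLD_SLOAD_COST
	warmReadCost  = 100   // WARM_STORAGE_READ_COST, the new SLOAD_GAS
	sstoreSet     = 20000 // SSTORE_SET_GAS
	sstoreReset   = 5000  // SSTORE_RESET_GAS (pre-2929 value)
)

func main() {
	for _, cold := range []bool{true, false} {
		surcharge := 0
		if cold {
			surcharge = coldSloadCost
		}
		fmt.Println(cold,
			surcharge+warmReadCost,              // no-op (1): 2200 cold, 100 warm
			surcharge+sstoreSet,                 // create slot (2.1.1): 22100, 20000
			surcharge+sstoreReset-coldSloadCost) // write existing (2.1.2): 5000, 2900
	}
}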
- // If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys. - // Additionally, modify the parameters defined in EIP 2200 as follows: - // - // Parameter Old value New value - // SLOAD_GAS 800 = WARM_STORAGE_READ_COST - // SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST - // - //The other parameters defined in EIP 2200 are unchanged. - // see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified - gasSStoreEIP2929 = makeGasSStoreFunc() ) -// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-2539 -func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { - gasFunc := func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - var ( - gas uint64 - address = common.Address(stack.peek().Bytes20()) - ) - if !evm.StateDB.AddressInAccessList(address) { - // If the caller cannot afford the cost, this change will be rolled back - evm.StateDB.AddAddressToAccessList(address) - gas = params.ColdAccountAccessCostEIP2929 - } - // if empty and transfers value - if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { - gas += params.CreateBySelfdestructGas - } - if refundsEnabled && !evm.StateDB.HasSelfDestructed(contract.Address()) { - evm.StateDB.AddRefund(params.SelfdestructRefundGas) - } - return gas, nil +func gasSelfdestructEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + var ( + gas uint64 + address = common.Address(stack.peek().Bytes20()) + ) + if !evm.StateDB.AddressInAccessList(address) { + // If the caller cannot afford the cost, this change will be rolled back + evm.StateDB.AddAddressToAccessList(address) + gas = params.ColdAccountAccessCostEIP2929 } - return gasFunc + // if empty and transfers value + if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { + gas += params.CreateBySelfdestructGas + } + + return gas, nil } diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index 41fa4f54d3..d8faa594ff 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -27,8 +27,8 @@ package runtime import ( - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/vm" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/vm" ) func NewEnv(cfg *Config) *vm.EVM { diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 338c1c53b3..71fa987784 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -29,14 +29,14 @@ package runtime import ( "math" "math/big" + "time" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" ) // Config is a basic type specifying certain configuration flags for running @@ -47,17 +47,13 @@ type Config struct { Origin common.Address Coinbase common.Address BlockNumber *big.Int - Time uint64 + Time *big.Int GasLimit uint64 GasPrice *big.Int Value *big.Int Debug bool EVMConfig vm.Config BaseFee *big.Int - BlobBaseFee *big.Int - BlobHashes []common.Hash - BlobFeeCap *big.Int - Random *common.Hash State *state.StateDB GetHashFn func(n 
uint64) common.Hash @@ -72,6 +68,7 @@ func setDefaults(cfg *Config) { DAOForkBlock: new(big.Int), DAOForkSupport: false, EIP150Block: new(big.Int), + EIP150Hash: common.Hash{}, EIP155Block: new(big.Int), EIP158Block: new(big.Int), ByzantiumBlock: new(big.Int), @@ -79,16 +76,19 @@ func setDefaults(cfg *Config) { PetersburgBlock: new(big.Int), IstanbulBlock: new(big.Int), MuirGlacierBlock: new(big.Int), - ApricotPhase1BlockTimestamp: new(uint64), - ApricotPhase2BlockTimestamp: new(uint64), - ApricotPhase3BlockTimestamp: new(uint64), - ApricotPhase4BlockTimestamp: new(uint64), + ApricotPhase1BlockTimestamp: new(big.Int), + ApricotPhase2BlockTimestamp: new(big.Int), + ApricotPhase3BlockTimestamp: new(big.Int), + ApricotPhase4BlockTimestamp: new(big.Int), } } if cfg.Difficulty == nil { cfg.Difficulty = new(big.Int) } + if cfg.Time == nil { + cfg.Time = big.NewInt(time.Now().Unix()) + } if cfg.GasLimit == 0 { cfg.GasLimit = math.MaxUint64 } @@ -109,9 +109,6 @@ func setDefaults(cfg *Config) { if cfg.BaseFee == nil { cfg.BaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) } - if cfg.BlobBaseFee == nil { - cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice) - } } // Execute executes the code using the input as call data during the execution. @@ -126,19 +123,16 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { setDefaults(cfg) if cfg.State == nil { - cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) } var ( address = common.BytesToAddress([]byte("contract")) vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) - rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time) ) - // Execute the preparatory steps for state transition which includes: - // - prepare accessList(post-berlin/ApricotPhase2) - // - reset transient storage(eip 1153) - cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil) - + if rules := cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time); rules.IsApricotPhase2 { + cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil) + } cfg.State.CreateAccount(address) // set the receiver's (the executing contract) code for execution. cfg.State.SetCode(address, code) @@ -150,6 +144,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { cfg.GasLimit, cfg.Value, ) + return ret, cfg.State, err } @@ -161,18 +156,15 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { setDefaults(cfg) if cfg.State == nil { - cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) } var ( vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) - rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time) ) - // Execute the preparatory steps for state transition which includes: - // - prepare accessList(post-berlin/ApricotPhase2) - // - reset transient storage(eip 1153) - cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, nil, vm.ActivePrecompiles(rules), nil) - + if rules := cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time); rules.IsApricotPhase2 { + cfg.State.PrepareAccessList(cfg.Origin, nil, vm.ActivePrecompiles(rules), nil) + } // Call the code with the given configuration. 
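For orientation, here is a minimal use of the runtime package these hunks reshape, relying only on the Execute signature visible above; setDefaults fills in the chain config, time and gas limit. The bytecode is PUSH1 0, PUSH1 0, RETURN, i.e. return zero bytes. The import path follows this fork and is assumed to resolve as a module:

package main

import (
	"fmt"

	"github.com/tenderly/coreth/core/vm/runtime"
)

func main() {
	code := []byte{0x60, 0x00, 0x60, 0x00, 0xF3} // PUSH1 0, PUSH1 0, RETURN
	ret, _, err := runtime.Execute(code, nil, &runtime.Config{})
	fmt.Println(ret, err) // [] <nil>
}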
code, address, leftOverGas, err := vmenv.Create( sender, @@ -191,17 +183,14 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, error) { setDefaults(cfg) - var ( - vmenv = NewEnv(cfg) - sender = cfg.State.GetOrNewStateObject(cfg.Origin) - statedb = cfg.State - rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time) - ) - // Execute the preparatory steps for state transition which includes: - // - prepare accessList(post-berlin/ApricotPhase2) - // - reset transient storage(eip 1153) - statedb.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil) + vmenv := NewEnv(cfg) + + sender := cfg.State.GetOrNewStateObject(cfg.Origin) + statedb := cfg.State + if rules := cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time); rules.IsApricotPhase2 { + statedb.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil) + } // Call the code with the given configuration. ret, leftOverGas, err := vmenv.Call( sender, diff --git a/core/vm/runtime/runtime_example_test.go b/core/vm/runtime/runtime_example_test.go index 9850e283be..22fdf1becc 100644 --- a/core/vm/runtime/runtime_example_test.go +++ b/core/vm/runtime/runtime_example_test.go @@ -29,8 +29,8 @@ package runtime_test import ( "fmt" - "github.com/ava-labs/coreth/core/vm/runtime" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/vm/runtime" ) func ExampleExecute() { diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 624da0f71f..f06b1552b5 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -33,21 +33,21 @@ import ( "strings" "testing" - "github.com/ava-labs/coreth/accounts/abi" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/asm" - - // force-load js tracers to trigger registration - _ "github.com/ava-labs/coreth/eth/tracers/js" + "github.com/tenderly/coreth/accounts/abi" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers" + "github.com/tenderly/coreth/eth/tracers/logger" + "github.com/tenderly/coreth/params" + + // force-load native tracers to trigger registration + _ "github.com/tenderly/coreth/eth/tracers/native" ) func TestDefaults(t *testing.T) { @@ -58,6 +58,9 @@ func TestDefaults(t *testing.T) { t.Error("expected difficulty to be non nil") } + if cfg.Time == nil { + t.Error("expected time to be non nil") + } if cfg.GasLimit == 0 { t.Error("didn't expect gaslimit to be zero") } @@ -113,7 +116,7 @@ func TestExecute(t *testing.T) { } func TestCall(t *testing.T) { - state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) address := common.HexToAddress("0x0a") state.SetCode(address, []byte{ byte(vm.PUSH1), 10, @@ 
-169,7 +172,7 @@ func BenchmarkCall(b *testing.B) { } func benchmarkEVM_Create(bench *testing.B, code string) { var ( - statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) sender = common.BytesToAddress([]byte("sender")) receiver = common.BytesToAddress([]byte("receiver")) ) @@ -181,7 +184,7 @@ func benchmarkEVM_Create(bench *testing.B, code string) { State: statedb, GasLimit: 10000000, Difficulty: big.NewInt(0x200000), - Time: 0, + Time: new(big.Int).SetUint64(0), Coinbase: common.Address{}, BlockNumber: new(big.Int).SetUint64(1), ChainConfig: ¶ms.ChainConfig{ @@ -337,14 +340,15 @@ func TestBlockhash(t *testing.T) { func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode string, b *testing.B) { cfg := new(Config) setDefaults(cfg) - cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) cfg.GasLimit = gas if len(tracerCode) > 0 { - tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil) + tracer, err := tracers.New(tracerCode, new(tracers.Context), nil) if err != nil { b.Fatal(err) } cfg.EVMConfig = vm.Config{ + Debug: true, Tracer: tracer, } } @@ -520,6 +524,7 @@ func TestEip2929Cases(t *testing.T) { code, ops) Execute(code, nil, &Config{ EVMConfig: vm.Config{ + Debug: true, Tracer: logger.NewMarkdownLogger(nil, os.Stdout), ExtraEips: []int{2929}, }, @@ -673,6 +678,7 @@ func TestColdAccountAccessCost(t *testing.T) { tracer := logger.NewStructLogger(nil) Execute(tc.code, nil, &Config{ EVMConfig: vm.Config{ + Debug: true, Tracer: tracer, }, }) @@ -681,244 +687,7 @@ func TestColdAccountAccessCost(t *testing.T) { for ii, op := range tracer.StructLogs() { t.Logf("%d: %v %d", ii, op.OpName(), op.GasCost) } - t.Fatalf("testcase %d, gas report wrong, step %d, have %d want %d", i, tc.step, have, want) - } - } -} - -func TestRuntimeJSTracer(t *testing.T) { - jsTracers := []string{ - `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, - step: function() { this.steps++}, - fault: function() {}, - result: function() { - return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") - }, - enter: function(frame) { - this.enters++; - this.enterGas = frame.getGas(); - }, - exit: function(res) { - this.exits++; - this.gasUsed = res.getGasUsed(); - }}`, - `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, - fault: function() {}, - result: function() { - return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") - }, - enter: function(frame) { - this.enters++; - this.enterGas = frame.getGas(); - }, - exit: function(res) { - this.exits++; - this.gasUsed = res.getGasUsed(); - }}`} - tests := []struct { - code []byte - // One result per tracer - results []string - }{ - { - // CREATE - code: []byte{ - // Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes) - byte(vm.PUSH5), - // Init code: PUSH1 0, PUSH1 0, RETURN (3 steps) - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN), - byte(vm.PUSH1), 0, - byte(vm.MSTORE), - // length, offset, value - byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0, - byte(vm.CREATE), - byte(vm.POP), - }, - results: []string{`"1,1,952855,6,12"`, `"1,1,952855,6,0"`}, - }, - { - // CREATE2 - code: []byte{ - // Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes) - byte(vm.PUSH5), - 
// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps) - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN), - byte(vm.PUSH1), 0, - byte(vm.MSTORE), - // salt, length, offset, value - byte(vm.PUSH1), 1, byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0, - byte(vm.CREATE2), - byte(vm.POP), - }, - results: []string{`"1,1,952846,6,13"`, `"1,1,952846,6,0"`}, - }, - { - // CALL - code: []byte{ - // outsize, outoffset, insize, inoffset - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0, // value - byte(vm.PUSH1), 0xbb, //address - byte(vm.GAS), // gas - byte(vm.CALL), - byte(vm.POP), - }, - results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`}, - }, - { - // CALLCODE - code: []byte{ - // outsize, outoffset, insize, inoffset - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0, // value - byte(vm.PUSH1), 0xcc, //address - byte(vm.GAS), // gas - byte(vm.CALLCODE), - byte(vm.POP), - }, - results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`}, - }, - { - // STATICCALL - code: []byte{ - // outsize, outoffset, insize, inoffset - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0xdd, //address - byte(vm.GAS), // gas - byte(vm.STATICCALL), - byte(vm.POP), - }, - results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`}, - }, - { - // DELEGATECALL - code: []byte{ - // outsize, outoffset, insize, inoffset - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0xee, //address - byte(vm.GAS), // gas - byte(vm.DELEGATECALL), - byte(vm.POP), - }, - results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`}, - }, - { - // CALL self-destructing contract - code: []byte{ - // outsize, outoffset, insize, inoffset - byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0, // value - byte(vm.PUSH1), 0xff, //address - byte(vm.GAS), // gas - byte(vm.CALL), - byte(vm.POP), - }, - results: []string{`"2,2,0,5003,12"`, `"2,2,0,5003,0"`}, - }, - } - calleeCode := []byte{ - byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0, - byte(vm.RETURN), - } - depressedCode := []byte{ - byte(vm.PUSH1), 0xaa, - byte(vm.SELFDESTRUCT), - } - main := common.HexToAddress("0xaa") - for i, jsTracer := range jsTracers { - for j, tc := range tests { - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - statedb.SetCode(main, tc.code) - statedb.SetCode(common.HexToAddress("0xbb"), calleeCode) - statedb.SetCode(common.HexToAddress("0xcc"), calleeCode) - statedb.SetCode(common.HexToAddress("0xdd"), calleeCode) - statedb.SetCode(common.HexToAddress("0xee"), calleeCode) - statedb.SetCode(common.HexToAddress("0xff"), depressedCode) - - tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil) - if err != nil { - t.Fatal(err) - } - _, _, err = Call(main, nil, &Config{ - GasLimit: 1000000, - State: statedb, - EVMConfig: vm.Config{ - Tracer: tracer, - }}) - if err != nil { - t.Fatal("didn't expect error", err) - } - res, err := tracer.GetResult() - if err != nil { - t.Fatal(err) - } - if have, want := string(res), tc.results[i]; have != want { - t.Errorf("wrong result for tracer %d testcase %d, have \n%v\nwant\n%v\n", i, j, have, want) - } + t.Fatalf("testcase %d, gas report wrong, step %d, have %d want %d", i, tc.step, have, want) } } } - -func TestJSTracerCreateTx(t *testing.T) { - jsTracer := ` - {enters: 0, exits: 0, - step: function() {}, - fault: function() {}, -
result: function() { return [this.enters, this.exits].join(",") }, - enter: function(frame) { this.enters++ }, - exit: function(res) { this.exits++ }}` - code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)} - - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil) - if err != nil { - t.Fatal(err) - } - _, _, _, err = Create(code, &Config{ - State: statedb, - EVMConfig: vm.Config{ - Tracer: tracer, - }}) - if err != nil { - t.Fatal(err) - } - - res, err := tracer.GetResult() - if err != nil { - t.Fatal(err) - } - if have, want := string(res), `"0,0"`; have != want { - t.Errorf("wrong result for tracer, have \n%v\nwant\n%v\n", have, want) - } -} - -func BenchmarkTracerStepVsCallFrame(b *testing.B) { - // Simply pushes and pops some values in a loop - code := []byte{ - byte(vm.JUMPDEST), - byte(vm.PUSH1), 0, - byte(vm.PUSH1), 0, - byte(vm.POP), - byte(vm.POP), - byte(vm.PUSH1), 0, // jumpdestination - byte(vm.JUMP), - } - - stepTracer := ` - { - step: function() {}, - fault: function() {}, - result: function() {}, - }` - callFrameTracer := ` - { - enter: function() {}, - exit: function() {}, - fault: function() {}, - result: function() {}, - }` - - benchmarkNonModifyingCode(10000000, code, "tracer-step-10M", stepTracer, b) - benchmarkNonModifyingCode(10000000, code, "tracer-call-frame-10M", callFrameTracer, b) -} diff --git a/core/vm/stack_table.go b/core/vm/stack_table.go index 487acaefdb..0ee3c12611 100644 --- a/core/vm/stack_table.go +++ b/core/vm/stack_table.go @@ -27,7 +27,7 @@ package vm import ( - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/params" ) func minSwapStack(n int) int { diff --git a/eth/bloombits.go b/eth/bloombits.go index ecc0aaf157..31be317434 100644 --- a/eth/bloombits.go +++ b/eth/bloombits.go @@ -29,7 +29,7 @@ package eth import ( "time" - "github.com/ava-labs/coreth/core/rawdb" + "github.com/tenderly/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common/bitutil" ) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 7f28c81721..b24a9eeca6 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -29,12 +29,9 @@ package ethconfig import ( "time" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/txpool/blobpool" - "github.com/ava-labs/coreth/core/txpool/legacypool" - "github.com/ava-labs/coreth/eth/gasprice" - "github.com/ava-labs/coreth/miner" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/eth/gasprice" + "github.com/tenderly/coreth/miner" "github.com/ethereum/go-ethereum/common" ) @@ -42,7 +39,6 @@ import ( var DefaultFullGPOConfig = gasprice.Config{ Blocks: 40, Percentile: 60, - MaxLookbackSeconds: gasprice.DefaultMaxLookbackSeconds, MaxCallBlockHistory: gasprice.DefaultMaxCallBlockHistory, MaxBlockHistory: gasprice.DefaultMaxBlockHistory, MinPrice: gasprice.DefaultMinPrice, @@ -55,35 +51,37 @@ var DefaultConfig = NewDefaultConfig() func NewDefaultConfig() Config { return Config{ - NetworkId: 0, // enable auto configuration of networkID == chainID - StateHistory: params.FullImmutabilityThreshold, - TrieCleanCache: 512, - TrieDirtyCache: 256, - TrieDirtyCommitTarget: 20, - TriePrefetcherParallelism: 16, - SnapshotCache: 256, - AcceptedCacheSize: 32, - Miner: miner.Config{}, - TxPool: legacypool.DefaultConfig, - BlobPool: blobpool.DefaultConfig, - RPCGasCap: 25000000, - RPCEVMTimeout: 5 * 
time.Second, - GPO: DefaultFullGPOConfig, - RPCTxFeeCap: 1, // 1 AVAX + NetworkId: 1, + LightPeers: 100, + UltraLightFraction: 75, + DatabaseCache: 512, + TrieCleanCache: 256, + TrieDirtyCache: 256, + TrieDirtyCommitTarget: 20, + SnapshotCache: 128, + Miner: miner.Config{}, + TxPool: core.DefaultTxPoolConfig, + RPCGasCap: 25000000, + RPCEVMTimeout: 5 * time.Second, + GPO: DefaultFullGPOConfig, + RPCTxFeeCap: 1, // 1 AVAX } } -//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go +//go:generate gencodec -type Config -formats toml -out gen_config.go -// Config contains configuration options for ETH and LES protocols. +// Config contains configuration options for the ETH and LES protocols. type Config struct { // The genesis block, which is inserted if the database is empty. // If nil, the Ethereum main net block is used. Genesis *core.Genesis `toml:",omitempty"` - // Network ID separates blockchains on the peer-to-peer networking level. When left - // zero, the chain ID is used as network ID. - NetworkId uint64 + // Protocol options + NetworkId uint64 // Network ID to use for selecting peers to connect to + + // This can be set to a list of enrtree:// URLs which will be queried + // for nodes to connect to. + DiscoveryURLs []string Pruning bool // Whether to disable pruning and flush everything to disk AcceptorQueueLimit int // Maximum blocks to queue before blocking during acceptance @@ -91,32 +89,43 @@ type Config struct { PopulateMissingTries *uint64 // Height at which to start re-populating missing tries on startup. PopulateMissingTriesParallelism int // Number of concurrent readers to use when re-populating missing tries on startup. AllowMissingTries bool // Whether to allow an archival node to run with pruning enabled and corrupt a complete index.
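A consumer-side sketch of the defaults restored above; only fields visible in this hunk are used, and the override values are arbitrary examples, not recommendations.

package main

import (
	"fmt"

	"github.com/tenderly/coreth/eth/ethconfig"
)

func main() {
	cfg := ethconfig.NewDefaultConfig() // NetworkId: 1, TrieDirtyCache: 256, ...
	cfg.RPCGasCap = 50000000            // raise the 25M default cap for eth_call variants
	cfg.Pruning = true                  // enable state pruning (field shown above)
	fmt.Printf("network=%d gasCap=%d\n", cfg.NetworkId, cfg.RPCGasCap)
}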
- SnapshotDelayInit bool // Whether snapshot tree should be initialized on startup or delayed until explicit call (= StateSyncEnabled) - SnapshotWait bool // Whether to wait for the initial snapshot generation + SnapshotDelayInit bool // Whether snapshot tree should be initialized on startup or delayed until explicit call + SnapshotAsync bool // Whether to generate the initial snapshot in async mode SnapshotVerify bool // Whether to verify generated snapshots SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) + // Whitelist of required block number -> hash values to accept + Whitelist map[uint64]common.Hash `toml:"-"` + + // Light client options + LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests + LightIngress int `toml:",omitempty"` // Incoming bandwidth limit for light servers + LightEgress int `toml:",omitempty"` // Outgoing bandwidth limit for light servers + LightPeers int `toml:",omitempty"` // Maximum number of LES client peers + LightNoPrune bool `toml:",omitempty"` // Whether to disable light chain pruning + + // Ultra Light client options + UltraLightServers []string `toml:",omitempty"` // List of trusted ultra light servers + UltraLightFraction int `toml:",omitempty"` // Percentage of trusted servers to accept an announcement + UltraLightOnlyAnnounce bool `toml:",omitempty"` // Whether to only announce headers, or also serve them + // Database options SkipBcVersionCheck bool `toml:"-"` + DatabaseHandles int `toml:"-"` + DatabaseCache int + // DatabaseFreezer string - // TrieDB and snapshot options - TrieCleanCache int - TrieDirtyCache int - TrieDirtyCommitTarget int - TriePrefetcherParallelism int - SnapshotCache int - Preimages bool - - // AcceptedCacheSize is the depth of accepted headers cache and accepted - // logs cache at the accepted tip. - AcceptedCacheSize int + TrieCleanCache int + TrieDirtyCache int + TrieDirtyCommitTarget int + SnapshotCache int + Preimages bool // Mining options Miner miner.Config // Transaction pool options - TxPool legacypool.Config - BlobPool blobpool.Config + TxPool core.TxPoolConfig // Gas Price Oracle options GPO gasprice.Config @@ -124,6 +133,9 @@ type Config struct { // Enables tracking of SHA3 preimages in the VM EnablePreimageRecording bool + // Miscellaneous options + DocRoot string `toml:"-"` + // RPCGasCap is the global gas cap for eth-call variants. RPCGasCap uint64 `toml:",omitempty"` @@ -131,7 +143,7 @@ type Config struct { RPCEVMTimeout time.Duration // RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for - send-transaction variants. The unit is ether. + send-transaction variants. The unit is ether. RPCTxFeeCap float64 `toml:",omitempty"` // AllowUnfinalizedQueries allow unfinalized queries @@ -141,38 +153,10 @@ type Config struct { // Unprotected transactions are transactions that are signed without EIP-155 // replay protection. AllowUnprotectedTxs bool - // AllowUnprotectedTxHashes provides a list of transaction hashes, which will be allowed - // to be issued without replay protection over the API even if AllowUnprotectedTxs is false. - AllowUnprotectedTxHashes []common.Hash // OfflinePruning enables offline pruning on startup of the node. If a node is started // with this configuration option, it must finish pruning before resuming normal operation.
OfflinePruning bool OfflinePruningBloomFilterSize uint64 OfflinePruningDataDirectory string - - // SkipUpgradeCheck disables checking that upgrades must take place before the last - // accepted block. Skipping this check is useful when a node operator does not update - // their node before the network upgrade and their node accepts blocks that have - // identical state with the pre-upgrade ruleset. - SkipUpgradeCheck bool - - // TxLookupLimit is the maximum number of blocks from head whose tx indices - // are reserved: - // * 0: means no limit - // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes - // Deprecated, use 'TransactionHistory' instead. - TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. - TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. - StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. - - // State scheme represents the scheme used to store ethereum states and trie - // nodes on top. It can be 'hash', 'path', or none which means use the scheme - // consistent with persistent state. - StateScheme string `toml:",omitempty"` - - // SkipTxIndexing skips indexing transactions. - // This is useful for validators that don't need to index transactions. - // TxLookupLimit can be still used to control unindexing old transactions. - SkipTxIndexing bool } diff --git a/eth/filters/api.go b/eth/filters/api.go index c51117968d..a1e155906f 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -35,64 +35,53 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" - "github.com/ava-labs/coreth/internal/ethapi" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" ) -var ( - errInvalidTopic = errors.New("invalid topic(s)") - errFilterNotFound = errors.New("filter not found") - errInvalidBlockRange = errors.New("invalid block range params") - errExceedMaxTopics = errors.New("exceed max topics") -) - -// The maximum number of topic criteria allowed, vm.LOG4 - vm.LOG0 -const maxTopics = 4 - // filter is a helper struct that holds meta information over the filter type // and associated subscription in the event system. type filter struct { typ Type - deadline *time.Timer // filter is inactive when deadline triggers + deadline *time.Timer // filter is inactive when deadline triggers hashes []common.Hash - fullTx bool - txs []*types.Transaction crit FilterCriteria logs []*types.Log s *Subscription // associated subscription in event system } -// FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various -// information related to the Ethereum protocol such as blocks, transactions and logs. -type FilterAPI struct { - sys *FilterSystem +// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various +// information related to the Ethereum protocol such as blocks, transactions and logs.
+type PublicFilterAPI struct { + backend Backend + mux *event.TypeMux + quit chan struct{} events *EventSystem filtersMu sync.Mutex filters map[rpc.ID]*filter timeout time.Duration } -// NewFilterAPI returns a new FilterAPI instance. -func NewFilterAPI(system *FilterSystem) *FilterAPI { - api := &FilterAPI{ - sys: system, - events: NewEventSystem(system), +// NewPublicFilterAPI returns a new PublicFilterAPI instance. +func NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *PublicFilterAPI { + api := &PublicFilterAPI{ + backend: backend, + events: NewEventSystem(backend, lightMode), filters: make(map[rpc.ID]*filter), - timeout: system.cfg.Timeout, + timeout: timeout, } - go api.timeoutLoop(system.cfg.Timeout) + go api.timeoutLoop(timeout) return api } // timeoutLoop runs at the interval set by 'timeout' and deletes filters // that have not been recently used. It is started when the API is created. -func (api *FilterAPI) timeoutLoop(timeout time.Duration) { +func (api *PublicFilterAPI) timeoutLoop(timeout time.Duration) { var toUninstall []*Subscription ticker := time.NewTicker(timeout) defer ticker.Stop() @@ -120,28 +109,30 @@ func (api *FilterAPI) timeoutLoop(timeout time.Duration) { } } -// NewPendingTransactionFilter creates a filter that fetches pending transactions +// NewPendingTransactionFilter creates a filter that fetches pending transaction hashes // as transactions enter the pending state. // // It is part of the filter package because this filter can be used through the // `eth_getFilterChanges` polling method that is also used for log filters. -func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { +// +// https://eth.wiki/json-rpc/API#eth_newpendingtransactionfilter +func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID { var ( - pendingTxs = make(chan []*types.Transaction) + pendingTxs = make(chan []common.Hash) pendingTxSub = api.events.SubscribePendingTxs(pendingTxs) ) api.filtersMu.Lock() - api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, fullTx: fullTx != nil && *fullTx, deadline: time.NewTimer(api.timeout), txs: make([]*types.Transaction, 0), s: pendingTxSub} + api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub} api.filtersMu.Unlock() go func() { for { select { - case pTx := <-pendingTxs: + case ph := <-pendingTxs: api.filtersMu.Lock() if f, found := api.filters[pendingTxSub.ID]; found { - f.txs = append(f.txs, pTx...) + f.hashes = append(f.hashes, ph...) } api.filtersMu.Unlock() case <-pendingTxSub.Err(): @@ -156,10 +147,9 @@ func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { return pendingTxSub.ID } -// NewPendingTransactions creates a subscription that is triggered each time a -// transaction enters the transaction pool. If fullTx is true the full tx is -// sent to the client, otherwise the hash is sent. -func (api *FilterAPI) NewPendingTransactions(ctx context.Context, fullTx *bool) (*rpc.Subscription, error) { +// NewPendingTransactions creates a subscription that is triggered each time a transaction // enters the transaction pool and was signed from one of the accounts this node manages.
+func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -168,23 +158,16 @@ func (api *FilterAPI) NewPendingTransactions(ctx context.Context, fullTx *bool) rpcSub := notifier.CreateSubscription() go func() { - txs := make(chan []*types.Transaction, 128) - pendingTxSub := api.events.SubscribePendingTxs(txs) - chainConfig := api.sys.backend.ChainConfig() + txHashes := make(chan []common.Hash, 128) + pendingTxSub := api.events.SubscribePendingTxs(txHashes) for { select { - case txs := <-txs: + case hashes := <-txHashes: // To keep the original behaviour, send a single tx hash in one notification. // TODO(rjl493456442) Send a batch of tx hashes in one notification - latest := api.sys.backend.CurrentHeader() - for _, tx := range txs { - if fullTx != nil && *fullTx { - rpcTx := ethapi.NewRPCTransaction(tx, latest, latest.BaseFee, chainConfig) - notifier.Notify(rpcSub.ID, rpcTx) - } else { - notifier.Notify(rpcSub.ID, tx.Hash()) - } + for _, h := range hashes { + notifier.Notify(rpcSub.ID, h) } case <-rpcSub.Err(): pendingTxSub.Unsubscribe() @@ -199,10 +182,8 @@ func (api *FilterAPI) NewPendingTransactions(ctx context.Context, fullTx *bool) return rpcSub, nil } -// NewAcceptedTransactions creates a subscription that is triggered each time a -// transaction is accepted. If fullTx is true the full tx is -// sent to the client, otherwise the hash is sent. -func (api *FilterAPI) NewAcceptedTransactions(ctx context.Context, fullTx *bool) (*rpc.Subscription, error) { +// NewAcceptedTransactions creates a subscription that is triggered each time a transaction is accepted. +func (api *PublicFilterAPI) NewAcceptedTransactions(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -211,23 +192,14 @@ func (api *FilterAPI) NewAcceptedTransactions(ctx context.Context, fullTx *bool) rpcSub := notifier.CreateSubscription() go func() { - txs := make(chan []*types.Transaction, 128) - acceptedTxSub := api.events.SubscribeAcceptedTxs(txs) - chainConfig := api.sys.backend.ChainConfig() + txHashes := make(chan []common.Hash, 128) + acceptedTxSub := api.events.SubscribeAcceptedTxs(txHashes) for { select { - case txs := <-txs: - // To keep the original behaviour, send a single tx hash in one notification. - // TODO(rjl493456442) Send a batch of tx hashes in one notification - latest := api.sys.backend.LastAcceptedBlock().Header() - for _, tx := range txs { - if fullTx != nil && *fullTx { - rpcTx := ethapi.NewRPCTransaction(tx, latest, latest.BaseFee, chainConfig) - notifier.Notify(rpcSub.ID, rpcTx) - } else { - notifier.Notify(rpcSub.ID, tx.Hash()) - } + case hashes := <-txHashes: + for _, h := range hashes { + notifier.Notify(rpcSub.ID, h) } case <-rpcSub.Err(): acceptedTxSub.Unsubscribe() @@ -244,13 +216,15 @@ func (api *FilterAPI) NewAcceptedTransactions(ctx context.Context, fullTx *bool) // NewBlockFilter creates a filter that fetches blocks that are imported into the chain. // It is part of the filter package since polling goes with eth_getFilterChanges. 
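Client-side view of the polling flow these handlers implement, as a sketch: the filter id returned by eth_newPendingTransactionFilter is fed to eth_getFilterChanges, and timeoutLoop uninstalls a filter that is not polled within the timeout. The endpoint URL is illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:9650/ext/bc/C/rpc") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	var id string // rpc.ID on the wire
	if err := client.Call(&id, "eth_newPendingTransactionFilter"); err != nil {
		panic(err)
	}
	// Each poll returns the tx hashes accumulated since the previous poll.
	var hashes []string
	if err := client.Call(&hashes, "eth_getFilterChanges", id); err != nil {
		panic(err)
	}
	fmt.Println(hashes)
}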
-func (api *FilterAPI) NewBlockFilter() rpc.ID { +// +// https://eth.wiki/json-rpc/API#eth_newblockfilter +func (api *PublicFilterAPI) NewBlockFilter() rpc.ID { var ( headers = make(chan *types.Header) headerSub *Subscription ) - if api.sys.backend.IsAllowUnfinalizedQueries() { + if api.backend.GetVMConfig().AllowUnfinalizedQueries { headerSub = api.events.SubscribeNewHeads(headers) } else { headerSub = api.events.SubscribeAcceptedHeads(headers) @@ -282,7 +256,7 @@ func (api *FilterAPI) NewBlockFilter() rpc.ID { } // NewHeads send a notification each time a new (header) block is appended to the chain. -func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { +func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -296,7 +270,7 @@ func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { headersSub event.Subscription ) - if api.sys.backend.IsAllowUnfinalizedQueries() { + if api.backend.GetVMConfig().AllowUnfinalizedQueries { headersSub = api.events.SubscribeNewHeads(headers) } else { headersSub = api.events.SubscribeAcceptedHeads(headers) @@ -320,7 +294,7 @@ func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { } // Logs creates a subscription that fires for all new log that match the given filter criteria. -func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) { +func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -333,7 +307,7 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc err error ) - if api.sys.backend.IsAllowUnfinalizedQueries() { + if api.backend.GetVMConfig().AllowUnfinalizedQueries { logsSub, err = api.events.SubscribeLogs(interfaces.FilterQuery(crit), matchedLogs) if err != nil { return nil, err @@ -350,7 +324,6 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc select { case logs := <-matchedLogs: for _, log := range logs { - log := log notifier.Notify(rpcSub.ID, &log) } case <-rpcSub.Err(): // client send an unsubscribe request @@ -381,22 +354,24 @@ type FilterCriteria interfaces.FilterQuery // again but with the removed property set to true. // // In case "fromBlock" > "toBlock" an error is returned. -func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { +// +// https://eth.wiki/json-rpc/API#eth_newfilter +func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { var ( logs = make(chan []*types.Log) logsSub *Subscription err error ) - if api.sys.backend.IsAllowUnfinalizedQueries() { + if api.backend.GetVMConfig().AllowUnfinalizedQueries { logsSub, err = api.events.SubscribeLogs(interfaces.FilterQuery(crit), logs) if err != nil { - return "", err + return rpc.ID(""), err } } else { logsSub, err = api.events.SubscribeAcceptedLogs(interfaces.FilterQuery(crit), logs) if err != nil { - return "", err + return rpc.ID(""), err } } @@ -426,14 +401,13 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { } // GetLogs returns logs matching the given argument that are stored within the state. 
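The push-based counterpart to the block and log filters above, sketched from the client side with go-ethereum's rpc package, whose EthSubscribe maps onto eth_subscribe; the websocket URL and the empty criteria object are assumptions for illustration.

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("ws://localhost:9650/ext/bc/C/ws") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	logs := make(chan types.Log)
	sub, err := client.EthSubscribe(context.Background(), logs, "logs", map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()
	fmt.Println(<-logs) // first log matching the (empty, match-all) criteria
}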
-func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { - if len(crit.Topics) > maxTopics { - return nil, errExceedMaxTopics - } +// +// https://eth.wiki/json-rpc/API#eth_getlogs +func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { var filter *Filter if crit.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = api.sys.NewBlockFilter(*crit.BlockHash, crit.Addresses, crit.Topics) + filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics) } else { // Convert the RPC block numbers into internal representations // LatestBlockNumber is left in place here to be handled @@ -446,11 +420,12 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type if crit.ToBlock != nil { end = crit.ToBlock.Int64() } - if begin > 0 && end > 0 && begin > end { - return nil, errInvalidBlockRange - } // Construct the range filter - filter = api.sys.NewRangeFilter(begin, end, crit.Addresses, crit.Topics) + var err error + filter, err = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics) + if err != nil { + return nil, err + } } // Run the filter and return all the logs logs, err := filter.Logs(ctx) @@ -461,7 +436,9 @@ func (api *FilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*type } // UninstallFilter removes the filter with the given filter id. -func (api *FilterAPI) UninstallFilter(id rpc.ID) bool { +// +// https://eth.wiki/json-rpc/API#eth_uninstallfilter +func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool { api.filtersMu.Lock() f, found := api.filters[id] if found { @@ -477,19 +454,21 @@ func (api *FilterAPI) UninstallFilter(id rpc.ID) bool { // GetFilterLogs returns the logs for the filter with the given id. // If the filter could not be found an empty array of logs is returned. -func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) { +// +// https://eth.wiki/json-rpc/API#eth_getfilterlogs +func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) { api.filtersMu.Lock() f, found := api.filters[id] api.filtersMu.Unlock() if !found || f.typ != LogsSubscription { - return nil, errFilterNotFound + return nil, fmt.Errorf("filter not found") } var filter *Filter if f.crit.BlockHash != nil { // Block filter requested, construct a single-shot filter - filter = api.sys.NewBlockFilter(*f.crit.BlockHash, f.crit.Addresses, f.crit.Topics) + filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics) } else { // Convert the RPC block numbers into internal representations // Leave LatestBlockNumber in place here as the defaults @@ -504,7 +483,11 @@ func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Lo end = f.crit.ToBlock.Int64() } // Construct the range filter - filter = api.sys.NewRangeFilter(begin, end, f.crit.Addresses, f.crit.Topics) + var err error + filter, err = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics) + if err != nil { + return nil, err + } } // Run the filter and return all the logs logs, err := filter.Logs(ctx) @@ -519,18 +502,12 @@ func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Lo // // For pending transaction and block filters the result is []common.Hash. // (pending)Log filters return []Log. 
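The two mutually exclusive ways to scope a filter that GetLogs and GetFilterLogs branch on above, sketched with the interfaces.FilterQuery type this package aliases as FilterCriteria (field names assumed to mirror go-ethereum's FilterQuery):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/interfaces"
)

func main() {
	// Single-shot block filter: BlockHash set, FromBlock/ToBlock left nil
	// (UnmarshalJSON rejects mixing the two forms).
	hash := common.HexToHash("0x01")
	single := interfaces.FilterQuery{BlockHash: &hash}

	// Range filter: explicit bounds; nil bounds mean "latest".
	ranged := interfaces.FilterQuery{FromBlock: big.NewInt(100), ToBlock: big.NewInt(200)}

	fmt.Println(single.BlockHash != nil, ranged.FromBlock, ranged.ToBlock)
}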
-func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { +// +// https://eth.wiki/json-rpc/API#eth_getfilterchanges +func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { api.filtersMu.Lock() defer api.filtersMu.Unlock() - chainConfig := api.sys.backend.ChainConfig() - latest := api.sys.backend.CurrentHeader() - - var baseFee *big.Int - if latest != nil { - baseFee = latest.BaseFee - } - if f, found := api.filters[id]; found { if !f.deadline.Stop() { // timer expired but filter is not yet removed in timeout loop @@ -540,26 +517,10 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { f.deadline.Reset(api.timeout) switch f.typ { - case BlocksSubscription, AcceptedBlocksSubscription: + case PendingTransactionsSubscription, BlocksSubscription, AcceptedBlocksSubscription, AcceptedTransactionsSubscription: hashes := f.hashes f.hashes = nil return returnHashes(hashes), nil - case PendingTransactionsSubscription, AcceptedTransactionsSubscription: - if f.fullTx { - txs := make([]*ethapi.RPCTransaction, 0, len(f.txs)) - for _, tx := range f.txs { - txs = append(txs, ethapi.NewRPCTransaction(tx, latest, baseFee, chainConfig)) - } - f.txs = nil - return txs, nil - } else { - hashes := make([]common.Hash, 0, len(f.txs)) - for _, tx := range f.txs { - hashes = append(hashes, tx.Hash()) - } - f.txs = nil - return hashes, nil - } case LogsSubscription, AcceptedLogsSubscription, MinedAndPendingLogsSubscription: logs := f.logs f.logs = nil @@ -567,7 +528,7 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { } } - return []interface{}{}, errFilterNotFound + return []interface{}{}, fmt.Errorf("filter not found") } // returnHashes is a helper that will return an empty hash array case the given hash array is nil, @@ -606,7 +567,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { if raw.BlockHash != nil { if raw.FromBlock != nil || raw.ToBlock != nil { // BlockHash is mutually exclusive with FromBlock/ToBlock criteria - return errors.New("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") + return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") } args.BlockHash = raw.BlockHash } else { @@ -679,11 +640,11 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { } args.Topics[i] = append(args.Topics[i], parsed) } else { - return errInvalidTopic + return fmt.Errorf("invalid topic(s)") } } default: - return errInvalidTopic + return fmt.Errorf("invalid topic(s)") } } } diff --git a/eth/filters/api_test.go b/eth/filters/api_test.go index 72838b4d8c..a374dbf28c 100644 --- a/eth/filters/api_test.go +++ b/eth/filters/api_test.go @@ -21,7 +21,7 @@ import ( "fmt" "testing" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" ) diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 6b77ee68e1..f7f549be24 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -32,28 +32,64 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/core/bloombits" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/core/vm" + + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/bloombits" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/event" ) +type Backend interface { + 
ChainDb() ethdb.Database + HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) + HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) + GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) + GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) + + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription + SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription + SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription + SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribeAcceptedLogsEvent(ch chan<- []*types.Log) event.Subscription + + SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription + + SubscribeAcceptedTransactionEvent(ch chan<- core.NewTxsEvent) event.Subscription + + BloomStatus() (uint64, uint64) + ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) + + // Added to the backend interface to support limiting of logs requests + GetVMConfig() *vm.Config + LastAcceptedBlock() *types.Block + GetMaxBlocksPerRequest() int64 +} + // Filter can be used to retrieve and filter logs. type Filter struct { - sys *FilterSystem + backend Backend + db ethdb.Database addresses []common.Address topics [][]common.Hash - block *common.Hash // Block hash if filtering a single block - begin, end int64 // Range interval if filtering multiple blocks + block common.Hash // Block hash if filtering a single block + begin, end int64 // Range interval if filtering multiple blocks matcher *bloombits.Matcher } // NewRangeFilter creates a new filter which uses a bloom filter on blocks to // figure out whether a particular block is interesting or not. -func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { +func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) (*Filter, error) { + allowUnfinalizedQueries := backend.GetVMConfig().AllowUnfinalizedQueries + acceptedBlock := backend.LastAcceptedBlock() + // Flatten the address and topic filter clauses into a single bloombits filter // system. Since the bloombits are not positional, nil topics are permitted, // which get flattened into a nil byte slice. @@ -72,34 +108,45 @@ func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Add } filters = append(filters, filter) } - size, _ := sys.backend.BloomStatus() + size, _ := backend.BloomStatus() + + if !allowUnfinalizedQueries && acceptedBlock != nil { + lastAccepted := acceptedBlock.Number().Int64() + if begin >= 0 && begin > lastAccepted { + return nil, fmt.Errorf("requested from block %d after last accepted block %d", begin, lastAccepted) + } + if end >= 0 && end > lastAccepted { + return nil, fmt.Errorf("requested to block %d after last accepted block %d", end, lastAccepted) + } + } // Create a generic filter and convert it into a range filter - filter := newFilter(sys, addresses, topics) + filter := newFilter(backend, addresses, topics) filter.matcher = bloombits.NewMatcher(size, filters) filter.begin = begin filter.end = end - return filter + return filter, nil } // NewBlockFilter creates a new filter which directly inspects the contents of // a block to figure out whether it is interesting or not. 
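The unfinalized-query guard that NewRangeFilter gains above, restated in isolation as a sketch with plain integers; a negative bound encodes a named block (latest/pending) and is exempt from the check.

package main

import "fmt"

func checkRange(begin, end, lastAccepted int64, allowUnfinalized bool) error {
	if allowUnfinalized {
		return nil
	}
	if begin >= 0 && begin > lastAccepted {
		return fmt.Errorf("requested from block %d after last accepted block %d", begin, lastAccepted)
	}
	if end >= 0 && end > lastAccepted {
		return fmt.Errorf("requested to block %d after last accepted block %d", end, lastAccepted)
	}
	return nil
}

func main() {
	fmt.Println(checkRange(10, 20, 15, false)) // to block 20 is past last accepted 15
	fmt.Println(checkRange(10, -1, 15, false)) // -1 ("latest") passes: <nil>
}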
-func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { +func NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { // Create a generic filter and convert it into a block filter - filter := newFilter(sys, addresses, topics) - filter.block = &block + filter := newFilter(backend, addresses, topics) + filter.block = block return filter } // newFilter creates a generic filter that can either filter based on a block hash, // or based on range queries. The search criteria needs to be explicitly set. -func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter { +func newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter { return &Filter{ - sys: sys, + backend: backend, addresses: addresses, topics: topics, + db: backend.ChainDb(), } } @@ -107,8 +154,8 @@ func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common. // first block that contains matches, updating the start of the filter accordingly. func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // If we're doing singleton block filtering, execute and return - if f.block != nil { - header, err := f.sys.backend.HeaderByHash(ctx, *f.block) + if f.block != (common.Hash{}) { + header, err := f.backend.HeaderByHash(ctx, f.block) if err != nil { return nil, err } @@ -117,70 +164,24 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } return f.blockLogs(ctx, header) } - - // Disallow blocks past the last accepted block if the backend does not - // allow unfinalized queries. - allowUnfinalizedQueries := f.sys.backend.IsAllowUnfinalizedQueries() - acceptedBlock := f.sys.backend.LastAcceptedBlock() - if !allowUnfinalizedQueries && acceptedBlock != nil { - lastAccepted := acceptedBlock.Number().Int64() - if f.begin >= 0 && f.begin > lastAccepted { - return nil, fmt.Errorf("requested from block %d after last accepted block %d", f.begin, lastAccepted) - } - if f.end >= 0 && f.end > lastAccepted { - return nil, fmt.Errorf("requested to block %d after last accepted block %d", f.end, lastAccepted) - } - } - - var ( - beginPending = f.begin == rpc.PendingBlockNumber.Int64() - endPending = f.end == rpc.PendingBlockNumber.Int64() - endSet = f.end >= 0 - ) - - // special case for pending logs - if beginPending && !endPending { - return nil, errInvalidBlockRange + // Figure out the limits of the filter range + // LatestBlockNumber is transformed into the last accepted block in HeaderByNumber + // so it is left in place here. 
+ header, err := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + if err != nil { + return nil, err } - - // Short-cut if all we care about is pending logs - if beginPending && endPending { + if header == nil { return nil, nil } + head := header.Number.Uint64() - resolveSpecial := func(number int64) (int64, error) { - var hdr *types.Header - switch number { - case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64(): - // we should return head here since we've already captured - // that we need to get the pending logs in the pending boolean above - hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) - if hdr == nil { - return 0, errors.New("latest header not found") - } - case rpc.FinalizedBlockNumber.Int64(): - hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber) - if hdr == nil { - return 0, errors.New("finalized header not found") - } - case rpc.SafeBlockNumber.Int64(): - hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber) - if hdr == nil { - return 0, errors.New("safe header not found") - } - default: - return number, nil - } - return hdr.Number.Int64(), nil - } - - var err error - // range query need to resolve the special begin/end block number - if f.begin, err = resolveSpecial(f.begin); err != nil { - return nil, err + if f.begin < 0 { + f.begin = int64(head) } - if f.end, err = resolveSpecial(f.end); err != nil { - return nil, err + end := uint64(f.end) + if f.end < 0 { + end = head } // When querying unfinalized data without a populated end block, it is @@ -189,86 +190,49 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // We error in this case to prevent a bad UX where the caller thinks there // are no logs from the specified beginning to end (when in reality there may // be some). - if endSet && f.end < f.begin { - return nil, fmt.Errorf("begin block %d is greater than end block %d", f.begin, f.end) + if end < uint64(f.begin) { + return nil, fmt.Errorf("begin block %d is greater than end block %d", f.begin, end) } // If the requested range of blocks exceeds the maximum number of blocks allowed by the backend // return an error instead of searching for the logs. - if maxBlocks := f.sys.backend.GetMaxBlocksPerRequest(); f.end-f.begin >= maxBlocks && maxBlocks > 0 { - return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, f.end, maxBlocks) + if maxBlocks := f.backend.GetMaxBlocksPerRequest(); int64(end)-f.begin > maxBlocks && maxBlocks > 0 { + return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, int64(end), maxBlocks) } // Gather all indexed logs, and finish with non indexed ones - logChan, errChan := f.rangeLogsAsync(ctx) var logs []*types.Log - for { - select { - case log := <-logChan: - logs = append(logs, log) - case err := <-errChan: - if err != nil { - // if an error occurs during extraction, we do return the extracted data - return logs, err - } - return logs, nil - } - } -} - -// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously, -// it creates and returns two channels: one for delivering log data, and one for reporting errors. 
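How Logs resolves open-ended bounds after this change, as standalone arithmetic: a negative begin or end means "latest" and snaps to the current head, mirroring the assignments above.

package main

import "fmt"

func resolveRange(begin, end int64, head uint64) (int64, uint64) {
	if begin < 0 {
		begin = int64(head)
	}
	resolvedEnd := uint64(end) // overwritten below when end encodes "latest"
	if end < 0 {
		resolvedEnd = head
	}
	return begin, resolvedEnd
}

func main() {
	b, e := resolveRange(-1, -1, 1234)
	fmt.Println(b, e) // 1234 1234: "latest" to "latest" scans exactly one block
}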
-func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) { - var ( - logChan = make(chan *types.Log) - errChan = make(chan error) - ) - - go func() { - defer func() { - close(errChan) - close(logChan) - }() - - // Gather all indexed logs, and finish with non indexed ones - var ( - end = uint64(f.end) - size, sections = f.sys.backend.BloomStatus() - err error - ) - if indexed := sections * size; indexed > uint64(f.begin) { - if indexed > end { - indexed = end + 1 - } - if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil { - errChan <- err - return - } + size, sections := f.backend.BloomStatus() + if indexed := sections * size; indexed > uint64(f.begin) { + if indexed > end { + logs, err = f.indexedLogs(ctx, end) + } else { + logs, err = f.indexedLogs(ctx, indexed-1) } - - if err := f.unindexedLogs(ctx, end, logChan); err != nil { - errChan <- err - return + if err != nil { + return logs, err } - - errChan <- nil - }() - - return logChan, errChan + } + rest, err := f.unindexedLogs(ctx, end) + logs = append(logs, rest...) + return logs, err } // indexedLogs returns the logs matching the filter criteria based on the bloom // bits indexed available locally or via the network. -func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error { +func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { // Create a matcher session and request servicing from the backend matches := make(chan uint64, 64) session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches) if err != nil { - return err + return nil, err } defer session.Close() - f.sys.backend.ServiceFilter(ctx, session) + f.backend.ServiceFilter(ctx, session) + + // Iterate over the matches until exhausted or context closed + var logs []*types.Log for { select { @@ -279,132 +243,131 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *type if err == nil { f.begin = int64(end) + 1 } - return err + return logs, err } f.begin = int64(number) + 1 // Retrieve the suggested block and pull any truly matching logs - header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) + header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) if header == nil || err != nil { - return err + return logs, err } found, err := f.checkMatches(ctx, header) if err != nil { - return err - } - for _, log := range found { - logChan <- log + return logs, err } + logs = append(logs, found...) case <-ctx.Done(): - return ctx.Err() + return logs, ctx.Err() } } } // unindexedLogs returns the logs matching the filter criteria based on raw block // iteration and bloom matching. -func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error { +func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { + var logs []*types.Log + for ; f.begin <= int64(end); f.begin++ { - header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) + header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { - return err + return logs, err } found, err := f.blockLogs(ctx, header) if err != nil { - return err - } - for _, log := range found { - select { - case logChan <- log: - case <-ctx.Done(): - return ctx.Err() - } + return logs, err } + logs = append(logs, found...) } - return nil + return logs, nil } // blockLogs returns the logs matching the filter criteria within a single block. 
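The indexed/unindexed split that the synchronous Logs restored above performs, as a sketch: blocks below sections*size are covered by bloombits and go through indexedLogs, the remainder through unindexedLogs.

package main

import "fmt"

// splitRange reports where the bloombits index stops being useful for a
// [begin, end] block range, given the section size and count from BloomStatus.
func splitRange(begin, end, size, sections uint64) (indexedEnd uint64, hasIndexed bool) {
	indexed := sections * size
	if indexed <= begin {
		return 0, false // the whole range lies beyond the index
	}
	if indexed > end {
		return end, true // the index covers the full range
	}
	return indexed - 1, true // index covers [begin, indexed-1]; the rest is scanned raw
}

func main() {
	fmt.Println(splitRange(0, 100000, 4096, 10)) // 40959 true
}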
-func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) { +func (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { if bloomFilter(header.Bloom, f.addresses, f.topics) { - return f.checkMatches(ctx, header) + found, err := f.checkMatches(ctx, header) + if err != nil { + return logs, err + } + logs = append(logs, found...) } - return nil, nil + return logs, nil } // checkMatches checks if the receipts belonging to the given header contain any log events that // match the filter criteria. This function is called when the bloom filter signals a potential match. -func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) { - logsList, err := f.sys.getLogs(ctx, header.Hash(), header.Number.Uint64()) +func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) { + // Get the logs of the block + logsList, err := f.backend.GetLogs(ctx, header.Hash()) if err != nil { return nil, err } - - unfiltered := types.FlattenLogs(logsList) - logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - if len(logs) == 0 { - return nil, nil + var unfiltered []*types.Log + for _, logs := range logsList { + unfiltered = append(unfiltered, logs...) } - // Most backends will deliver un-derived logs, but check nevertheless. - if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) { + logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) + if len(logs) > 0 { + // We have matching logs, check if we need to resolve full logs via the light client + if logs[0].TxHash == (common.Hash{}) { + receipts, err := f.backend.GetReceipts(ctx, header.Hash()) + if err != nil { + return nil, err + } + unfiltered = unfiltered[:0] + for _, receipt := range receipts { + unfiltered = append(unfiltered, receipt.Logs...) + } + logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) + } return logs, nil } - // We have matching logs, check if we need to resolve full logs via the light client - receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil, err - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs...) - } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - - return logs, nil + return nil, nil } -// includes returns true if the element is present in the list. -func includes[T comparable](things []T, element T) bool { - for _, thing := range things { - if thing == element { +func includes(addresses []common.Address, a common.Address) bool { + for _, addr := range addresses { + if addr == a { return true } } + return false } // filterLogs creates a slice of logs matching the given criteria. func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log { - var check = func(log *types.Log) bool { + var ret []*types.Log +Logs: + for _, log := range logs { if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber { - return false + continue } if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber { - return false + continue } + if len(addresses) > 0 && !includes(addresses, log.Address) { - return false + continue } // If the to filtered topics is greater than the amount of topics in logs, skip. 
if len(topics) > len(log.Topics) { - return false + continue Logs } for i, sub := range topics { - if len(sub) == 0 { - continue // empty rule set == wildcard + match := len(sub) == 0 // empty rule set == wildcard + for _, topic := range sub { + if log.Topics[i] == topic { + match = true + break + } } - if !includes(sub, log.Topics[i]) { - return false + if !match { + continue Logs } } - return true - } - var ret []*types.Log - for _, log := range logs { - if check(log) { - ret = append(ret, log) - } + ret = append(ret, log) } return ret } diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 3987826a88..81f8089db3 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -34,90 +34,16 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/bloombits" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) -// Config represents the configuration of the filter system. -type Config struct { - Timeout time.Duration // how long filters stay active (default: 5min) -} - -func (cfg Config) withDefaults() Config { - if cfg.Timeout == 0 { - cfg.Timeout = 5 * time.Minute - } - return cfg -} - -type Backend interface { - ChainDb() ethdb.Database - HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) - HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) - GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) - GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) - GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) - - CurrentHeader() *types.Header - ChainConfig() *params.ChainConfig - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription - SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription - SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription - SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribeAcceptedLogsEvent(ch chan<- []*types.Log) event.Subscription - - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription - - SubscribeAcceptedTransactionEvent(ch chan<- core.NewTxsEvent) event.Subscription - - BloomStatus() (uint64, uint64) - ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) - - // Added to the backend interface to support limiting of logs requests - IsAllowUnfinalizedQueries() bool - LastAcceptedBlock() *types.Block - GetMaxBlocksPerRequest() int64 -} - -// FilterSystem holds resources shared by all filters. -type FilterSystem struct { - // Note: go-ethereum uses an LRU cache for logs, - // instead we cache logs on the blockchain object itself. - backend Backend - cfg *Config -} - -// NewFilterSystem creates a filter system. 
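Topic matching exactly as the restored filterLogs loop above implements it, extracted as a sketch: topics[i] is an OR-set for position i, positions are AND-ed together, an empty set is a wildcard, and a log with fewer topics than the filter never matches.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func topicsMatch(logTopics []common.Hash, filter [][]common.Hash) bool {
	if len(filter) > len(logTopics) {
		return false // filter is more specific than the log can satisfy
	}
	for i, sub := range filter {
		match := len(sub) == 0 // empty rule set == wildcard
		for _, topic := range sub {
			if logTopics[i] == topic {
				match = true
				break
			}
		}
		if !match {
			return false
		}
	}
	return true
}

func main() {
	a, b := common.HexToHash("0xaa"), common.HexToHash("0xbb")
	fmt.Println(topicsMatch([]common.Hash{a}, [][]common.Hash{{a, b}})) // true: OR within position 0
	fmt.Println(topicsMatch([]common.Hash{a}, [][]common.Hash{{b}}))   // false
}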
-func NewFilterSystem(backend Backend, config Config) *FilterSystem { - config = config.withDefaults() - return &FilterSystem{ - backend: backend, - cfg: &config, - } -} - -// getLogs loads block logs from the backend. The backend is responsible for -// performing any log caching. -func (sys *FilterSystem) getLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { - logs, err := sys.backend.GetLogs(ctx, blockHash, number) - if err != nil { - return nil, err - } - if logs == nil { - return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString()) - } - return logs, nil -} - // Type determines the kind of filter and is used to put the filter in to // the correct bucket when added. type Type byte @@ -133,16 +59,16 @@ const ( PendingLogsSubscription // MinedAndPendingLogsSubscription queries for logs in mined and pending blocks. MinedAndPendingLogsSubscription - // PendingTransactionsSubscription queries for pending transactions entering - // the pending state + // PendingTransactionsSubscription queries tx hashes for pending + // transactions entering the pending state PendingTransactionsSubscription - // AcceptedTransactionsSubscription queries for accepted transactions + // AcceptedTransactionsSubscription queries tx hashes for accepted transactions AcceptedTransactionsSubscription // BlocksSubscription queries hashes for blocks that are imported BlocksSubscription // AcceptedBlocksSubscription queries hashes for blocks that are accepted AcceptedBlocksSubscription // LastIndexSubscription keeps track of the last index LastIndexSubscription ) @@ -164,7 +90,7 @@ type subscription struct { created time.Time logsCrit interfaces.FilterQuery logs chan []*types.Log - txs chan []*types.Transaction + hashes chan []common.Hash headers chan *types.Header installed chan struct{} // closed when the filter is installed err chan error // closed when the filter is uninstalled @@ -173,8 +99,9 @@ // EventSystem creates subscriptions, processes events and broadcasts them to the // subscription which match the subscription criteria. type EventSystem struct { - backend Backend - sys *FilterSystem + backend Backend + lightMode bool + lastHead *types.Header // Subscriptions txsSub event.Subscription // Subscription for new transaction event @@ -205,10 +132,10 @@ // // The returned manager has a loop that needs to be stopped with the Stop function // or by stopping the given mux. -func NewEventSystem(sys *FilterSystem) *EventSystem { +func NewEventSystem(backend Backend, lightMode bool) *EventSystem { m := &EventSystem{ - sys: sys, - backend: sys.backend, + backend: backend, + lightMode: lightMode, install: make(chan *subscription), uninstall: make(chan *subscription), txsCh: make(chan core.NewTxsEvent, txChanSize), @@ -266,7 +193,7 @@ func (sub *Subscription) Unsubscribe() { case sub.es.uninstall <- sub.f: break uninstallLoop case <-sub.f.logs: - case <-sub.f.txs: + case <-sub.f.hashes: case <-sub.f.headers: } } @@ -289,9 +216,6 @@ // given criteria to the given logs channel. Default value for the from and to // block is "latest". If the fromBlock > toBlock an error is returned.
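Editor's note: the hunk below replaces the named errInvalidBlockRange error with an inline one but keeps the same acceptance rules for the subscription range. As a reading aid, here is a minimal standalone sketch of the from/to combinations SubscribeLogs allows; the sentinel constants are illustrative stand-ins for the rpc package's negative block numbers, and the real switch also handles pending/mined combinations not visible in this hunk.

```go
package main

import "fmt"

// Illustrative stand-ins for rpc.PendingBlockNumber and rpc.LatestBlockNumber.
const (
	pendingBlock int64 = -2
	latestBlock  int64 = -1
)

// validRange mirrors the visible acceptance rules of SubscribeLogs: nil bounds
// default to "latest" before this check, and only pending-only, live-only,
// bounded historic, or historic-to-head ranges create a subscription.
func validRange(from, to int64) bool {
	switch {
	case from == pendingBlock && to == pendingBlock:
		return true // pending logs only
	case from == latestBlock && to == latestBlock:
		return true // live logs only
	case from >= 0 && to >= 0 && to >= from:
		return true // bounded historic range
	case from >= 0 && to == latestBlock:
		return true // historic start, then follow the head
	default:
		return false // e.g. fromBlock > toBlock
	}
}

func main() {
	fmt.Println(validRange(100, 200))         // true
	fmt.Println(validRange(100, latestBlock)) // true
	fmt.Println(validRange(200, 100))         // false: from > to
}
```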
func (es *EventSystem) SubscribeLogs(crit interfaces.FilterQuery, logs chan []*types.Log) (*Subscription, error) { - if len(crit.Topics) > maxTopics { - return nil, errExceedMaxTopics - } var from, to rpc.BlockNumber if crit.FromBlock == nil { from = rpc.LatestBlockNumber @@ -324,7 +248,7 @@ func (es *EventSystem) SubscribeLogs(crit interfaces.FilterQuery, logs chan []*t if from >= 0 && to == rpc.LatestBlockNumber { return es.subscribeLogs(crit, logs), nil } - return nil, errInvalidBlockRange + return nil, fmt.Errorf("invalid from and to block combination: from > to") } func (es *EventSystem) SubscribeAcceptedLogs(crit interfaces.FilterQuery, logs chan []*types.Log) (*Subscription, error) { @@ -359,7 +283,7 @@ func (es *EventSystem) subscribeAcceptedLogs(crit interfaces.FilterQuery, logs c logsCrit: crit, created: time.Now(), logs: logs, - txs: make(chan []*types.Transaction), + hashes: make(chan []common.Hash), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -376,7 +300,7 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit interfaces.FilterQuery, lo logsCrit: crit, created: time.Now(), logs: logs, - txs: make(chan []*types.Transaction), + hashes: make(chan []common.Hash), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -393,7 +317,7 @@ func (es *EventSystem) subscribeLogs(crit interfaces.FilterQuery, logs chan []*t logsCrit: crit, created: time.Now(), logs: logs, - txs: make(chan []*types.Transaction), + hashes: make(chan []common.Hash), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -401,7 +325,7 @@ func (es *EventSystem) subscribeLogs(crit interfaces.FilterQuery, logs chan []*t return es.subscribe(sub) } -// subscribePendingLogs creates a subscription that writes contract event logs for +// subscribePendingLogs creates a subscription that writes transaction hashes for // transactions that enter the transaction pool. func (es *EventSystem) subscribePendingLogs(crit interfaces.FilterQuery, logs chan []*types.Log) *Subscription { sub := &subscription{ @@ -410,7 +334,7 @@ func (es *EventSystem) subscribePendingLogs(crit interfaces.FilterQuery, logs ch logsCrit: crit, created: time.Now(), logs: logs, - txs: make(chan []*types.Transaction), + hashes: make(chan []common.Hash), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -426,7 +350,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti typ: BlocksSubscription, created: time.Now(), logs: make(chan []*types.Log), - txs: make(chan []*types.Transaction), + hashes: make(chan []common.Hash), headers: headers, installed: make(chan struct{}), err: make(chan error), @@ -442,7 +366,7 @@ func (es *EventSystem) SubscribeAcceptedHeads(headers chan *types.Header) *Subsc typ: AcceptedBlocksSubscription, created: time.Now(), logs: make(chan []*types.Log), - txs: make(chan []*types.Transaction), + hashes: make(chan []common.Hash), headers: headers, installed: make(chan struct{}), err: make(chan error), @@ -450,15 +374,15 @@ func (es *EventSystem) SubscribeAcceptedHeads(headers chan *types.Header) *Subsc return es.subscribe(sub) } -// SubscribePendingTxs creates a subscription that writes transactions for +// SubscribePendingTxs creates a subscription that writes transaction hashes for // transactions that enter the transaction pool. 
-func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subscription { +func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription { sub := &subscription{ id: rpc.NewID(), typ: PendingTransactionsSubscription, created: time.Now(), logs: make(chan []*types.Log), - txs: txs, + hashes: hashes, headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -466,15 +390,15 @@ func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subsc return es.subscribe(sub) } -// SubscribeAcceptedTxs creates a subscription that writes transactions for +// SubscribeAcceptedTxs creates a subscription that writes transaction hashes for // transactions have been accepted. -func (es *EventSystem) SubscribeAcceptedTxs(txs chan []*types.Transaction) *Subscription { +func (es *EventSystem) SubscribeAcceptedTxs(hashes chan []common.Hash) *Subscription { sub := &subscription{ id: rpc.NewID(), typ: AcceptedTransactionsSubscription, created: time.Now(), logs: make(chan []*types.Log), - txs: txs, + hashes: hashes, headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -520,13 +444,26 @@ func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) { } } +func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) { + for _, f := range filters[LogsSubscription] { + matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) + if len(matchedLogs) > 0 { + f.logs <- matchedLogs + } + } +} + func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent, accepted bool) { + hashes := make([]common.Hash, 0, len(ev.Txs)) + for _, tx := range ev.Txs { + hashes = append(hashes, tx.Hash()) + } for _, f := range filters[PendingTransactionsSubscription] { - f.txs <- ev.Txs + f.hashes <- hashes } if accepted { for _, f := range filters[AcceptedTransactionsSubscription] { - f.txs <- ev.Txs + f.hashes <- hashes } } } @@ -535,12 +472,103 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) for _, f := range filters[BlocksSubscription] { f.headers <- ev.Block.Header() } + if es.lightMode && len(filters[LogsSubscription]) > 0 { + es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) { + for _, f := range filters[LogsSubscription] { + if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { + f.logs <- matchedLogs + } + } + }) + } } func (es *EventSystem) handleChainAcceptedEvent(filters filterIndex, ev core.ChainEvent) { for _, f := range filters[AcceptedBlocksSubscription] { f.headers <- ev.Block.Header() } + if es.lightMode && len(filters[LogsSubscription]) > 0 { + es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) { + for _, f := range filters[LogsSubscription] { + if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { + f.logs <- matchedLogs + } + } + }) + } +} + +func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) { + oldh := es.lastHead + es.lastHead = newHeader + if oldh == nil { + return + } + newh := newHeader + // find common ancestor, create list of rolled back and new block hashes + var oldHeaders, newHeaders []*types.Header + for oldh.Hash() != newh.Hash() { + if oldh.Number.Uint64() >= newh.Number.Uint64() { + oldHeaders 
= append(oldHeaders, oldh) + oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1) + } + if oldh.Number.Uint64() < newh.Number.Uint64() { + newHeaders = append(newHeaders, newh) + newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1) + if newh == nil { + // happens when CHT syncing, nothing to do + newh = oldh + } + } + } + // roll back old blocks + for _, h := range oldHeaders { + callBack(h, true) + } + // check new blocks (array is in reverse order) + for i := len(newHeaders) - 1; i >= 0; i-- { + callBack(newHeaders[i], false) + } +} + +// filter logs of a single header in light client mode +func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log { + if bloomFilter(header.Bloom, addresses, topics) { + // Get the logs of the block + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + logsList, err := es.backend.GetLogs(ctx, header.Hash()) + if err != nil { + return nil + } + var unfiltered []*types.Log + for _, logs := range logsList { + for _, log := range logs { + logcopy := *log + logcopy.Removed = remove + unfiltered = append(unfiltered, &logcopy) + } + } + logs := filterLogs(unfiltered, nil, nil, addresses, topics) + if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) { + // We have matching but non-derived logs + receipts, err := es.backend.GetReceipts(ctx, header.Hash()) + if err != nil { + return nil + } + unfiltered = unfiltered[:0] + for _, receipt := range receipts { + for _, log := range receipt.Logs { + logcopy := *log + logcopy.Removed = remove + unfiltered = append(unfiltered, &logcopy) + } + } + logs = filterLogs(unfiltered, nil, nil, addresses, topics) + } + return logs + } + return nil } // eventLoop (un)installs filters and processes mux events. 
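Editor's note: filterLogs in eth/filters/filter.go above and the light-mode lightFilterLogs both apply the same position-wise topic rules: filter position i constrains log topic i, an empty rule set at a position is a wildcard, several hashes at one position are OR-ed, and a filter with more positions than the log has topics never matches. A self-contained sketch of just that rule, with strings standing in for common.Hash values:

```go
package main

import "fmt"

// topicsMatch reproduces the position-wise topic rules used by filterLogs.
func topicsMatch(filter [][]string, logTopics []string) bool {
	if len(filter) > len(logTopics) {
		return false // filter longer than the log's topic list
	}
	for i, sub := range filter {
		match := len(sub) == 0 // empty rule set == wildcard
		for _, want := range sub {
			if logTopics[i] == want {
				match = true // any hash in the set satisfies position i
				break
			}
		}
		if !match {
			return false // every position must be satisfied
		}
	}
	return true
}

func main() {
	log := []string{"Transfer", "alice", "bob"}
	fmt.Println(topicsMatch([][]string{{"Transfer"}}, log))             // true: later positions unconstrained
	fmt.Println(topicsMatch([][]string{{}, {"carol", "alice"}}, log))   // true: wildcard, then OR
	fmt.Println(topicsMatch([][]string{{"Approval"}}, log))             // false: position 0 mismatch
	fmt.Println(topicsMatch([][]string{{"Transfer"}, {}, {}, {}}, log)) // false: filter too long
}
```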
@@ -571,7 +599,7 @@ func (es *EventSystem) eventLoop() { case ev := <-es.logsAcceptedCh: es.handleAcceptedLogs(index, ev) case ev := <-es.rmLogsCh: - es.handleLogs(index, ev.Logs) + es.handleRemovedLogs(index, ev) case ev := <-es.pendingLogsCh: es.handlePendingLogs(index, ev) case ev := <-es.chainCh: diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 521d48e98e..19470d89e4 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -33,8 +33,8 @@ import ( "math/big" "slices" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) diff --git a/eth/gasprice/feehistory_test.go b/eth/gasprice/feehistory_test.go index f8234d1d18..84801c22cf 100644 --- a/eth/gasprice/feehistory_test.go +++ b/eth/gasprice/feehistory_test.go @@ -32,21 +32,20 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/types" - "github.com/stretchr/testify/require" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" ) func TestFeeHistory(t *testing.T) { var cases = []struct { pending bool - maxCallBlock uint64 - maxBlock uint64 - count uint64 + maxCallBlock int + maxBlock int + count int last rpc.BlockNumber percent []float64 expFirst uint64 @@ -82,6 +81,7 @@ func TestFeeHistory(t *testing.T) { } tip := big.NewInt(1 * params.GWei) backend := newTestBackendFakerEngine(t, params.TestChainConfig, 32, common.Big0, func(i int, b *core.BlockGen) { + signer := types.LatestSigner(params.TestChainConfig) b.SetCoinbase(common.Address{1}) @@ -106,11 +106,10 @@ func TestFeeHistory(t *testing.T) { } b.AddTx(tx) }) - oracle, err := NewOracle(backend, config) - require.NoError(t, err) + oracle := NewOracle(backend, config) first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent) - backend.teardown() + expReward := c.expCount if len(c.percent) == 0 { expReward = 0 diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 82941c015e..6ee7a4ee48 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -32,14 +32,14 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" diff --git a/eth/state_accessor.go b/eth/state_accessor.go index 60d2849160..295b6a06df 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -27,97 +27,87 @@ package eth import ( - "context" "errors" "fmt" + "math/big" "time" - "github.com/ava-labs/coreth/core" - 
"github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/trie" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -// noopReleaser is returned in case there is no operation expected -// for releasing state. -var noopReleaser = tracers.StateReleaseFunc(func() {}) - -func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { - reexec = 0 // Do not support re-executing historical blocks to grab state +// StateAtBlock retrieves the state database associated with a certain block. +// If no state is locally available for the given block, a number of blocks +// are attempted to be reexecuted to generate the desired state. The optional +// base layer statedb can be passed then it's regarded as the statedb of the +// parent block. +// Parameters: +// - block: The block for which we want the state (== state at the stateRoot of the parent) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent state +// continuously from the callsite. +// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to +// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid +// storing trash persistently +// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, +// it would be preferrable to start from a fresh state, if we have it on disk. +func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { var ( current *types.Block database state.Database - triedb *trie.Database report = true origin = block.NumberU64() ) - // The state is only for reading purposes, check the state presence in - // live database. - if readOnly { - // The state is available in live database, create a reference - // on top to prevent garbage collection and return a release - // function to deref it. - if statedb, err = eth.blockchain.StateAt(block.Root()); err == nil { - eth.blockchain.TrieDB().Reference(block.Root(), common.Hash{}) - return statedb, func() { - eth.blockchain.TrieDB().Dereference(block.Root()) - }, nil + // Check the live database first if we have the state fully available, use that. + if checkLive { + statedb, err = eth.blockchain.StateAt(block.Root()) + if err == nil { + return statedb, nil } } - // The state is both for reading and writing, or it's unavailable in disk, - // try to construct/recover the state over an ephemeral trie.Database for - // isolating the live one. if base != nil { if preferDisk { // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. - // TODO(rjl493456442), clean cache is disabled to prevent memory leak, - // please re-enable it for better performance. 
- database = state.NewDatabaseWithConfig(eth.chainDb, trie.HashDefaults) + database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) if statedb, err = state.New(block.Root(), database, nil); err == nil { log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) - return statedb, noopReleaser, nil + return statedb, nil } } // The optional base statedb is given, mark the start point as parent block - statedb, database, triedb, report = base, base.Database(), base.Database().TrieDB(), false + statedb, database, report = base, base.Database(), false current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) } else { - // Otherwise, try to reexec blocks until we find a state or reach our limit + // Otherwise try to reexec blocks until we find a state or reach our limit current = block // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. - // TODO(rjl493456442), clean cache is disabled to prevent memory leak, - // please re-enable it for better performance. - triedb = trie.NewDatabase(eth.chainDb, trie.HashDefaults) - database = state.NewDatabaseWithNodeDB(eth.chainDb, triedb) + database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) - // If we didn't check the live database, do check state over ephemeral database, - // otherwise we would rewind past a persisted block (specific corner case is - // chain tracing from the genesis). - if !readOnly { + // If we didn't check the dirty database, do check the clean one, otherwise + // we would rewind past a persisted block (specific corner case is chain + // tracing from the genesis). + if !checkLive { statedb, err = state.New(current.Root(), database, nil) if err == nil { - return statedb, noopReleaser, nil + return statedb, nil } } // Database does not have the state for the given block, try to regenerate for i := uint64(0); i < reexec; i++ { - if err := ctx.Err(); err != nil { - return nil, nil, err - } if current.NumberU64() == 0 { - return nil, nil, errors.New("genesis state is missing") + return nil, errors.New("genesis state is missing") } parent := eth.blockchain.GetBlock(current.ParentHash(), current.NumberU64()-1) if parent == nil { - return nil, nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) + return nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) } current = parent @@ -129,23 +119,19 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u if err != nil { switch err.(type) { case *trie.MissingNodeError: - return nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) + return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) default: - return nil, nil, err + return nil, err } } } - // State is available at historical point, re-execute the blocks on top for - // the desired state. 
+ // State was available at historical point, regenerate var ( start = time.Now() logged time.Time parent common.Hash ) for current.NumberU64() < origin { - if err := ctx.Err(); err != nil { - return nil, nil, err - } // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second && report { log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64()-1, "elapsed", time.Since(start)) @@ -155,138 +141,74 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u parentHeader := current.Header() next := current.NumberU64() + 1 if current = eth.blockchain.GetBlockByNumber(next); current == nil { - return nil, nil, fmt.Errorf("block #%d not found", next) + return nil, fmt.Errorf("block #%d not found", next) } _, _, _, err := eth.blockchain.Processor().Process(current, parentHeader, statedb, vm.Config{}) if err != nil { - return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) + return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), true) + root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number())) if err != nil { - return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", + return nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", current.NumberU64(), current.Root().Hex(), err) } statedb, err = state.New(root, database, nil) if err != nil { - return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) + return nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) } - // Note: In coreth, the state reference is held by passing true to [statedb.Commit]. - // Drop the parent state to prevent accumulating too many nodes in memory. + database.TrieDB().Reference(root, common.Hash{}) if parent != (common.Hash{}) { - triedb.Dereference(parent) + database.TrieDB().Dereference(parent) } parent = root } if report { - _, nodes, imgs := triedb.Size() // all memory is contained within the nodes return in hashdb + nodes, imgs := database.TrieDB().Size() log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } - return statedb, func() { triedb.Dereference(block.Root()) }, nil -} - -func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) { - // Check if the requested state is available in the live chain. - statedb, err := eth.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, noopReleaser, nil - } - // TODO historic state is not supported in path-based scheme. - // Fully archive node in pbss will be implemented by relying - // on state history, but needs more work on top. - return nil, nil, errors.New("historical state not available in path scheme yet") -} - -// stateAtBlock retrieves the state database associated with a certain block. -// If no state is locally available for the given block, a number of blocks -// are attempted to be reexecuted to generate the desired state. The optional -// base layer statedb can be provided which is regarded as the statedb of the -// parent block. -// -// An additional release function will be returned if the requested state is -// available. 
Release is expected to be invoked when the returned state is no -// longer needed. Its purpose is to prevent resource leaking. Though it can be -// noop in some cases. -// -// Parameters: -// - block: The block for which we want the state(state = block.Root) -// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state -// - base: If the caller is tracing multiple blocks, the caller can provide the parent -// state continuously from the callsite. -// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should -// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. -// Otherwise, the trash generated by caller may be persisted permanently. -// - preferDisk: This arg can be used by the caller to signal that even though the 'base' is -// provided, it would be preferable to start from a fresh state, if we have it -// on disk. -func (eth *Ethereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { - if eth.blockchain.TrieDB().Scheme() == rawdb.HashScheme { - return eth.hashState(ctx, block, reexec, base, readOnly, preferDisk) - } - return eth.pathState(block) + return statedb, nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { +func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { // Short circuit if it's genesis block. if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, nil, errors.New("no transaction in genesis") } // Create the parent state database parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + return nil, vm.BlockContext{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, release, err := eth.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) + statedb, err := eth.StateAtBlock(parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, nil, nil, err + return nil, vm.BlockContext{}, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, release, nil + return nil, vm.BlockContext{}, statedb, nil } // Recompute transactions up to the target index. 
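Editor's note: the hunk that follows replays every transaction ahead of the requested index on top of the parent state, so the returned state is exactly what the target transaction would observe. A condensed sketch of that replay-to-index shape, with placeholder types standing in for the real StateDB and core.ApplyMessage:

```go
package main

import (
	"errors"
	"fmt"
)

// tx and state are placeholders: apply stands in for core.ApplyMessage plus
// the per-transaction statedb.Finalise call.
type tx struct{ id int }

type state struct{ applied []int }

func (s *state) apply(t tx) { s.applied = append(s.applied, t.id) }

// stateAtTx mirrors the shape of stateAtTransaction: execute every
// transaction before txIndex against the parent state, then hand back the
// state exactly as the target transaction would see it.
func stateAtTx(parent *state, txs []tx, txIndex int) (*state, error) {
	for idx, t := range txs {
		if idx == txIndex {
			return parent, nil // state now reflects txs[0:txIndex]
		}
		parent.apply(t)
	}
	return nil, errors.New("transaction index out of range")
}

func main() {
	st, err := stateAtTx(&state{}, []tx{{1}, {2}, {3}}, 2)
	fmt.Println(st.applied, err) // [1 2] <nil>
}
```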
- signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), block.Time()) + signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), new(big.Int).SetUint64(block.Time())) for idx, tx := range block.Transactions() { // Assemble the transaction call message and return if the requested offset - msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) + msg, _ := tx.AsMessage(signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) if idx == txIndex { - return msg, context, statedb, release, nil + return msg, context, statedb, nil } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{}) - statedb.SetTxContext(tx.Hash(), idx) + statedb.Prepare(tx.Hash(), idx) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) -} - -// StateAtNextBlock is a helper function that returns the state at the next block. -// It wraps StateAtBlock and handles the case where Upgrades are applied to the -// next block. -// This is different than using StateAtBlock with [nextBlock] because it will -// apply the upgrades to the [parent] state before returning it. 
-func (eth *Ethereum) StateAtNextBlock(ctx context.Context, parent *types.Block, nextBlock *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { - // Get state for [parent] - statedb, release, err := eth.stateAtBlock(ctx, parent, reexec, base, readOnly, preferDisk) - if err != nil { - return nil, nil, err - } - - // Apply upgrades here for the [nextBlock] - err = core.ApplyUpgrades(eth.blockchain.Config(), &parent.Header().Time, nextBlock, statedb) - if err != nil { - release() - return nil, nil, err - } - - return statedb, release, nil + return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index f596bf2638..6c19a9a1cb 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -27,28 +27,27 @@ package tracers import ( - "bufio" + "bytes" "context" - "encoding/json" "errors" "fmt" - "os" + "math/big" "runtime" "sync" "time" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/internal/ethapi" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers/logger" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/internal/ethapi" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -69,19 +68,8 @@ const ( // For non-archive nodes, this limit _will_ be overblown, as disk-backed tries // will only be found every ~15K blocks or so. defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024) - - // maximumPendingTraceStates is the maximum number of states allowed waiting - // for tracing. The creation of trace state will be paused if the unused - // trace states exceed this limit. - maximumPendingTraceStates = 128 ) -var errTxNotFound = errors.New("transaction not found") - -// StateReleaseFunc is used to deallocate resources held by constructing a -// historical state for tracing purposes. -type StateReleaseFunc func() - // Backend interface provides the common API services (that are provided by // both full and light clients) with access to necessary functions. 
type Backend interface { @@ -89,53 +77,59 @@ HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) - BadBlocks() ([]*types.Block, []*core.BadBlockReason) + BadBlocks() []*types.Block GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) RPCGasCap() uint64 ChainConfig() *params.ChainConfig Engine() consensus.Engine ChainDb() ethdb.Database - StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) - StateAtNextBlock(ctx context.Context, parent, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) - StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) -} - -// baseAPI holds the collection of common methods for API and FileTracerAPI. -type baseAPI struct { - backend Backend + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) } // API is the collection of tracing APIs exposed over the private debugging endpoint. type API struct { - baseAPI + backend Backend } // NewAPI creates a new API definition for the tracing methods of the Ethereum service. func NewAPI(backend Backend) *API { - return &API{baseAPI{backend: backend}} + return &API{backend: backend} } -// FileTracerAPI is the collection of additional tracing APIs exposed over the private -// debugging endpoint that log their output to a file. -type FileTracerAPI struct { - baseAPI +type chainContext struct { + api *API + ctx context.Context } -// NewFileTracerAPI creates a new API definition for the tracing methods of the Ethererum -// service that log their output to a file. -func NewFileTracerAPI(backend Backend) *FileTracerAPI { - return &FileTracerAPI{baseAPI{backend: backend}} +func (context *chainContext) Engine() consensus.Engine { + return context.api.backend.Engine() +} + +func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.Header { + header, err := context.api.backend.HeaderByNumber(context.ctx, rpc.BlockNumber(number)) + if err != nil { + return nil + } + if header.Hash() == hash { + return header + } + header, err = context.api.backend.HeaderByHash(context.ctx, hash) + if err != nil { + return nil + } + return header } // chainContext constructs the context reader which is used by the evm for reading // the necessary chain context. -func (api *baseAPI) chainContext(ctx context.Context) core.ChainContext { - return ethapi.NewChainContext(ctx, api.backend) +func (api *API) chainContext(ctx context.Context) core.ChainContext { + return &chainContext{api: api, ctx: ctx} } // blockByNumber is the wrapper of the chain access function offered by the backend. // It will return an error if the block is not found.
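Editor's note: stepping back to the chainContext added above, GetHeader first resolves by number (the cheap canonical lookup), accepts the result only when the hash matches, and otherwise falls back to a by-hash lookup so non-canonical side-chain headers stay traceable. The same lookup order in a tiny standalone sketch, with maps standing in for the backend:

```go
package main

import "fmt"

// header is a stand-in for *types.Header carrying just what the sketch needs.
type header struct {
	number uint64
	hash   string
}

// getHeader mirrors the lookup order of chainContext.GetHeader: canonical
// by-number first, verified against the requested hash, then by-hash fallback.
func getHeader(byNumber map[uint64]header, byHash map[string]header, hash string, number uint64) (header, bool) {
	if h, ok := byNumber[number]; ok && h.hash == hash {
		return h, true // canonical header at this height matches
	}
	h, ok := byHash[hash] // side-chain header, resolved by hash
	return h, ok
}

func main() {
	canonical := map[uint64]header{10: {10, "0xaaa"}}
	all := map[string]header{"0xaaa": {10, "0xaaa"}, "0xbbb": {10, "0xbbb"}}
	fmt.Println(getHeader(canonical, all, "0xbbb", 10)) // falls back to the hash index
}
```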
-func (api *baseAPI) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { +func (api *API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { block, err := api.backend.BlockByNumber(ctx, number) if err != nil { return nil, err @@ -148,7 +142,7 @@ func (api *baseAPI) blockByNumber(ctx context.Context, number rpc.BlockNumber) ( // blockByHash is the wrapper of the chain access function offered by the backend. // It will return an error if the block is not found. -func (api *baseAPI) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { +func (api *API) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { block, err := api.backend.BlockByHash(ctx, hash) if err != nil { return nil, err @@ -164,7 +158,7 @@ func (api *baseAPI) blockByHash(ctx context.Context, hash common.Hash) (*types.B // // Note this function is friendly for the light client which can only retrieve the // historical(before the CHT) header/block by number. -func (api *baseAPI) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) { +func (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) { block, err := api.blockByNumber(ctx, number) if err != nil { return nil, err @@ -181,18 +175,16 @@ type TraceConfig struct { Tracer *string Timeout *string Reexec *uint64 - // Config specific to given tracer. Note struct logger - // config are historically embedded in main object. - TracerConfig json.RawMessage } // TraceCallConfig is the config for traceCall API. It holds one more // field to override the state for tracing. type TraceCallConfig struct { - TraceConfig + *logger.Config + Tracer *string + Timeout *string + Reexec *uint64 StateOverrides *ethapi.StateOverride - BlockOverrides *ethapi.BlockOverrides - TxIndex *hexutil.Uint } // StdTraceConfig holds extra parameters to standard-json trace functions. @@ -204,21 +196,16 @@ type StdTraceConfig struct { // txTraceResult is the result of a single transaction trace. type txTraceResult struct { - TxHash common.Hash `json:"txHash"` // transaction hash Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer Error string `json:"error,omitempty"` // Trace failure produced by the tracer } -func (t *txTraceResult) String() string { - return fmt.Sprintf("result: %s, error: %s", t.Result, t.Error) -} - // blockTraceTask represents a single block trace task when an entire chain is // being traced. type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from - release StateReleaseFunc // The function to release the held resource for this task + rootref common.Hash // Trie root reference held for this task results []*txTraceResult // Trace results procudes by the task } @@ -251,6 +238,13 @@ func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, conf if from.Number().Cmp(to.Number()) >= 0 { return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start) } + return api.traceChain(ctx, from, to, config) +} + +// traceChain configures a new tracer according to the provided configuration, and +// executes all the transactions contained within. The return value will be one item +// per transaction, dependent on the requested tracer. 
+func (api *API) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) { // Tracing a chain is a **long** operation, only do with subscriptions notifier, supported := rpc.NotifierFromContext(ctx) if !supported { @@ -258,21 +252,8 @@ } sub := notifier.CreateSubscription() - resCh := api.traceChain(from, to, config, notifier.Closed()) - go func() { - for result := range resCh { - notifier.Notify(sub.ID, result) - } - }() - return sub, nil -} - -// traceChain configures a new tracer according to the provided configuration, and -// executes all the transactions contained within. The tracing chain range includes -// the end block but excludes the start one. The return value will be one item per -// transaction, dependent on the requested tracer. -// The tracing procedure should be aborted in case the closed signal is received. -func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed <-chan interface{}) chan *blockTraceResult { + // Prepare all the states for tracing. Note this procedure can take a very + // long time. A timeout mechanism is necessary. reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec @@ -283,76 +264,68 @@ threads = blocks } var ( - pend = new(sync.WaitGroup) - ctx = context.Background() - taskCh = make(chan *blockTraceTask, threads) - resCh = make(chan *blockTraceTask, threads) - tracker = newStateTracker(maximumPendingTraceStates, start.NumberU64()) + pend = new(sync.WaitGroup) + tasks = make(chan *blockTraceTask, threads) + results = make(chan *blockTraceTask, threads) + localctx = context.Background() ) for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() - // Fetch and execute the block trace taskCh - for task := range taskCh { - var ( - signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), task.block.Time()) - blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) - ) + // Fetch and execute the next block trace tasks + for task := range tasks { + signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), new(big.Int).SetUint64(task.block.Time())) + blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { - msg, _ := core.TransactionToMessage(tx, signer, task.block.BaseFee()) + msg, _ := tx.AsMessage(signer, task.block.BaseFee()) txctx := &Context{ - BlockHash: task.block.Hash(), - BlockNumber: task.block.Number(), - TxIndex: i, - TxHash: tx.Hash(), + BlockHash: task.block.Hash(), + TxIndex: i, + TxHash: tx.Hash(), } - res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) + res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config) if err != nil { - task.results[i] = &txTraceResult{TxHash: tx.Hash(), Error: err.Error()} + task.results[i] = &txTraceResult{Error: err.Error()} log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err) break } // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) - task.results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res} + task.results[i] = &txTraceResult{Result: res} } - //
Tracing state is used up, queue it for de-referencing. Note the - // state is the parent state of trace block, use block.number-1 as - // the state number. - tracker.releaseState(task.block.NumberU64()-1, task.release) - - // Stream the result back to the result catcher or abort on teardown + // Stream the result back to the user or abort on teardown select { - case resCh <- task: - case <-closed: + case results <- task: + case <-notifier.Closed(): return } } }() } // Start a goroutine to feed all the blocks into the tracers + var ( + begin = time.Now() + derefTodo []common.Hash // list of hashes to dereference from the db + derefsMu sync.Mutex // mutex for the derefs + ) + go func() { var ( logged time.Time - begin = time.Now() number uint64 traced uint64 failed error + parent common.Hash statedb *state.StateDB - release StateReleaseFunc ) // Ensure everything is properly cleaned up on any exit path defer func() { - close(taskCh) + close(tasks) pend.Wait() - // Clean out any pending release functions of trace states. - tracker.callReleases() - - // Log the chain result switch { case failed != nil: log.Warn("Chain tracing failed", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin), "err", failed) @@ -361,104 +334,105 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed default: log.Info("Chain tracing finished", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin)) } - close(resCh) + close(results) }() + var preferDisk bool // Feed all the blocks both into the tracer, as well as fast process concurrently for number = start.NumberU64(); number < end.NumberU64(); number++ { // Stop tracing if interruption was requested select { - case <-closed: + case <-notifier.Closed(): return default: } + // clean out any derefs + derefsMu.Lock() + for _, h := range derefTodo { + statedb.Database().TrieDB().Dereference(h) + } + derefTodo = derefTodo[:0] + derefsMu.Unlock() + // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second { logged = time.Now() log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin)) } - // Retrieve the parent block and target block for tracing. - block, err := api.blockByNumber(ctx, rpc.BlockNumber(number)) + // Retrieve the parent state to trace on top + block, err := api.blockByNumber(localctx, rpc.BlockNumber(number)) if err != nil { failed = err break } - next, err := api.blockByNumber(ctx, rpc.BlockNumber(number+1)) + // Prepare the statedb for tracing. Don't use the live database for + // tracing to avoid persisting state junks into the database. + statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk) if err != nil { failed = err break } - // Make sure the state creator doesn't go too far. Too many unprocessed - // trace state may cause the oldest state to become stale(e.g. in - // path-based scheme). - if err = tracker.wait(number); err != nil { - failed = err - break - } - // Prepare the statedb for tracing. Don't use the live database for - // tracing to avoid persisting state junks into the database. Switch - // over to `preferDisk` mode only if the memory usage exceeds the - // limit, the trie database will be reconstructed from scratch only - // if the relevant state is available in disk. 
- var preferDisk bool - if statedb != nil { - s1, s2, s3 := statedb.Database().TrieDB().Size() - preferDisk = s1+s2+s3 > defaultTracechainMemLimit + if trieDb := statedb.Database().TrieDB(); trieDb != nil { + // Hold the reference for tracer, will be released at the final stage + trieDb.Reference(block.Root(), common.Hash{}) + + // Release the parent state because it's already held by the tracer + if parent != (common.Hash{}) { + trieDb.Dereference(parent) + } + // Prefer disk if the trie db memory grows too much + s1, s2 := trieDb.Size() + if !preferDisk && (s1+s2) > defaultTracechainMemLimit { + log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2) + preferDisk = true + } } - statedb, release, err = api.backend.StateAtNextBlock(ctx, block, next, reexec, statedb, false, preferDisk) + parent = block.Root() + + next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1)) if err != nil { failed = err break } - // Clean out any pending release functions of trace state. Note this - // step must be done after constructing tracing state, because the - // tracing state of block next depends on the parent state and construction - // may fail if we release too early. - tracker.callReleases() - // Send the block over to the concurrent tracers (if not in the fast-forward phase) txs := next.Transactions() select { - case taskCh <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))}: - case <-closed: - tracker.releaseState(number, release) + case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, rootref: block.Root(), results: make([]*txTraceResult, len(txs))}: + case <-notifier.Closed(): return } traced += uint64(len(txs)) } }() - // Keep reading the trace results and stream them to result channel. - retCh := make(chan *blockTraceResult) + // Keep reading the trace results and stream them to the user go func() { - defer close(retCh) var ( - next = start.NumberU64() + 1 done = make(map[uint64]*blockTraceResult) + next = start.NumberU64() + 1 ) - for res := range resCh { + for res := range results { // Queue up next received result result := &blockTraceResult{ Block: hexutil.Uint64(res.block.NumberU64()), Hash: res.block.Hash(), Traces: res.results, } + // Schedule any parent tries held in memory by this task for dereferencing done[uint64(result.Block)] = result - - // Stream completed traces to the result channel + derefsMu.Lock() + derefTodo = append(derefTodo, res.rootref) + derefsMu.Unlock() + // Stream completed traces to the user, aborting on the first error for result, ok := done[next]; ok; result, ok = done[next] { if len(result.Traces) > 0 || next == end.NumberU64() { - // It will be blocked in case the channel consumer doesn't take the - // tracing result in time(e.g. the websocket connect is not stable) - // which will eventually block the entire chain tracer. It's the - // expected behavior to not waste node resources for a non-active user. - retCh <- result + notifier.Notify(sub.ID, result) } delete(done, next) next++ } } }() - return retCh + return sub, nil } // TraceBlockByNumber returns the structured logs created during the execution of @@ -483,32 +457,22 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config * // TraceBlock returns the structured logs created during the execution of EVM // and returns them as a JSON object.
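Editor's note: the result loop above receives traced blocks out of order from the worker pool and notifies the subscriber strictly in ascending block order, buffering early arrivals in a done map and draining it with a next counter. The pattern in isolation:

```go
package main

import "fmt"

// reorder drains results that may arrive out of order and emits them in
// ascending order, like the done map plus next counter in traceChain above.
func reorder(results <-chan uint64, first uint64, emit func(uint64)) {
	done := make(map[uint64]bool)
	next := first
	for n := range results {
		done[n] = true // queue up the received result
		for done[next] {
			emit(next) // stream every completed, in-order result
			delete(done, next)
			next++
		}
	}
}

func main() {
	ch := make(chan uint64, 8)
	for _, n := range []uint64{3, 1, 2, 5, 4} {
		ch <- n
	}
	close(ch)
	reorder(ch, 1, func(n uint64) { fmt.Println("block", n) }) // prints 1..5 in order
}
```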
-func (api *baseAPI) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) { +func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) { block := new(types.Block) - if err := rlp.DecodeBytes(blob, block); err != nil { + if err := rlp.Decode(bytes.NewReader(blob), block); err != nil { return nil, fmt.Errorf("could not decode block: %v", err) } return api.traceBlock(ctx, block, config) } -// TraceBlockFromFile returns the structured logs created during the execution of -// EVM and returns them as a JSON object. -func (api *FileTracerAPI) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) { - blob, err := os.ReadFile(file) - if err != nil { - return nil, fmt.Errorf("could not read file: %v", err) - } - return api.TraceBlock(ctx, blob, config) -} - // TraceBadBlock returns the structured logs created during the execution of // EVM against a block pulled from the pool of bad ones and returns them as a JSON // object. func (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) { // Search for the bad block corresponding to [hash]. var ( - badBlocks, _ = api.backend.BadBlocks() - block *types.Block + badBlocks = api.backend.BadBlocks() + block *types.Block ) for _, badBlock := range badBlocks { if hash == badBlock.Hash() { block = badBlock break } } @@ -522,17 +486,6 @@ return api.traceBlock(ctx, block, config) } -// StandardTraceBlockToFile dumps the structured logs created during the -// execution of EVM to the local file system and returns a list of files -// to the caller. -func (api *FileTracerAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { - block, err := api.blockByHash(ctx, hash) - if err != nil { - return nil, err - } - return api.standardTraceBlockToFile(ctx, block, config) -} - // IntermediateRoots executes a block (bad- or canon- or side-), and returns a list // of intermediate roots: the stateroot after each transaction.
func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) { @@ -551,30 +504,25 @@ if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, release, err := api.backend.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) + statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } - defer release() - var ( roots []common.Hash - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) chainConfig = api.backend.ChainConfig() vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) deleteEmptyObjects = chainConfig.IsEIP158(block.Number()) ) for i, tx := range block.Transactions() { - if err := ctx.Err(); err != nil { - return nil, err - } var ( - msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee()) + msg, _ = tx.AsMessage(signer, block.BaseFee()) txContext = core.NewEVMTxContext(msg) vmenv = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{}) ) - statedb.SetTxContext(tx.Hash(), i) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil { + statedb.Prepare(tx.Hash(), i) + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err) // We intentionally don't return the error here: if we do, then the RPC server will not // return the roots. Most likely, the caller already knows that a certain transaction fails to @@ -591,35 +539,13 @@ return roots, nil } -// StandardTraceBadBlockToFile dumps the structured logs created during the -// execution of EVM against a block pulled from the pool of bad ones to the -// local file system and returns a list of files to the caller. -func (api *FileTracerAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { - // Search for the bad block corresponding to [hash]. - var ( - badBlocks, _ = api.backend.BadBlocks() - block *types.Block - ) - for _, badBlock := range badBlocks { - if hash == block.Hash() { - block = badBlock - break - } - } - if block == nil { - return nil, fmt.Errorf("bad block %#x not found", hash) - } - return api.standardTraceBlockToFile(ctx, block, config) -} - // traceBlock configures a new tracer according to the provided configuration, and // executes all the transactions contained within. The return value will be one item -// per transaction, dependent on the requested tracer. -func (api *baseAPI) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { +// per transaction, dependent on the requested tracer.
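Editor's note: the traceBlock body that follows hands each transaction to a worker along with a statedb.Copy() snapshot of its prestate, while the feeding loop fast-forwards the live state without tracing. A minimal sketch of that snapshot-and-fan-out pattern, with an int standing in for the state:

```go
package main

import (
	"fmt"
	"sync"
)

// task carries a snapshot of the running state plus the index to trace,
// mirroring txTraceTask{statedb: statedb.Copy(), index: i}.
type task struct {
	snapshot int
	index    int
}

func main() {
	values := []int{5, 7, 11}
	results := make([]int, len(values))
	jobs := make(chan task, len(values))
	var pend sync.WaitGroup

	for w := 0; w < 2; w++ {
		pend.Add(1)
		go func() {
			defer pend.Done()
			for t := range jobs {
				// "Trace" tx t.index against its prestate snapshot.
				results[t.index] = t.snapshot + values[t.index]
			}
		}()
	}
	// Feed jobs, advancing the state fast (untraced) between snapshots,
	// like ApplyMessage followed by Finalise in the real loop.
	state := 0
	for i, v := range values {
		jobs <- task{snapshot: state, index: i}
		state += v
	}
	close(jobs)
	pend.Wait()
	fmt.Println(results) // [5 12 23]
}
```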
+func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { if block.NumberU64() == 0 { return nil, errors.New("genesis is not traceable") } - // Prepare base state parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash()) if err != nil { return nil, err @@ -628,117 +554,66 @@ func (api *baseAPI) traceBlock(ctx context.Context, block *types.Block, config * if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, release, err := api.backend.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) + statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } - defer release() - - // JS tracers have high overhead. In this case run a parallel - // process that generates states in one thread and traces txes - // in separate worker threads. - if config != nil && config.Tracer != nil && *config.Tracer != "" { - if isJS := DefaultDirectory.IsJS(*config.Tracer); isJS { - return api.traceBlockParallel(ctx, block, statedb, config) - } - } - // Native tracers have low overhead - var ( - txs = block.Transactions() - blockHash = block.Hash() - is158 = api.backend.ChainConfig().IsEIP158(block.Number()) - blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) - results = make([]*txTraceResult, len(txs)) - ) - for i, tx := range txs { - // Generate the next state snapshot fast without tracing - msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) - txctx := &Context{ - BlockHash: blockHash, - BlockNumber: block.Number(), - TxIndex: i, - TxHash: tx.Hash(), - } - res, err := api.traceTx(ctx, msg, txctx, blockCtx, statedb, config) - if err != nil { - return nil, err - } - results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res} - // Finalize the state so any modifications are written to the trie - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - statedb.Finalise(is158) - } - return results, nil -} - -// traceBlockParallel is for tracers that have a high overhead (read JS tracers). One thread -// runs along and executes txes without tracing enabled to generate their prestate. -// Worker threads take the tasks and the prestate and trace them. 
-func (api *baseAPI) traceBlockParallel(ctx context.Context, block *types.Block, statedb *state.StateDB, config *TraceConfig) ([]*txTraceResult, error) { // Execute all the transaction contained within the block concurrently var ( - txs = block.Transactions() - blockHash = block.Hash() - blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) - results = make([]*txTraceResult, len(txs)) - pend sync.WaitGroup + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) + txs = block.Transactions() + results = make([]*txTraceResult, len(txs)) + + pend = new(sync.WaitGroup) + jobs = make(chan *txTraceTask, len(txs)) ) threads := runtime.NumCPU() if threads > len(txs) { threads = len(txs) } - jobs := make(chan *txTraceTask, threads) + blockHash := block.Hash() for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() + + blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) // Fetch and execute the next transaction trace tasks for task := range jobs { - msg, _ := core.TransactionToMessage(txs[task.index], signer, block.BaseFee()) + msg, _ := txs[task.index].AsMessage(signer, block.BaseFee()) txctx := &Context{ - BlockHash: blockHash, - BlockNumber: block.Number(), - TxIndex: task.index, - TxHash: txs[task.index].Hash(), + BlockHash: blockHash, + TxIndex: task.index, + TxHash: txs[task.index].Hash(), } res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) if err != nil { - results[task.index] = &txTraceResult{TxHash: txs[task.index].Hash(), Error: err.Error()} + results[task.index] = &txTraceResult{Error: err.Error()} continue } - results[task.index] = &txTraceResult{TxHash: txs[task.index].Hash(), Result: res} + results[task.index] = &txTraceResult{Result: res} } }() } - // Feed the transactions into the tracers and return var failed error -txloop: + blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) for i, tx := range txs { // Send the trace task over for execution - task := &txTraceTask{statedb: statedb.Copy(), index: i} - select { - case <-ctx.Done(): - failed = ctx.Err() - break txloop - case jobs <- task: - } + jobs <- &txTraceTask{statedb: statedb.Copy(), index: i} // Generate the next state snapshot fast without tracing - msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) - statedb.SetTxContext(tx.Hash(), i) + msg, _ := tx.AsMessage(signer, block.BaseFee()) + statedb.Prepare(tx.Hash(), i) vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil { + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { failed = err - break txloop + break } // Finalize the state so any modifications are written to the trie // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - close(jobs) pend.Wait() @@ -749,139 +624,13 @@ txloop: return results, nil } -// standardTraceBlockToFile configures a new tracer which uses standard JSON output, -// and traces either a full block or an individual transaction. The return value will -// be one filename per transaction traced. 
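The traceBlockParallel body above boils down to a fan-out/fan-in over a jobs channel: the main goroutine replays transactions serially (cheaply, without tracing) to produce a prestate copy per transaction, while a fixed pool of workers consumes (prestate, index) tasks and runs the expensive tracer. A stdlib-only sketch of the same shape, with strings standing in for state copies (none of the types below are coreth's); the standardTraceBlockToFile removal resumes after it:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// task pairs a pre-transaction state snapshot with the transaction index,
// mirroring txTraceTask in the hunk above.
type task struct {
	prestate string
	index    int
}

func main() {
	txs := []string{"tx0", "tx1", "tx2", "tx3"}
	results := make([]string, len(txs))

	threads := runtime.NumCPU()
	if threads > len(txs) {
		threads = len(txs)
	}
	jobs := make(chan *task, len(txs))
	var pend sync.WaitGroup
	for th := 0; th < threads; th++ {
		pend.Add(1)
		go func() {
			defer pend.Done()
			for t := range jobs {
				// "Trace" the transaction against its prestate snapshot.
				results[t.index] = "traced " + txs[t.index] + " on " + t.prestate
			}
		}()
	}
	state := "genesis"
	for i, tx := range txs {
		jobs <- &task{prestate: state, index: i} // snapshot before applying
		state = state + "+" + tx                 // apply without tracing
	}
	close(jobs)
	pend.Wait()
	for _, r := range results {
		fmt.Println(r)
	}
}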
-func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) { - // If we're tracing a single transaction, make sure it's present - if config != nil && config.TxHash != (common.Hash{}) { - if !containsTx(block, config.TxHash) { - return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash) - } - } - if block.NumberU64() == 0 { - return nil, errors.New("genesis is not traceable") - } - parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash()) - if err != nil { - return nil, err - } - reexec := defaultTraceReexec - if config != nil && config.Reexec != nil { - reexec = *config.Reexec - } - statedb, release, err := api.backend.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) - if err != nil { - return nil, err - } - defer release() - - // Retrieve the tracing configurations, or use default values - var ( - logConfig logger.Config - txHash common.Hash - ) - if config != nil { - logConfig = config.Config - txHash = config.TxHash - } - logConfig.Debug = true - - // Execute transaction, either tracing all or just the requested one - var ( - dumps []string - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) - chainConfig = api.backend.ChainConfig() - vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) - canon = true - ) - // Check if there are any overrides: the caller may wish to enable a future - // fork when executing this block. Note, such overrides are only applicable to the - // actual specified block, not any preceding blocks that we have to go through - // in order to obtain the state. - // Therefore, it's perfectly valid to specify `"futureForkBlock": 0`, to enable `futureFork` - if config != nil && config.Overrides != nil { - // Note: This copies the config, to not screw up the main config - chainConfig, canon = overrideConfig(chainConfig, config.Overrides) - } - for i, tx := range block.Transactions() { - // Prepare the transaction for un-traced execution - var ( - msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee()) - txContext = core.NewEVMTxContext(msg) - vmConf vm.Config - dump *os.File - writer *bufio.Writer - err error - ) - // If the transaction needs tracing, swap out the configs - if tx.Hash() == txHash || txHash == (common.Hash{}) { - // Generate a unique temporary file to dump it into - prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4]) - if !canon { - prefix = fmt.Sprintf("%valt-", prefix) - } - dump, err = os.CreateTemp(os.TempDir(), prefix) - if err != nil { - return nil, err - } - dumps = append(dumps, dump.Name()) - - // Swap out the noop logger to the standard tracer - writer = bufio.NewWriter(dump) - vmConf = vm.Config{ - Tracer: logger.NewJSONLogger(&logConfig, writer), - EnablePreimageRecording: true, - } - } - // Execute the transaction and flush any traces to disk - vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) - statedb.SetTxContext(tx.Hash(), i) - _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) - if writer != nil { - writer.Flush() - } - if dump != nil { - dump.Close() - log.Info("Wrote standard trace", "file", dump.Name()) - } - if err != nil { - return dumps, err - } - // Finalize the state so any modifications are written to the trie - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - 
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) - - // If we've traced the transaction we were looking for, abort - if tx.Hash() == txHash { - break - } - } - return dumps, nil -} - -// containsTx reports whether the transaction with a certain hash -// is contained within the specified block. -func containsTx(block *types.Block, hash common.Hash) bool { - for _, tx := range block.Transactions() { - if tx.Hash() == hash { - return true - } - } - return false -} - // TraceTransaction returns the structured logs created during the execution of EVM // and returns them as a JSON object. func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { - tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) + _, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) if err != nil { return nil, err } - // Only mined txes are supported - if tx == nil { - return nil, errTxNotFound - } // It shouldn't happen in practice. if blockNumber == 0 { return nil, errors.New("genesis is not traceable") @@ -894,17 +643,14 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * if err != nil { return nil, err } - msg, vmctx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) + msg, vmctx, statedb, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) if err != nil { return nil, err } - defer release() - txctx := &Context{ - BlockHash: blockHash, - BlockNumber: block.Number(), - TxIndex: int(index), - TxHash: hash, + BlockHash: blockHash, + TxIndex: int(index), + TxHash: hash, } return api.traceTx(ctx, msg, txctx, vmctx, statedb, config) } @@ -912,29 +658,16 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * // TraceCall lets you trace a given eth_call. It collects the structured logs // created during the execution of EVM if the given transaction was added on // top of the provided block and returns them as a JSON object. -// If no transaction index is specified, the trace will be conducted on the state -// after executing the specified block. However, if a transaction index is provided, -// the trace will be conducted on the state after executing the specified transaction -// within the specified block. +// You can provide -2 as a block number to trace on top of the pending block. func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceCallConfig) (interface{}, error) { // Try to retrieve the specified block var ( - err error - block *types.Block - statedb *state.StateDB - release StateReleaseFunc + err error + block *types.Block ) if hash, ok := blockNrOrHash.Hash(); ok { block, err = api.blockByHash(ctx, hash) } else if number, ok := blockNrOrHash.Number(); ok { - if number == rpc.PendingBlockNumber { - // We don't have access to the miner here. For tracing 'future' transactions, - // it can be done with block- and state-overrides instead, which offers - // more flexibility and stability than trying to trace on 'pending', since - // the contents of 'pending' is unstable and probably not a true representation - // of what the next actual block is likely to contain. 
- return nil, errors.New("tracing on top of pending is not supported") - } block, err = api.blockByNumber(ctx, number) } else { return nil, errors.New("invalid arguments; neither block nor hash specified") @@ -947,29 +680,12 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc if config != nil && config.Reexec != nil { reexec = *config.Reexec } - - if config != nil && config.TxIndex != nil { - _, _, statedb, release, err = api.backend.StateAtTransaction(ctx, block, int(*config.TxIndex), reexec) - } else { - statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) - } + statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) if err != nil { return nil, err } - defer release() - - vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) - // Apply the customization rules if required. + // Apply the customized state rules if required. if config != nil { - originalTime := block.Time() - config.BlockOverrides.Apply(&vmctx) - // Apply all relevant upgrades from [originalTime] to the block time set in the override. - // Should be applied before the state overrides. - err = core.ApplyUpgrades(api.backend.ChainConfig(), &originalTime, &vmctx, statedb) - if err != nil { - return nil, err - } - if err := config.StateOverrides.Apply(statedb); err != nil { return nil, err } @@ -979,10 +695,16 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc if err != nil { return nil, err } + vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) var traceConfig *TraceConfig if config != nil { - traceConfig = &config.TraceConfig + traceConfig = &TraceConfig{ + Config: config.Config, + Tracer: config.Tracer, + Timeout: config.Timeout, + Reexec: config.Reexec, + } } return api.traceTx(ctx, msg, new(Context), vmctx, statedb, traceConfig) } @@ -990,49 +712,73 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc // traceTx configures a new tracer according to the provided configuration, and // executes the given message in the provided environment. The return value will // be tracer dependent. 
-func (api *baseAPI) traceTx(ctx context.Context, message *core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { + // Assemble the structured logger or the JavaScript tracer var ( - tracer Tracer + tracer vm.EVMLogger err error - timeout = defaultTraceTimeout txContext = core.NewEVMTxContext(message) ) - if config == nil { - config = &TraceConfig{} - } - // Default tracer is the struct logger - tracer = logger.NewStructLogger(config.Config) - if config.Tracer != nil { - tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig) - if err != nil { - return nil, err + switch { + case config == nil: + tracer = logger.NewStructLogger(nil) + case config.Tracer != nil: + // Define a meaningful timeout of a single transaction trace + timeout := defaultTraceTimeout + if config.Timeout != nil { + if timeout, err = time.ParseDuration(*config.Timeout); err != nil { + return nil, err + } } - } - vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer, NoBaseFee: true}) - - // Define a meaningful timeout of a single transaction trace - if config.Timeout != nil { - if timeout, err = time.ParseDuration(*config.Timeout); err != nil { + if t, err := New(*config.Tracer, txctx); err != nil { return nil, err + } else { + deadlineCtx, cancel := context.WithTimeout(ctx, timeout) + go func() { + <-deadlineCtx.Done() + if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { + t.Stop(errors.New("execution timeout")) + } + }() + defer cancel() + tracer = t } + + default: + tracer = logger.NewStructLogger(config.Config) } - deadlineCtx, cancel := context.WithTimeout(ctx, timeout) - go func() { - <-deadlineCtx.Done() - if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { - tracer.Stop(errors.New("execution timeout")) - // Stop evm execution. Note cancellation is not necessarily immediate. - vmenv.Cancel() - } - }() - defer cancel() + // Run the transaction with tracing enabled. + vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true}) // Call Prepare to clear out the statedb access list - statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) - if _, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.GasLimit)); err != nil { + statedb.Prepare(txctx.TxHash, txctx.TxIndex) + + result, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas())) + if err != nil { return nil, fmt.Errorf("tracing failed: %w", err) } - return tracer.GetResult() + + // Depending on the tracer type, format and return the output. + switch tracer := tracer.(type) { + case *logger.StructLogger: + // If the result contains a revert reason, return it. + returnVal := fmt.Sprintf("%x", result.Return()) + if len(result.Revert()) > 0 { + returnVal = fmt.Sprintf("%x", result.Revert()) + } + return &ethapi.ExecutionResult{ + Gas: result.UsedGas, + Failed: result.Failed(), + ReturnValue: returnVal, + StructLogs: ethapi.FormatLogs(tracer.StructLogs()), + }, nil + + case Tracer: + return tracer.GetResult() + + default: + panic(fmt.Sprintf("bad tracer type %T", tracer)) + } } // APIs return the collection of RPC services the tracer package offers.
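One detail of the traceTx hunk above deserves emphasis before the APIs registration below: the timeout is enforced by a deadline context plus a watcher goroutine that flips the tracer's interrupt flag, which the execution loop then polls. A self-contained sketch of that pattern; the tracer type here is a stand-in modeled on the StructLogger's atomic flag, not the real interface:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// tracer carries only the interrupt flag relevant to the pattern.
type tracer struct{ interrupt uint32 }

func (t *tracer) Stop(err error)    { atomic.StoreUint32(&t.interrupt, 1) }
func (t *tracer) interrupted() bool { return atomic.LoadUint32(&t.interrupt) > 0 }

func main() {
	t := &tracer{}
	deadlineCtx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	go func() {
		<-deadlineCtx.Done()
		if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
			t.Stop(errors.New("execution timeout"))
		}
	}()
	// Simulated EVM step loop: each step checks the interrupt flag.
	for step := 0; ; step++ {
		if t.interrupted() {
			fmt.Println("aborted at step", step)
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
}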
@@ -1041,70 +787,10 @@ func APIs(backend Backend) []rpc.API { return []rpc.API{ { Namespace: "debug", + Version: "1.0", Service: NewAPI(backend), + Public: false, Name: "debug-tracer", }, - { - Namespace: "debug", - Service: NewFileTracerAPI(backend), - Name: "debug-file-tracer", - }, } } - -// overrideConfig returns a copy of [original] with network upgrades enabled by [override] enabled, -// along with a boolean that indicates whether the copy is canonical (equivalent to the original). -func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) (*params.ChainConfig, bool) { - copy := new(params.ChainConfig) - *copy = *original - canon := true - - // Apply network upgrades (after Berlin) to the copy. - // Note in coreth, ApricotPhase2 is the "equivalent" to Berlin. - if timestamp := override.ApricotPhase2BlockTimestamp; timestamp != nil { - copy.ApricotPhase2BlockTimestamp = timestamp - canon = false - } - if timestamp := override.ApricotPhase3BlockTimestamp; timestamp != nil { - copy.ApricotPhase3BlockTimestamp = timestamp - canon = false - } - if timestamp := override.ApricotPhase4BlockTimestamp; timestamp != nil { - copy.ApricotPhase4BlockTimestamp = timestamp - canon = false - } - if timestamp := override.ApricotPhase5BlockTimestamp; timestamp != nil { - copy.ApricotPhase5BlockTimestamp = timestamp - canon = false - } - if timestamp := override.ApricotPhasePre6BlockTimestamp; timestamp != nil { - copy.ApricotPhasePre6BlockTimestamp = timestamp - canon = false - } - if timestamp := override.ApricotPhase6BlockTimestamp; timestamp != nil { - copy.ApricotPhase6BlockTimestamp = timestamp - canon = false - } - if timestamp := override.ApricotPhasePost6BlockTimestamp; timestamp != nil { - copy.ApricotPhasePost6BlockTimestamp = timestamp - canon = false - } - if timestamp := override.BanffBlockTimestamp; timestamp != nil { - copy.BanffBlockTimestamp = timestamp - canon = false - } - if timestamp := override.CortinaBlockTimestamp; timestamp != nil { - copy.CortinaBlockTimestamp = timestamp - canon = false - } - if timestamp := override.DurangoBlockTimestamp; timestamp != nil { - copy.DurangoBlockTimestamp = timestamp - canon = false - } - if timestamp := override.CancunTime; timestamp != nil { - copy.CancunTime = timestamp - canon = false - } - - return copy, canon -} diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index ecf4741dc6..90e1b52e18 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -27,37 +27,36 @@ package tracers import ( + "bytes" "context" "crypto/ecdsa" - "encoding/json" "errors" "fmt" "math/big" "reflect" - "sync/atomic" + "sort" "testing" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/internal/ethapi" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/internal/ethapi" + "github.com/tenderly/coreth/params" + 
"github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "golang.org/x/exp/slices" ) var ( - errStateNotFound = errors.New("state not found") - errBlockNotFound = errors.New("block not found") + errStateNotFound = errors.New("state not found") + errBlockNotFound = errors.New("block not found") + errTransactionNotFound = errors.New("transaction not found") ) type testBackend struct { @@ -65,34 +64,34 @@ type testBackend struct { engine consensus.Engine chaindb ethdb.Database chain *core.BlockChain - - refHook func() // Hook is invoked when the requested state is referenced - relHook func() // Hook is invoked when the requested state is released } -// testBackend creates a new test backend. OBS: After test is done, teardown must be -// invoked in order to release associated resources. func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { backend := &testBackend{ - chainConfig: gspec.Config, + chainConfig: params.TestChainConfig, engine: dummy.NewETHFaker(), chaindb: rawdb.NewMemoryDatabase(), } // Generate blocks for testing - _, blocks, _, err := core.GenerateChainWithGenesis(gspec, backend.engine, n, 10, generator) + gspec.Config = backend.chainConfig + var ( + gendb = rawdb.NewMemoryDatabase() + genesis = gspec.MustCommit(gendb) + ) + blocks, _, err := core.GenerateChain(backend.chainConfig, genesis, backend.engine, gendb, n, 10, generator) if err != nil { t.Fatal(err) } // Import the canonical chain + gspec.MustCommit(backend.chaindb) cacheConfig := &core.CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TriePrefetcherParallelism: 4, - SnapshotLimit: 128, - Pruning: false, // Archive mode + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + SnapshotLimit: 128, + Pruning: false, // Archive mode } - chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, backend.engine, vm.Config{}, common.Hash{}, false) + chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, backend.chainConfig, backend.engine, vm.Config{}, common.Hash{}) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -126,15 +125,18 @@ func (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { - return b.chain.GetBlockByNumber(b.chain.CurrentBlock().Number.Uint64()), nil + return b.chain.CurrentBlock(), nil } return b.chain.GetBlockByNumber(uint64(number)), nil } -func (b *testBackend) BadBlocks() ([]*types.Block, []*core.BadBlockReason) { return nil, nil } +func (b *testBackend) BadBlocks() []*types.Block { return nil } func (b *testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { tx, hash, blockNumber, index := rawdb.ReadTransaction(b.chaindb, txHash) + if tx == nil { + return nil, common.Hash{}, 0, 0, errTransactionNotFound + } return tx, hash, blockNumber, index, nil } @@ -154,70 +156,42 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } -// teardown releases the associated resources. 
-func (b *testBackend) teardown() { - b.chain.Stop() -} - -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { statedb, err := b.chain.StateAt(block.Root()) if err != nil { - return nil, nil, errStateNotFound - } - if b.refHook != nil { - b.refHook() + return nil, errStateNotFound } - release := func() { - if b.relHook != nil { - b.relHook() - } - } - return statedb, release, nil + return statedb, nil } -func (b *testBackend) StateAtNextBlock(ctx context.Context, parent, nextBlock *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { - statedb, release, err := b.StateAtBlock(ctx, parent, reexec, base, readOnly, preferDisk) - if err != nil { - return nil, nil, err - } - // Apply upgrades to the parent state - err = core.ApplyUpgrades(b.chainConfig, &parent.Header().Time, nextBlock, statedb) - if err != nil { - release() - return nil, nil, err - } - - return statedb, release, nil -} - -func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) { +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, nil, errBlockNotFound + return nil, vm.BlockContext{}, nil, errBlockNotFound } - statedb, release, err := b.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) + statedb, err := b.chain.StateAt(parent.Root()) if err != nil { - return nil, vm.BlockContext{}, nil, nil, err + return nil, vm.BlockContext{}, nil, errStateNotFound } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, release, nil + return nil, vm.BlockContext{}, statedb, nil } // Recompute transactions up to the target index. 
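That closing comment is the whole algorithm of StateAtTransaction, which the loop just below implements with real EVM messages: apply every earlier transaction to the parent state and hand back the state the target transaction would execute against. As a toy illustration of the control flow only (stand-in types, no coreth APIs):

package main

import "fmt"

type tx struct{ name string }

// stateAtTx replays txs[:target] on top of state and returns the state just
// before the target transaction, or an error if the index is out of range.
func stateAtTx(state string, txs []tx, target int) (string, error) {
	for idx, t := range txs {
		if idx == target {
			return state, nil
		}
		state = state + "+" + t.name // apply without tracing
	}
	return "", fmt.Errorf("transaction index %d out of range", target)
}

func main() {
	s, err := stateAtTx("parent", []tx{{"a"}, {"b"}, {"c"}}, 2)
	fmt.Println(s, err) // parent+a+b <nil>
}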
- signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time()) + signer := types.MakeSigner(b.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())) for idx, tx := range block.Transactions() { - msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) + msg, _ := tx.AsMessage(signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { - return msg, context, statedb, release, nil + return msg, context, statedb, nil } vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } func TestTraceCall(t *testing.T) { @@ -225,69 +199,27 @@ func TestTraceCall(t *testing.T) { // Initialize test accounts accounts := newAccounts(3) - genesis := &core.Genesis{ - Config: params.TestBanffChainConfig, // TODO: go-ethereum has not enabled Shanghai yet, so we use Banff here so tests pass. - Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }, - } + genesis := &core.Genesis{Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }} genBlocks := 10 signer := types.HomesteadSigner{} - nonce := uint64(0) - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, new(big.Int).Add(b.BaseFee(), big.NewInt(int64(500*params.GWei))), nil), signer, accounts[0].key) b.AddTx(tx) - nonce++ - - if i == genBlocks-2 { - // Transfer from account[0] to account[2] - tx, _ = types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: &accounts[2].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - nonce++ - - // Transfer from account[0] to account[1] again - tx, _ = types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - nonce++ - } - }) + })) - uintPtr := func(i int) *hexutil.Uint { x := hexutil.Uint(i); return &x } - - defer backend.teardown() - api := NewAPI(backend) var testSuite = []struct { blockNumber rpc.BlockNumber call ethapi.TransactionArgs config *TraceCallConfig 
expectErr error - expect string + expect interface{} }{ // Standard JSON trace upon the genesis, plain transfer. { @@ -299,7 +231,12 @@ func TestTraceCall(t *testing.T) { }, config: nil, expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, + expect: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, }, // Standard JSON trace upon the head, plain transfer. { @@ -311,52 +248,12 @@ func TestTraceCall(t *testing.T) { }, config: nil, expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Upon the last state, default to the post block's state - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), + expect: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, }, - config: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Before the first transaction, should be failed - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: &TraceCallConfig{TxIndex: uintPtr(0)}, - expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr), - }, - // Before the target transaction, should be failed - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: &TraceCallConfig{TxIndex: uintPtr(1)}, - expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr), - }, - // After the target transaction, should be succeed - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: &TraceCallConfig{TxIndex: uintPtr(2)}, - expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, }, // Standard JSON trace upon the non-existent block, error expects { @@ -368,7 +265,7 @@ func TestTraceCall(t *testing.T) { }, config: nil, expectErr: fmt.Errorf("block #%d not found", genBlocks+1), - //expect: nil, + expect: nil, }, // Standard JSON trace upon the latest block { @@ -380,9 +277,14 @@ func TestTraceCall(t *testing.T) { }, config: nil, expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, + expect: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, }, - // Tracing on 'pending' should fail: + // Standard JSON trace upon the pending block { blockNumber: rpc.PendingBlockNumber, call: ethapi.TransactionArgs{ @@ -391,48 +293,32 @@ func TestTraceCall(t *testing.T) { Value: (*hexutil.Big)(big.NewInt(1000)), }, config: nil, - expectErr: errors.New("tracing on top of pending is not supported"), - }, - { - blockNumber: rpc.LatestBlockNumber, - call:
ethapi.TransactionArgs{ - From: &accounts[0].addr, - Input: &hexutil.Bytes{0x43}, // blocknumber - }, - config: &TraceCallConfig{ - BlockOverrides: &ethapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))}, - }, expectErr: nil, - expect: ` {"gas":53018,"failed":false,"returnValue":"","structLogs":[ - {"pc":0,"op":"NUMBER","gas":24946984,"gasCost":2,"depth":1,"stack":[]}, - {"pc":1,"op":"STOP","gas":24946982,"gasCost":0,"depth":1,"stack":["0x1337"]}]}`, + expect: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, }, } - for i, testspec := range testSuite { + for _, testspec := range testSuite { result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config) if testspec.expectErr != nil { if err == nil { - t.Errorf("test %d: expect error %v, got nothing", i, testspec.expectErr) + t.Errorf("Expect error %v, got nothing", testspec.expectErr) continue } - if !reflect.DeepEqual(err.Error(), testspec.expectErr.Error()) { - t.Errorf("test %d: error mismatch, want '%v', got '%v'", i, testspec.expectErr, err) + if !reflect.DeepEqual(err, testspec.expectErr) { + t.Errorf("Error mismatch, want %v, got %v", testspec.expectErr, err) } } else { if err != nil { - t.Errorf("test %d: expect no error, got %v", i, err) + t.Errorf("Expect no error, got %v", err) continue } - var have *logger.ExecutionResult - if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - var want *logger.ExecutionResult - if err := json.Unmarshal([]byte(testspec.expect), &want); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("test %d: result mismatch, want %v, got %v", i, testspec.expect, string(result.(json.RawMessage))) + if !reflect.DeepEqual(result, testspec.expect) { + t.Errorf("Result mismatch, want %v, got %v", testspec.expect, result) } } } } @@ -443,54 +329,31 @@ func TestTraceTransaction(t *testing.T) { // Initialize test accounts accounts := newAccounts(2) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - }, - } + genesis := &core.Genesis{Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + }} target := common.Hash{} signer := types.HomesteadSigner{} - backend := newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { + api := NewAPI(newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: new(big.Int).Add(b.BaseFee(), big.NewInt(int64(500*params.GWei))), - Data: nil}), - signer, accounts[0].key) + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, new(big.Int).Add(b.BaseFee(), big.NewInt(int64(500*params.GWei))), nil), signer, accounts[0].key) b.AddTx(tx) target = tx.Hash() - }) - defer backend.chain.Stop() - api := NewAPI(backend) + })) result, err := api.TraceTransaction(context.Background(), target, nil) if err != nil { t.Errorf("Failed to trace transaction %v", err) } - var
have *logger.ExecutionResult - if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { - t.Errorf("failed to unmarshal result %v", err) - } - expected := &logger.ExecutionResult{ + if !reflect.DeepEqual(result, &ethapi.ExecutionResult{ Gas: params.TxGas, Failed: false, ReturnValue: "", - StructLogs: []logger.StructLogRes{}, - } - if !reflect.DeepEqual(have, expected) { - t.Errorf("Transaction tracing result is different: have %v want %v", have, expected) - } - - // Test non-existent transaction - _, err = api.TraceTransaction(context.Background(), common.Hash{42}, nil) - if !errors.Is(err, errTxNotFound) { - t.Fatalf("want %v, have %v", errTxNotFound, err) + StructLogs: []ethapi.StructLogRes{}, + }) { + t.Error("Transaction tracing result is different") } } @@ -499,422 +362,108 @@ func TestTraceBlock(t *testing.T) { // Initialize test accounts accounts := newAccounts(3) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }, - } + genesis := &core.Genesis{Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }} genBlocks := 10 signer := types.HomesteadSigner{} - var txHash common.Hash - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, new(big.Int).Add(b.BaseFee(), big.NewInt(int64(500*params.GWei))), nil), signer, accounts[0].key) b.AddTx(tx) - txHash = tx.Hash() - }) - defer backend.chain.Stop() - api := NewAPI(backend) + })) var testSuite = []struct { blockNumber rpc.BlockNumber config *TraceConfig - want string + expect interface{} expectErr error }{ // Trace genesis block, expect error { blockNumber: rpc.BlockNumber(0), + config: nil, + expect: nil, expectErr: errors.New("genesis is not traceable"), }, // Trace head block { blockNumber: rpc.BlockNumber(genBlocks), - want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), + config: nil, + expectErr: nil, + expect: []*txTraceResult{ + { + Result: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + }, }, // Trace non-existent block { blockNumber: rpc.BlockNumber(genBlocks + 1), + config: nil, expectErr: fmt.Errorf("block #%d not found", genBlocks+1), + expect: nil, }, // Trace latest block { blockNumber: rpc.LatestBlockNumber, - want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), - }, - // Trace pending block - { - blockNumber: rpc.PendingBlockNumber, - want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), - }, - } - for i, tc := range testSuite { - result, err := api.TraceBlockByNumber(context.Background(),
tc.blockNumber, tc.config) - if tc.expectErr != nil { - if err == nil { - t.Errorf("test %d, want error %v", i, tc.expectErr) - continue - } - if !reflect.DeepEqual(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d, want no error, have %v", i, err) - continue - } - have, _ := json.Marshal(result) - want := tc.want - if string(have) != want { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want) - } - } -} - -func TestTracingWithOverrides(t *testing.T) { - t.Parallel() - // Initialize test accounts - accounts := newAccounts(3) - storageAccount := common.Address{0x13, 37} - genesis := &core.Genesis{ - Config: params.TestCortinaChainConfig, // TODO: go-ethereum has not enabled Shanghai yet, so we use Cortina here so tests pass. - Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - // An account with existing storage - storageAccount: { - Balance: new(big.Int), - Storage: map[common.Hash]common.Hash{ - common.HexToHash("0x03"): common.HexToHash("0x33"), - common.HexToHash("0x04"): common.HexToHash("0x44"), - }, - }, - }, - } - genBlocks := 10 - signer := types.HomesteadSigner{} - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - }) - defer backend.chain.Stop() - api := NewAPI(backend) - randomAccounts := newAccounts(3) - type res struct { - Gas int - Failed bool - ReturnValue string - } - var testSuite = []struct { - blockNumber rpc.BlockNumber - call ethapi.TransactionArgs - config *TraceCallConfig - expectErr error - want string - }{ - // Call which can only succeed if state is state overridden - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - randomAccounts[0].addr: ethapi.OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, - }, - }, - want: `{"gas":21000,"failed":false,"returnValue":""}`, - }, - // Invalid call without state overriding - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: &TraceCallConfig{}, - expectErr: core.ErrInsufficientFunds, - }, - // Successful simple contract call - // - // // SPDX-License-Identifier: GPL-3.0 - // - // pragma solidity >=0.7.0 <0.8.0; - // - // /** - // * @title Storage - // * @dev Store & retrieve value in a variable - // */ - // contract Storage { - // uint256 public number; - // constructor() { - // number = block.number; - // } - // } - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[2].addr, - Data: newRPCBytes(common.Hex2Bytes("8381f58a")), // call number() - }, - config: &TraceCallConfig{ - //Tracer: &tracer, - StateOverrides: &ethapi.StateOverride{ -
randomAccounts[2].addr: ethapi.OverrideAccount{ - Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033")), - StateDiff: newStates([]common.Hash{{}}, []common.Hash{common.BigToHash(big.NewInt(123))}), - }, - }, - }, - want: `{"gas":23347,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000007b"}`, - }, - { // Override blocknumber - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - // BLOCKNUMBER PUSH1 MSTORE - Input: newRPCBytes(common.Hex2Bytes("4360005260206000f3")), - //&hexutil.Bytes{0x43}, // blocknumber - }, - config: &TraceCallConfig{ - BlockOverrides: &ethapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))}, - }, - want: `{"gas":59537,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000001337"}`, - }, - { // Override blocknumber, and query a blockhash - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - Input: &hexutil.Bytes{ - 0x60, 0x00, 0x40, // BLOCKHASH(0) - 0x60, 0x00, 0x52, // STORE memory offset 0 - 0x61, 0x13, 0x36, 0x40, // BLOCKHASH(0x1336) - 0x60, 0x20, 0x52, // STORE memory offset 32 - 0x61, 0x13, 0x37, 0x40, // BLOCKHASH(0x1337) - 0x60, 0x40, 0x52, // STORE memory offset 64 - 0x60, 0x60, 0x60, 0x00, 0xf3, // RETURN (0-96) - - }, // blocknumber - }, - config: &TraceCallConfig{ - BlockOverrides: &ethapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))}, - }, - want: `{"gas":72666,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`, - }, - /* - pragma solidity =0.8.12; - - contract Test { - uint private x; - - function test2() external { - x = 1337; - revert(); - } - - function test() external returns (uint) { - x = 1; - try this.test2() {} catch (bytes memory) {} - return x; - } - } - */ - { // First with only code override, not storage override - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[2].addr, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - randomAccounts[2].addr: ethapi.OverrideAccount{ - Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")), - }, - }, - }, - want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`, - }, - { // Same again, this time with storage override - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To:
&randomAccounts[2].addr, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - randomAccounts[2].addr: ethapi.OverrideAccount{ - Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")), - State: newStates([]common.Hash{{}}, []common.Hash{{}}), - }, - }, - }, - //want: `{"gas":46900,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000539"}`, - want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`, - }, - { // No state override - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &storageAccount, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - storageAccount: ethapi.OverrideAccount{ - Code: newRPCBytes([]byte{ - // SLOAD(3) + SLOAD(4) (which is 0x77) - byte(vm.PUSH1), 0x04, - byte(vm.SLOAD), - byte(vm.PUSH1), 0x03, - byte(vm.SLOAD), - byte(vm.ADD), - // 0x77 -> MSTORE(0) - byte(vm.PUSH1), 0x00, - byte(vm.MSTORE), - // RETURN (0, 32) - byte(vm.PUSH1), 32, - byte(vm.PUSH1), 00, - byte(vm.RETURN), - }), - }, - }, - }, - want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000077"}`, - }, - { // Full state override - // The original storage is - // 3: 0x33 - // 4: 0x44 - // With a full override, where we set 3:0x11, the slot 4 should be - // removed. So SLOT(3)+SLOT(4) should be 0x11. - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &storageAccount, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - storageAccount: ethapi.OverrideAccount{ - Code: newRPCBytes([]byte{ - // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x00) - byte(vm.PUSH1), 0x04, - byte(vm.SLOAD), - byte(vm.PUSH1), 0x03, - byte(vm.SLOAD), - byte(vm.ADD), - // 0x11 -> MSTORE(0) - byte(vm.PUSH1), 0x00, - byte(vm.MSTORE), - // RETURN (0, 32) - byte(vm.PUSH1), 32, - byte(vm.PUSH1), 00, - byte(vm.RETURN), - }), - State: newStates( - []common.Hash{common.HexToHash("0x03")}, - []common.Hash{common.HexToHash("0x11")}), + config: nil, + expectErr: nil, + expect: []*txTraceResult{ + { + Result: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, }, }, }, - want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000011"}`, }, - { // Partial state override - // The original storage is - // 3: 0x33 - // 4: 0x44 - // With a partial override, where we set 3:0x11, the slot 4 as before. - // So SLOT(3)+SLOT(4) should be 0x55.
- blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &storageAccount, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - storageAccount: ethapi.OverrideAccount{ - Code: newRPCBytes([]byte{ - // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x44) - byte(vm.PUSH1), 0x04, - byte(vm.SLOAD), - byte(vm.PUSH1), 0x03, - byte(vm.SLOAD), - byte(vm.ADD), - // 0x55 -> MSTORE(0) - byte(vm.PUSH1), 0x00, - byte(vm.MSTORE), - // RETURN (0, 32) - byte(vm.PUSH1), 32, - byte(vm.PUSH1), 00, - byte(vm.RETURN), - }), - StateDiff: &map[common.Hash]common.Hash{ - common.HexToHash("0x03"): common.HexToHash("0x11"), - }, + // Trace pending block + { + blockNumber: rpc.PendingBlockNumber, + config: nil, + expectErr: nil, + expect: []*txTraceResult{ + { + Result: &ethapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, }, }, }, - want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000055"}`, }, } - for i, tc := range testSuite { - result, err := api.TraceCall(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, tc.config) - if tc.expectErr != nil { + for _, testspec := range testSuite { + result, err := api.TraceBlockByNumber(context.Background(), testspec.blockNumber, testspec.config) + if testspec.expectErr != nil { if err == nil { - t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) + t.Errorf("Expect error %v, got nothing", testspec.expectErr) continue } - if !errors.Is(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err) + if !reflect.DeepEqual(err, testspec.expectErr) { + t.Errorf("Error mismatch, want %v, got %v", testspec.expectErr, err) + } + } else { + if err != nil { + t.Errorf("Expect no error, got %v", err) + continue + } + if !reflect.DeepEqual(result, testspec.expect) { + t.Errorf("Result mismatch, want %v, got %v", testspec.expect, result) } - continue - } - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - // Turn result into res-struct - var ( - have res - want res - ) - resBytes, _ := json.Marshal(result) - json.Unmarshal(resBytes, &have) - json.Unmarshal([]byte(tc.want), &want) - if !reflect.DeepEqual(have, want) { - t.Logf("result: %v\n", string(resBytes)) - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, have, want) } } } @@ -924,111 +473,18 @@ type Account struct { addr common.Address } -func newAccounts(n int) (accounts []Account) { +type Accounts []Account + +func (a Accounts) Len() int { return len(a) } +func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } + +func newAccounts(n int) (accounts Accounts) { for i := 0; i < n; i++ { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) accounts = append(accounts, Account{key: key, addr: addr}) } - slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) }) + sort.Sort(accounts) return accounts } - -func newRPCBalance(balance *big.Int) **hexutil.Big { - rpcBalance := (*hexutil.Big)(balance) - return &rpcBalance -} - -func newRPCBytes(bytes []byte) *hexutil.Bytes { - rpcBytes := hexutil.Bytes(bytes) - return &rpcBytes -} - -func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.Hash { -
if len(keys) != len(vals) { - panic("invalid input") - } - m := make(map[common.Hash]common.Hash) - for i := 0; i < len(keys); i++ { - m[keys[i]] = vals[i] - } - return &m -} - -func TestTraceChain(t *testing.T) { - // Initialize test accounts - // Note: the balances in this test have been increased compared to go-ethereum. - accounts := newAccounts(3) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(5 * params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(5 * params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(5 * params.Ether)}, - }, - } - genBlocks := 50 - signer := types.HomesteadSigner{} - - var ( - ref atomic.Uint32 // total refs has made - rel atomic.Uint32 // total rels has made - nonce uint64 - ) - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - for j := 0; j < i+1; j++ { - tx, _ := types.SignTx(types.NewTransaction(nonce, accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) - b.AddTx(tx) - nonce += 1 - } - }) - backend.refHook = func() { ref.Add(1) } - backend.relHook = func() { rel.Add(1) } - api := NewAPI(backend) - - single := `{"txHash":"0x0000000000000000000000000000000000000000000000000000000000000000","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` - var cases = []struct { - start uint64 - end uint64 - config *TraceConfig - }{ - {0, 50, nil}, // the entire chain range, blocks [1, 50] - {10, 20, nil}, // the middle chain range, blocks [11, 20] - } - for _, c := range cases { - ref.Store(0) - rel.Store(0) - - from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) - to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) - resCh := api.traceChain(from, to, c.config, nil) - - next := c.start + 1 - for result := range resCh { - if have, want := uint64(result.Block), next; have != want { - t.Fatalf("unexpected tracing block, have %d want %d", have, want) - } - if have, want := len(result.Traces), int(next); have != want { - t.Fatalf("unexpected result length, have %d want %d", have, want) - } - for _, trace := range result.Traces { - trace.TxHash = common.Hash{} - blob, _ := json.Marshal(trace) - if have, want := string(blob), single; have != want { - t.Fatalf("unexpected tracing result, have\n%v\nwant:\n%v", have, want) - } - } - next += 1 - } - if next != c.end+1 { - t.Error("Missing tracing block") - } - - if nref, nrel := ref.Load(), rel.Load(); nref != nrel { - t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel) - } - } -} diff --git a/eth/tracers/logger/access_list_tracer.go b/eth/tracers/logger/access_list_tracer.go index e2bb12f2d7..a761a33ed6 100644 --- a/eth/tracers/logger/access_list_tracer.go +++ b/eth/tracers/logger/access_list_tracer.go @@ -19,9 +19,9 @@ package logger import ( "math/big" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" ) // accessList is an accumulator for the set of accounts and storage slots an EVM diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index d94ffd3cf7..a2c18f392e 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -24,15 +24,16 @@ import ( "math/big" "strings" "sync/atomic" + "time" - 
"github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/holiman/uint256" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" ) // Storage represents a contract's storage. @@ -84,7 +85,6 @@ type structLogMarshaling struct { GasCost math.HexOrDecimal64 Memory hexutil.Bytes ReturnData hexutil.Bytes - Stack []hexutil.U256 OpName string `json:"opName"` // adds call to OpName() in MarshalJSON ErrorString string `json:"error,omitempty"` // adds call to ErrorString() in MarshalJSON } @@ -118,8 +118,8 @@ type StructLogger struct { gasLimit uint64 usedGas uint64 - interrupt atomic.Bool // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + interrupt uint32 // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption } // NewStructLogger returns a new logger @@ -151,14 +151,14 @@ func (l *StructLogger) CaptureStart(env *vm.EVM, from common.Address, to common. // CaptureState also tracks SLOAD/SSTORE ops to track storage change. func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { // If tracing was interrupted, set the error and stop - if l.interrupt.Load() { + if atomic.LoadUint32(&l.interrupt) > 0 { + l.env.Cancel() return } // check if already accumulated the specified number of logs if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { return } - memory := scope.Memory stack := scope.Stack contract := scope.Contract @@ -220,7 +220,7 @@ func (l *StructLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, s } // CaptureEnd is called after the call finishes to finalize the tracing. -func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, err error) { +func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { l.output = output l.err = err if l.cfg.Debug { @@ -260,7 +260,7 @@ func (l *StructLogger) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. 
func (l *StructLogger) Stop(err error) { l.reason = err - l.interrupt.Store(true) + atomic.StoreUint32(&l.interrupt, 1) } func (l *StructLogger) CaptureTxStart(gasLimit uint64) { @@ -386,7 +386,7 @@ func (t *mdLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err) } -func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, err error) { +func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) { fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n", output, gasUsed, err) } @@ -420,7 +420,6 @@ type StructLogRes struct { Depth int `json:"depth"` Error string `json:"error,omitempty"` Stack *[]string `json:"stack,omitempty"` - ReturnData string `json:"returnData,omitempty"` Memory *[]string `json:"memory,omitempty"` Storage *map[string]string `json:"storage,omitempty"` RefundCounter uint64 `json:"refund,omitempty"` @@ -446,9 +445,6 @@ func formatLogs(logs []StructLog) []StructLogRes { } formatted[index].Stack = &stack } - if trace.ReturnData != nil && len(trace.ReturnData) > 0 { - formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String() - } if trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { diff --git a/eth/tracers/logger/logger_json.go b/eth/tracers/logger/logger_json.go index 3b2b783252..e677cf67c6 100644 --- a/eth/tracers/logger/logger_json.go +++ b/eth/tracers/logger/logger_json.go @@ -21,9 +21,9 @@ import ( "io" "math/big" - "github.com/ava-labs/coreth/core/vm" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/tenderly/coreth/core/vm" ) type JSONLogger struct { diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go index 113dfa3a4a..6e30f9e116 100644 --- a/eth/tracers/native/4byte.go +++ b/eth/tracers/native/4byte.go @@ -31,14 +31,15 @@ import ( "math/big" "strconv" "sync/atomic" + "time" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers" ) func init() { - tracers.DefaultDirectory.Register("4byteTracer", newFourByteTracer, false) + register("4byteTracer", newFourByteTracer) } // fourByteTracer searches for 4byte-identifiers, and collects them for post-processing. @@ -46,19 +47,18 @@ func init() { // a reversed signature can be matched against the size of the data. 
// // Example: -// -// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"}) -// { -// 0x27dc297e-128: 1, -// 0x38cc4831-0: 2, -// 0x524f3889-96: 1, -// 0xadf59f99-288: 1, -// 0xc281d19e-0: 1 -// } +// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"}) +// { +// 0x27dc297e-128: 1, +// 0x38cc4831-0: 2, +// 0x524f3889-96: 1, +// 0xadf59f99-288: 1, +// 0xc281d19e-0: 1 +// } type fourByteTracer struct { - noopTracer + env *vm.EVM ids map[string]int // ids aggregates the 4byte ids found - interrupt atomic.Bool // Atomic flag to signal execution interruption + interrupt uint32 // Atomic flag to signal execution interruption reason error // Textual reason for the interruption activePrecompiles []common.Address // Updated on CaptureStart based on given rules } @@ -90,8 +90,10 @@ func (t *fourByteTracer) store(id []byte, size int) { // CaptureStart implements the EVMLogger interface to initialize the tracing operation. func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { + t.env = env + // Update list of precompiles based on current block - rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time) + rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time) t.activePrecompiles = vm.ActivePrecompiles(rules) // Save the outer calldata also @@ -100,10 +102,15 @@ func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to commo } } +// CaptureState implements the EVMLogger interface to trace a single step of VM execution. +func (t *fourByteTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +} + // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { // Skip if tracing was interrupted - if t.interrupt.Load() { + if atomic.LoadUint32(&t.interrupt) > 0 { + t.env.Cancel() return } if len(input) < 4 { @@ -121,6 +128,23 @@ func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to comm t.store(input[0:4], len(input)-4) } +// CaptureExit is called when EVM exits a scope, even if the scope didn't +// execute any code. +func (t *fourByteTracer) CaptureExit(output []byte, gasUsed uint64, err error) { +} + +// CaptureFault implements the EVMLogger interface to trace an execution fault. +func (t *fourByteTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +} + +// CaptureEnd is called after the call finishes to finalize the tracing. +func (t *fourByteTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { +} + +func (*fourByteTracer) CaptureTxStart(gasLimit uint64) {} + +func (*fourByteTracer) CaptureTxEnd(restGas uint64) {} + // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *fourByteTracer) GetResult() (json.RawMessage, error) { @@ -134,9 +158,5 @@ func (t *fourByteTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. 
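A hedged sketch of driving the 4byte collector by name through `tracers.New` (added later in this patch); the EVM wiring itself is elided, and the result shape follows `GetResult` above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tenderly/coreth/eth/tracers"
	// A blank import runs this package's init functions, which register
	// "4byteTracer" with the tracers lookup.
	_ "github.com/tenderly/coreth/eth/tracers/native"
)

func main() {
	// Resolve the tracer by name; the 4byte tracer takes no JSON config.
	tracer, err := tracers.New("4byteTracer", new(tracers.Context), nil)
	if err != nil {
		panic(err)
	}

	// ... run a message with vm.Config{Debug: true, Tracer: tracer} here ...

	// GetResult marshals the accumulated "selector-calldatasize" -> count map.
	res, err := tracer.GetResult()
	if err != nil {
		panic(err)
	}
	var ids map[string]int
	if err := json.Unmarshal(res, &ids); err != nil {
		panic(err)
	}
	fmt.Println(ids) // e.g. map[0x27dc297e-128:1 0x38cc4831-0:2]
}
```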
func (t *fourByteTracer) Stop(err error) { t.reason = err - t.interrupt.Store(true) -} - -func bytesToHex(s []byte) string { - return "0x" + common.Bytes2Hex(s) + atomic.StoreUint32(&t.interrupt, 1) } diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go index 11daa94aa4..8290504a50 100644 --- a/eth/tracers/native/noop.go +++ b/eth/tracers/native/noop.go @@ -30,9 +30,9 @@ import ( "encoding/json" "math/big" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers" "github.com/ethereum/go-ethereum/common" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers" ) func init() { diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 5c800646c7..cf35b47841 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -27,76 +27,44 @@ package native import ( - "bytes" "encoding/json" "math/big" "sync/atomic" + "time" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers" ) -//go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go - func init() { - tracers.DefaultDirectory.Register("prestateTracer", newPrestateTracer, false) + register("prestateTracer", newPrestateTracer) } -type state = map[common.Address]*account - +type prestate = map[common.Address]*account type account struct { - Balance *big.Int `json:"balance,omitempty"` - Code []byte `json:"code,omitempty"` - Nonce uint64 `json:"nonce,omitempty"` - Storage map[common.Hash]common.Hash `json:"storage,omitempty"` -} - -func (a *account) exists() bool { - return a.Nonce > 0 || len(a.Code) > 0 || len(a.Storage) > 0 || (a.Balance != nil && a.Balance.Sign() != 0) -} - -type accountMarshaling struct { - Balance *hexutil.Big - Code hexutil.Bytes + Balance string `json:"balance"` + Nonce uint64 `json:"nonce"` + Code string `json:"code"` + Storage map[common.Hash]common.Hash `json:"storage"` } type prestateTracer struct { - noopTracer env *vm.EVM - pre state - post state + prestate prestate create bool to common.Address gasLimit uint64 // Amount of gas bought for the whole tx - config prestateTracerConfig - interrupt atomic.Bool // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption - created map[common.Address]bool - deleted map[common.Address]bool -} - -type prestateTracerConfig struct { - DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications + interrupt uint32 // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption } -func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { - var config prestateTracerConfig - if cfg != nil { - if err := json.Unmarshal(cfg, &config); err != nil { - return nil, err - } - } - return &prestateTracer{ - pre: state{}, - post: state{}, - config: config, - created: make(map[common.Address]bool), - deleted: make(map[common.Address]bool), - }, nil +func newPrestateTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { + // First callframe contains tx context info + // and is populated on start and end. 
+ return &prestateTracer{prestate: prestate{}}, nil } // CaptureStart implements the EVMLogger interface to initialize the tracing operation. @@ -107,168 +75,82 @@ func (t *prestateTracer) CaptureStart(env *vm.EVM, from common.Address, to commo t.lookupAccount(from) t.lookupAccount(to) - t.lookupAccount(env.Context.Coinbase) // The recipient balance includes the value transferred. - toBal := new(big.Int).Sub(t.pre[to].Balance, value) - t.pre[to].Balance = toBal + toBal := hexutil.MustDecodeBig(t.prestate[to].Balance) + toBal = new(big.Int).Sub(toBal, value) + t.prestate[to].Balance = hexutil.EncodeBig(toBal) // The sender balance is after reducing: value and gasLimit. // We need to re-add them to get the pre-tx balance. - fromBal := new(big.Int).Set(t.pre[from].Balance) + fromBal := hexutil.MustDecodeBig(t.prestate[from].Balance) gasPrice := env.TxContext.GasPrice consumedGas := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(t.gasLimit)) fromBal.Add(fromBal, new(big.Int).Add(value, consumedGas)) - t.pre[from].Balance = fromBal - t.pre[from].Nonce-- - - if create && t.config.DiffMode { - t.created[to] = true - } + t.prestate[from].Balance = hexutil.EncodeBig(fromBal) + t.prestate[from].Nonce-- } // CaptureEnd is called after the call finishes to finalize the tracing. -func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { - if t.config.DiffMode { - return - } - +func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { if t.create { - // Keep existing account prior to contract creation at that address - if s := t.pre[t.to]; s != nil && !s.exists() { - // Exclude newly created contract. - delete(t.pre, t.to) - } + // Exclude created contract. + delete(t.prestate, t.to) } } // CaptureState implements the EVMLogger interface to trace a single step of VM execution. 
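The arithmetic in `CaptureStart` above is subtle: by the time tracing starts, the sender's balance has already been debited both the transferred value and the full gas purchase, so the pre-tx balance is recovered by adding them back (and the nonce by decrementing). A small sketch of that reconstruction; the helper name is hypothetical:

```go
package main

import (
	"fmt"
	"math/big"
)

// preTxSenderBalance mirrors the CaptureStart arithmetic: the observed
// balance already had value and the whole gasPrice*gasLimit purchase
// deducted, so both are re-added. Helper name is illustrative only.
func preTxSenderBalance(seen, value, gasPrice *big.Int, gasLimit uint64) *big.Int {
	consumed := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gasLimit))
	return new(big.Int).Add(seen, new(big.Int).Add(value, consumed))
}

func main() {
	// seen=1_000_000, value=500, gasPrice=10, gasLimit=21000:
	// pre-tx balance = 1_000_000 + 500 + 10*21000 = 1_210_500
	fmt.Println(preTxSenderBalance(big.NewInt(1_000_000), big.NewInt(500), big.NewInt(10), 21000))
}
```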
func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - if err != nil { - return - } - // Skip if tracing was interrupted - if t.interrupt.Load() { - return - } stack := scope.Stack stackData := stack.Data() stackLen := len(stackData) - caller := scope.Contract.Address() switch { case stackLen >= 1 && (op == vm.SLOAD || op == vm.SSTORE): slot := common.Hash(stackData[stackLen-1].Bytes32()) - t.lookupStorage(caller, slot) + t.lookupStorage(scope.Contract.Address(), slot) case stackLen >= 1 && (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT): addr := common.Address(stackData[stackLen-1].Bytes20()) t.lookupAccount(addr) - if op == vm.SELFDESTRUCT { - t.deleted[caller] = true - } case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE): addr := common.Address(stackData[stackLen-2].Bytes20()) t.lookupAccount(addr) case op == vm.CREATE: - nonce := t.env.StateDB.GetNonce(caller) - addr := crypto.CreateAddress(caller, nonce) - t.lookupAccount(addr) - t.created[addr] = true + addr := scope.Contract.Address() + nonce := t.env.StateDB.GetNonce(addr) + t.lookupAccount(crypto.CreateAddress(addr, nonce)) case stackLen >= 4 && op == vm.CREATE2: offset := stackData[stackLen-2] size := stackData[stackLen-3] - init, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(offset.Uint64()), int64(size.Uint64())) - if err != nil { - log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "prestateTracer", "offset", offset, "size", size) - return - } + init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) inithash := crypto.Keccak256(init) salt := stackData[stackLen-4] - addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash) - t.lookupAccount(addr) - t.created[addr] = true + t.lookupAccount(crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), inithash)) } } -func (t *prestateTracer) CaptureTxStart(gasLimit uint64) { - t.gasLimit = gasLimit +// CaptureFault implements the EVMLogger interface to trace an execution fault. +func (t *prestateTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { } -func (t *prestateTracer) CaptureTxEnd(restGas uint64) { - if !t.config.DiffMode { - return - } - - for addr, state := range t.pre { - // The deleted account's state is pruned from `post` but kept in `pre` - if _, ok := t.deleted[addr]; ok { - continue - } - modified := false - postAccount := &account{Storage: make(map[common.Hash]common.Hash)} - newBalance := t.env.StateDB.GetBalance(addr) - newNonce := t.env.StateDB.GetNonce(addr) - newCode := t.env.StateDB.GetCode(addr) - - if newBalance.Cmp(t.pre[addr].Balance) != 0 { - modified = true - postAccount.Balance = newBalance - } - if newNonce != t.pre[addr].Nonce { - modified = true - postAccount.Nonce = newNonce - } - if !bytes.Equal(newCode, t.pre[addr].Code) { - modified = true - postAccount.Code = newCode - } +// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). 
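The `CREATE`/`CREATE2` arms above pre-derive the address of the contract about to be created so its pre-state can be captured before the opcode executes. A sketch of the same derivations with assumed inputs:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	deployer := common.HexToAddress("0x00000000000000000000000000000000deadbeef")

	// CREATE: address = keccak256(rlp(deployer, nonce))[12:]
	fmt.Println(crypto.CreateAddress(deployer, 1))

	// CREATE2: address = keccak256(0xff ++ deployer ++ salt ++ keccak256(init))[12:]
	initCode := []byte{0x60, 0x00} // placeholder init code
	var salt [32]byte
	fmt.Println(crypto.CreateAddress2(deployer, salt, crypto.Keccak256(initCode)))
}
```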
+func (t *prestateTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} - for key, val := range state.Storage { - // don't include the empty slot - if val == (common.Hash{}) { - delete(t.pre[addr].Storage, key) - } +func (t *prestateTracer) CaptureTxStart(gasLimit uint64) { + t.gasLimit = gasLimit +} - newVal := t.env.StateDB.GetState(addr, key) - if val == newVal { - // Omit unchanged slots - delete(t.pre[addr].Storage, key) - } else { - modified = true - if newVal != (common.Hash{}) { - postAccount.Storage[key] = newVal - } - } - } +func (t *prestateTracer) CaptureTxEnd(restGas uint64) {} - if modified { - t.post[addr] = postAccount - } else { - // if state is not modified, then no need to include into the pre state - delete(t.pre, addr) - } - } - // the new created contracts' prestate were empty, so delete them - for a := range t.created { - // the created contract maybe exists in statedb before the creating tx - if s := t.pre[a]; s != nil && !s.exists() { - delete(t.pre, a) - } - } +// CaptureExit is called when EVM exits a scope, even if the scope didn't +// execute any code. +func (t *prestateTracer) CaptureExit(output []byte, gasUsed uint64, err error) { } // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *prestateTracer) GetResult() (json.RawMessage, error) { - var res []byte - var err error - if t.config.DiffMode { - res, err = json.Marshal(struct { - Post state `json:"post"` - Pre state `json:"pre"` - }{t.post, t.pre}) - } else { - res, err = json.Marshal(t.pre) - } + res, err := json.Marshal(t.prestate) if err != nil { return nil, err } @@ -278,20 +160,19 @@ func (t *prestateTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. func (t *prestateTracer) Stop(err error) { t.reason = err - t.interrupt.Store(true) + atomic.StoreUint32(&t.interrupt, 1) } // lookupAccount fetches details of an account and adds it to the prestate // if it doesn't exist there. func (t *prestateTracer) lookupAccount(addr common.Address) { - if _, ok := t.pre[addr]; ok { + if _, ok := t.prestate[addr]; ok { return } - - t.pre[addr] = &account{ - Balance: t.env.StateDB.GetBalance(addr), + t.prestate[addr] = &account{ + Balance: bigToHex(t.env.StateDB.GetBalance(addr)), Nonce: t.env.StateDB.GetNonce(addr), - Code: t.env.StateDB.GetCode(addr), + Code: bytesToHex(t.env.StateDB.GetCode(addr)), Storage: make(map[common.Hash]common.Hash), } } @@ -300,16 +181,8 @@ func (t *prestateTracer) lookupAccount(addr common.Address) { // it to the prestate of the given contract. It assumes `lookupAccount` // has been performed on the contract before. func (t *prestateTracer) lookupStorage(addr common.Address, key common.Hash) { - // lookupStorage assumes that lookupAccount has already been called. - // This assumption is violated for some historical blocks by the NativeAssetCall - // precompile. To fix this, we perform an extra call to lookupAccount here to ensure - // that the pre-state account is populated before attempting to read from the Storage - // map. When the invariant is maintained properly (since de-activation of the precompile), - // lookupAccount is a no-op. When the invariant is broken by the precompile, this avoids - // the panic and correctly captures the account prestate before the next opcode is executed. 
-	t.lookupAccount(addr)
-	if _, ok := t.pre[addr].Storage[key]; ok {
+	if _, ok := t.prestate[addr].Storage[key]; ok {
 		return
 	}
-	t.pre[addr].Storage[key] = t.env.StateDB.GetState(addr, key)
+	t.prestate[addr].Storage[key] = t.env.StateDB.GetState(addr, key)
 }
diff --git a/eth/tracers/native/revertreason.go b/eth/tracers/native/revertreason.go
new file mode 100644
index 0000000000..0973a6f1dd
--- /dev/null
+++ b/eth/tracers/native/revertreason.go
@@ -0,0 +1,119 @@
+// (c) 2022, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package native
+
+import (
+	"bytes"
+	"encoding/json"
+	"math/big"
+	"sync/atomic"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/tenderly/coreth/accounts/abi"
+	"github.com/tenderly/coreth/core/vm"
+	"github.com/tenderly/coreth/eth/tracers"
+	"github.com/tenderly/coreth/vmerrs"
+)
+
+func init() {
+	register("revertReasonTracer", newRevertReasonTracer)
+}
+
+var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
+
+// revertReasonTracer is a Go implementation of the Tracer interface which
+// tracks the error message or revert reason returned by the contract.
+type revertReasonTracer struct {
+	env          *vm.EVM
+	revertReason string // The revert reason returned by the tx; empty if the tx succeeded
+	interrupt    uint32 // Atomic flag to signal execution interruption
+	reason       error  // Textual reason for the interruption
+}
+
+// newRevertReasonTracer returns a new revert reason tracer.
+func newRevertReasonTracer(_ *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) {
+	return &revertReasonTracer{}, nil
+}
+
+// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
+func (t *revertReasonTracer) CaptureStart(env *vm.EVM, _ common.Address, _ common.Address, _ bool, _ []byte, _ uint64, _ *big.Int) {
+	t.env = env
+}
+
+// CaptureEnd is called after the call finishes to finalize the tracing.
+func (t *revertReasonTracer) CaptureEnd(output []byte, _ uint64, _ time.Duration, err error) {
+	if err != nil {
+		if err == vmerrs.ErrExecutionReverted && len(output) > 4 && bytes.Equal(output[:4], revertSelector) {
+			errMsg, _ := abi.UnpackRevert(output)
+			t.revertReason = err.Error() + ": " + errMsg
+		} else {
+			t.revertReason = err.Error()
+		}
+	}
+}
+
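For a concrete picture of what the `CaptureEnd` branch above handles: a Solidity `revert("nope")` yields the `Error(string)` selector followed by an ABI-encoded string, which `abi.UnpackRevert` recovers. A hand-encoded sketch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/tenderly/coreth/accounts/abi"
)

func main() {
	// revert("nope"): Error(string) selector, then the ABI encoding of the
	// string (offset, length, right-padded bytes).
	output := hexutil.MustDecode("0x08c379a0" + // Keccak256("Error(string)")[:4]
		"0000000000000000000000000000000000000000000000000000000000000020" + // offset
		"0000000000000000000000000000000000000000000000000000000000000004" + // length
		"6e6f706500000000000000000000000000000000000000000000000000000000") // "nope"
	reason, err := abi.UnpackRevert(output)
	fmt.Println(reason, err) // nope <nil>
}
```

+// CaptureState implements the EVMLogger interface to trace a single step of VM execution.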
+func (t *revertReasonTracer) CaptureState(_ uint64, _ vm.OpCode, _, _ uint64, _ *vm.ScopeContext, _ []byte, _ int, _ error) {
+}
+
+// CaptureFault implements the EVMLogger interface to trace an execution fault.
+func (t *revertReasonTracer) CaptureFault(_ uint64, _ vm.OpCode, _, _ uint64, _ *vm.ScopeContext, _ int, _ error) {
+}
+
+// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
+func (t *revertReasonTracer) CaptureEnter(_ vm.OpCode, _ common.Address, _ common.Address, _ []byte, _ uint64, _ *big.Int) {
+	// Skip if tracing was interrupted
+	if atomic.LoadUint32(&t.interrupt) > 0 {
+		t.env.Cancel()
+		return
+	}
+}
+
+// CaptureExit is called when EVM exits a scope, even if the scope didn't
+// execute any code.
+func (t *revertReasonTracer) CaptureExit(_ []byte, _ uint64, _ error) {}
+
+func (t *revertReasonTracer) CaptureTxStart(_ uint64) {}
+
+func (t *revertReasonTracer) CaptureTxEnd(_ uint64) {}
+
+// GetResult returns the json-encoded revert reason, and any error arising from
+// forceful termination (via `Stop`).
+func (t *revertReasonTracer) GetResult() (json.RawMessage, error) {
+	res, err := json.Marshal(t.revertReason)
+	if err != nil {
+		return nil, err
+	}
+	return res, t.reason
+}
+
+// Stop terminates execution of the tracer at the first opportune moment.
+func (t *revertReasonTracer) Stop(err error) {
+	t.reason = err
+	atomic.StoreUint32(&t.interrupt, 1)
+}
diff --git a/eth/tracers/native/tracer.go b/eth/tracers/native/tracer.go
new file mode 100644
index 0000000000..729b5e477f
--- /dev/null
+++ b/eth/tracers/native/tracer.go
@@ -0,0 +1,93 @@
+// (c) 2020-2021, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+Package native is a collection of tracers written in Go.
+
+In order to add a native tracer and have it compiled into the binary, a new
+file needs to be added to this folder, containing an implementation of the
+`eth.tracers.Tracer` interface.
+
+Aside from implementing the tracer, it also needs to register itself, using the
+`register` method -- and this needs to be done in the package initialization.
+
+Example:
+
+```golang
+func init() {
+	register("noopTracerNative", newNoopTracer)
+}
+```
+*/
+package native
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/tenderly/coreth/eth/tracers"
+)
+
+// init registers this package as a lookup for tracers.
+func init() {
+	tracers.RegisterLookup(false, lookup)
+}
+
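Building on the package doc above, a hedged sketch of a complete native tracer against this fork's `vm.EVMLogger` method set; `countTracer` is hypothetical, exists only to illustrate the registration pattern, and would live in a new file in this folder:

```go
package native

import (
	"encoding/json"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/core/vm"
	"github.com/tenderly/coreth/eth/tracers"
)

func init() {
	register("countTracer", newCountTracer)
}

// countTracer counts executed opcodes; illustrative only, not part of the patch.
type countTracer struct {
	steps  uint64
	reason error
}

func newCountTracer(_ *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) {
	return &countTracer{}, nil
}

func (t *countTracer) CaptureStart(_ *vm.EVM, _ common.Address, _ common.Address, _ bool, _ []byte, _ uint64, _ *big.Int) {
}

// CaptureState is invoked once per opcode; this tracer only counts them.
func (t *countTracer) CaptureState(_ uint64, _ vm.OpCode, _, _ uint64, _ *vm.ScopeContext, _ []byte, _ int, _ error) {
	t.steps++
}

func (t *countTracer) CaptureFault(_ uint64, _ vm.OpCode, _, _ uint64, _ *vm.ScopeContext, _ int, _ error) {
}
func (t *countTracer) CaptureEnd(_ []byte, _ uint64, _ time.Duration, _ error) {}
func (t *countTracer) CaptureEnter(_ vm.OpCode, _ common.Address, _ common.Address, _ []byte, _ uint64, _ *big.Int) {
}
func (t *countTracer) CaptureExit(_ []byte, _ uint64, _ error) {}
func (t *countTracer) CaptureTxStart(_ uint64)                 {}
func (t *countTracer) CaptureTxEnd(_ uint64)                   {}

// GetResult reports the opcode count as JSON.
func (t *countTracer) GetResult() (json.RawMessage, error) {
	res, err := json.Marshal(t.steps)
	if err != nil {
		return nil, err
	}
	return res, t.reason
}

func (t *countTracer) Stop(err error) { t.reason = err }
```

+// ctorFn is the constructor signature of a native tracer.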
+type ctorFn = func(*tracers.Context, json.RawMessage) (tracers.Tracer, error)
+
+/*
+ctors is a map of package-local tracer constructors.
+
+We cannot be certain about the order of init-functions within a package.
+The Go spec (https://golang.org/ref/spec#Package_initialization) says
+
+> To ensure reproducible initialization behavior, build systems
+> are encouraged to present multiple files belonging to the same
+> package in lexical file name order to a compiler.
+
+Hence, we cannot make the map in init, but must make it upon first use.
+*/
+var ctors map[string]ctorFn
+
+// register is used by native tracers to register their presence.
+func register(name string, ctor ctorFn) {
+	if ctors == nil {
+		ctors = make(map[string]ctorFn)
+	}
+	ctors[name] = ctor
+}
+
+// lookup returns a tracer, if one can be matched to the given name.
+func lookup(name string, ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) {
+	if ctors == nil {
+		ctors = make(map[string]ctorFn)
+	}
+	if ctor, ok := ctors[name]; ok {
+		return ctor(ctx, cfg)
+	}
+	return nil, errors.New("no tracer found")
+}
diff --git a/eth/tracers/tracers.go b/eth/tracers/tracers.go
index 7a754fa824..4c434fd3f3 100644
--- a/eth/tracers/tracers.go
+++ b/eth/tracers/tracers.go
@@ -20,20 +20,17 @@ package tracers
 import (
 	"encoding/json"
 	"errors"
-	"fmt"
-	"math/big"
-
-	"github.com/ava-labs/coreth/core/vm"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/tenderly/coreth/core/vm"
 )

 // Context contains some contextual infos for a transaction execution that is not
 // available from within the EVM object.
 type Context struct {
-	BlockHash   common.Hash // Hash of the block the tx is contained within (zero if dangling tx or call)
-	BlockNumber *big.Int    // Number of the block the tx is contained within (zero if dangling tx or call)
-	TxIndex     int         // Index of the transaction within a block (zero if dangling tx or call)
-	TxHash      common.Hash // Hash of the transaction being traced (zero if dangling call)
+	BlockHash common.Hash // Hash of the block the tx is contained within (zero if dangling tx or call)
+	TxIndex   int         // Index of the transaction within a block (zero if dangling tx or call)
+	TxHash    common.Hash // Hash of the transaction being traced (zero if dangling call)
 }

 // Tracer interface extends vm.EVMLogger and additionally
@@ -45,79 +42,31 @@ type Tracer interface {
 	Stop(err error)
 }

-type ctorFn func(*Context, json.RawMessage) (Tracer, error)
-type jsCtorFn func(string, *Context, json.RawMessage) (Tracer, error)
+type lookupFunc func(string, *Context, json.RawMessage) (Tracer, error)

-type elem struct {
-	ctor ctorFn
-	isJS bool
-}
-
-// DefaultDirectory is the collection of tracers bundled by default.
-var DefaultDirectory = directory{elems: make(map[string]elem)}
-
-// directory provides functionality to lookup a tracer by name
-// and a function to instantiate it. It falls back to a JS code evaluator
-// if no tracer of the given name exists.
-type directory struct {
-	elems  map[string]elem
-	jsEval jsCtorFn
-}
-
-// Register registers a method as a lookup for tracers, meaning that
-// users can invoke a named tracer through that lookup.
-func (d *directory) Register(name string, f ctorFn, isJS bool) {
-	d.elems[name] = elem{ctor: f, isJS: isJS}
-}
-
-// RegisterJSEval registers a tracer that is able to parse
-// dynamic user-provided JS code.
-func (d *directory) RegisterJSEval(f jsCtorFn) {
-	d.jsEval = f
-}
-
-// New returns a new instance of a tracer, by iterating through the
-// registered lookups.
Name is either name of an existing tracer -// or an arbitrary JS code. -func (d *directory) New(name string, ctx *Context, cfg json.RawMessage) (Tracer, error) { - if elem, ok := d.elems[name]; ok { - return elem.ctor(ctx, cfg) - } - // Assume JS code - return d.jsEval(name, ctx, cfg) -} +var ( + lookups []lookupFunc +) -// IsJS will return true if the given tracer will evaluate -// JS code. Because code evaluation has high overhead, this -// info will be used in determining fast and slow code paths. -func (d *directory) IsJS(name string) bool { - if elem, ok := d.elems[name]; ok { - return elem.isJS +// RegisterLookup registers a method as a lookup for tracers, meaning that +// users can invoke a named tracer through that lookup. If 'wildcard' is true, +// then the lookup will be placed last. This is typically meant for interpreted +// engines (js) which can evaluate dynamic user-supplied code. +func RegisterLookup(wildcard bool, lookup lookupFunc) { + if wildcard { + lookups = append(lookups, lookup) + } else { + lookups = append([]lookupFunc{lookup}, lookups...) } - // JS eval will execute JS code - return true } -const ( - memoryPadLimit = 1024 * 1024 -) - -// GetMemoryCopyPadded returns offset + size as a new slice. -// It zero-pads the slice if it extends beyond memory bounds. -func GetMemoryCopyPadded(m *vm.Memory, offset, size int64) ([]byte, error) { - if offset < 0 || size < 0 { - return nil, errors.New("offset or size must not be negative") - } - if int(offset+size) < m.Len() { // slice fully inside memory - return m.GetCopy(offset, size), nil - } - paddingNeeded := int(offset+size) - m.Len() - if paddingNeeded > memoryPadLimit { - return nil, fmt.Errorf("reached limit for padding memory slice: %d", paddingNeeded) - } - cpy := make([]byte, size) - if overlap := int64(m.Len()) - offset; overlap > 0 { - copy(cpy, m.GetPtr(offset, overlap)) +// New returns a new instance of a tracer, by iterating through the +// registered lookups. 
+func New(code string, ctx *Context, cfg json.RawMessage) (Tracer, error) { + for _, lookup := range lookups { + if tracer, err := lookup(code, ctx, cfg); err == nil { + return tracer, nil + } } - return cpy, nil + return nil, errors.New("tracer not found") } diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 2fe2676d75..90fb4eb2e0 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -30,13 +30,13 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/tests" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers/logger" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/tests" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) diff --git a/ethclient/corethclient/corethclient.go b/ethclient/corethclient/corethclient.go index aa3e328e76..08e12b246f 100644 --- a/ethclient/corethclient/corethclient.go +++ b/ethclient/corethclient/corethclient.go @@ -33,10 +33,10 @@ import ( "runtime" "runtime/debug" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethclient" - "github.com/ava-labs/coreth/interfaces" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethclient" + "github.com/tenderly/coreth/interfaces" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index e2ed88d4ac..92266089cc 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -35,10 +35,10 @@ import ( "math/big" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/accounts/abi/bind" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/interfaces" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/accounts/abi/bind" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/interfaces" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" diff --git a/ethclient/signer.go b/ethclient/signer.go index ba647de1a6..f4822449ae 100644 --- a/ethclient/signer.go +++ b/ethclient/signer.go @@ -30,7 +30,7 @@ import ( "errors" "math/big" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" ) diff --git a/ethdb/batch.go b/ethdb/batch.go new file mode 100644 index 0000000000..be4f52d8c1 --- /dev/null +++ b/ethdb/batch.go @@ -0,0 +1,84 @@ +// (c) 2020-2021, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethdb
+
+// IdealBatchSize defines the size of the data batches that should ideally be
+// added in one write.
+const IdealBatchSize = 100 * 1024
+
+// Batch is a write-only database that commits changes to its host database
+// when Write is called. A batch cannot be used concurrently.
+type Batch interface {
+	KeyValueWriter
+
+	// ValueSize retrieves the amount of data queued up for writing.
+	ValueSize() int
+
+	// Write flushes any accumulated data to disk.
+	Write() error
+
+	// Reset resets the batch for reuse.
+	Reset()
+
+	// Replay replays the batch contents.
+	Replay(w KeyValueWriter) error
+}
+
+// Batcher wraps the NewBatch method of a backing data store.
+type Batcher interface {
+	// NewBatch creates a write-only database that buffers changes to its host db
+	// until a final write is called.
+	NewBatch() Batch
+
+	// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+	NewBatchWithSize(size int) Batch
+}
+
+// HookedBatch wraps an arbitrary batch where each operation may be hooked into
+// to allow monitoring from black-box code.
+type HookedBatch struct {
+	Batch
+
+	OnPut    func(key []byte, value []byte) // Callback if a key is inserted
+	OnDelete func(key []byte)               // Callback if a key is deleted
+}
+
+// Put inserts the given value into the key-value data store.
+func (b HookedBatch) Put(key []byte, value []byte) error {
+	if b.OnPut != nil {
+		b.OnPut(key, value)
+	}
+	return b.Batch.Put(key, value)
+}
+
+// Delete removes the key from the key-value data store.
+func (b HookedBatch) Delete(key []byte) error {
+	if b.OnDelete != nil {
+		b.OnDelete(key)
+	}
+	return b.Batch.Delete(key)
+}
diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
new file mode 100644
index 0000000000..e6a083d36f
--- /dev/null
+++ b/ethdb/dbtest/testsuite.go
@@ -0,0 +1,335 @@
+// (c) 2020-2021, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package dbtest + +import ( + "bytes" + "reflect" + "sort" + "testing" + + "github.com/tenderly/coreth/ethdb" +) + +// TestDatabaseSuite runs a suite of tests against a KeyValueStore database +// implementation. +func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) { + t.Run("Iterator", func(t *testing.T) { + tests := []struct { + content map[string]string + prefix string + start string + order []string + }{ + // Empty databases should be iterable + {map[string]string{}, "", "", nil}, + {map[string]string{}, "non-existent-prefix", "", nil}, + + // Single-item databases should be iterable + {map[string]string{"key": "val"}, "", "", []string{"key"}}, + {map[string]string{"key": "val"}, "k", "", []string{"key"}}, + {map[string]string{"key": "val"}, "l", "", nil}, + + // Multi-item databases should be fully iterable + { + map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"}, + "", "", + []string{"k1", "k2", "k3", "k4", "k5"}, + }, + { + map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"}, + "k", "", + []string{"k1", "k2", "k3", "k4", "k5"}, + }, + { + map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"}, + "l", "", + nil, + }, + // Multi-item databases should be prefix-iterable + { + map[string]string{ + "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", + "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", + }, + "ka", "", + []string{"ka1", "ka2", "ka3", "ka4", "ka5"}, + }, + { + map[string]string{ + "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", + "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", + }, + "kc", "", + nil, + }, + // Multi-item databases should be prefix-iterable with start position + { + map[string]string{ + "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", + "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", + }, + "ka", "3", + []string{"ka3", "ka4", "ka5"}, + }, + { + map[string]string{ + "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", + "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", + }, + "ka", "8", + nil, + }, + } + for i, tt := range tests { + // Create the key-value data store + db := New() + for key, val := range tt.content { + if err := db.Put([]byte(key), []byte(val)); err != nil { + t.Fatalf("test %d: failed to insert item %s:%s into database: %v", i, key, val, err) + } + } + // Iterate over the database with the given configs and verify the results + it, idx := db.NewIterator([]byte(tt.prefix), []byte(tt.start)), 0 + for it.Next() { + if len(tt.order) <= idx { + t.Errorf("test %d: prefix=%q more items than expected: checking idx=%d (key %q), expecting len=%d", i, tt.prefix, idx, it.Key(), len(tt.order)) + break + } + if !bytes.Equal(it.Key(), []byte(tt.order[idx])) { + t.Errorf("test %d: item %d: key mismatch: have %s, want %s", i, idx, string(it.Key()), tt.order[idx]) + } + if !bytes.Equal(it.Value(), []byte(tt.content[tt.order[idx]])) { + t.Errorf("test %d: item %d: value mismatch: have %s, want %s", i, idx, string(it.Value()), tt.content[tt.order[idx]]) + } + idx++ + } + if err := it.Error(); err != nil { + t.Errorf("test %d: iteration failed: %v", i, err) + } + if idx != len(tt.order) { + t.Errorf("test %d: iteration terminated prematurely: have %d, want %d", i, idx, 
len(tt.order)) + } + db.Close() + } + }) + + t.Run("IteratorWith", func(t *testing.T) { + db := New() + defer db.Close() + + keys := []string{"1", "2", "3", "4", "6", "10", "11", "12", "20", "21", "22"} + sort.Strings(keys) // 1, 10, 11, etc + + for _, k := range keys { + if err := db.Put([]byte(k), nil); err != nil { + t.Fatal(err) + } + } + + { + it := db.NewIterator(nil, nil) + got, want := iterateKeys(it), keys + if err := it.Error(); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Iterator: got: %s; want: %s", got, want) + } + } + + { + it := db.NewIterator([]byte("1"), nil) + got, want := iterateKeys(it), []string{"1", "10", "11", "12"} + if err := it.Error(); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("IteratorWith(1,nil): got: %s; want: %s", got, want) + } + } + + { + it := db.NewIterator([]byte("5"), nil) + got, want := iterateKeys(it), []string{} + if err := it.Error(); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("IteratorWith(5,nil): got: %s; want: %s", got, want) + } + } + + { + it := db.NewIterator(nil, []byte("2")) + got, want := iterateKeys(it), []string{"2", "20", "21", "22", "3", "4", "6"} + if err := it.Error(); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("IteratorWith(nil,2): got: %s; want: %s", got, want) + } + } + + { + it := db.NewIterator(nil, []byte("5")) + got, want := iterateKeys(it), []string{"6"} + if err := it.Error(); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("IteratorWith(nil,5): got: %s; want: %s", got, want) + } + } + }) + + t.Run("KeyValueOperations", func(t *testing.T) { + db := New() + defer db.Close() + + key := []byte("foo") + + if got, err := db.Has(key); err != nil { + t.Error(err) + } else if got { + t.Errorf("wrong value: %t", got) + } + + value := []byte("hello world") + if err := db.Put(key, value); err != nil { + t.Error(err) + } + + if got, err := db.Has(key); err != nil { + t.Error(err) + } else if !got { + t.Errorf("wrong value: %t", got) + } + + if got, err := db.Get(key); err != nil { + t.Error(err) + } else if !bytes.Equal(got, value) { + t.Errorf("wrong value: %q", got) + } + + if err := db.Delete(key); err != nil { + t.Error(err) + } + + if got, err := db.Has(key); err != nil { + t.Error(err) + } else if got { + t.Errorf("wrong value: %t", got) + } + }) + + t.Run("Batch", func(t *testing.T) { + db := New() + defer db.Close() + + b := db.NewBatch() + for _, k := range []string{"1", "2", "3", "4"} { + if err := b.Put([]byte(k), nil); err != nil { + t.Fatal(err) + } + } + + if has, err := db.Has([]byte("1")); err != nil { + t.Fatal(err) + } else if has { + t.Error("db contains element before batch write") + } + + if err := b.Write(); err != nil { + t.Fatal(err) + } + + { + it := db.NewIterator(nil, nil) + if got, want := iterateKeys(it), []string{"1", "2", "3", "4"}; !reflect.DeepEqual(got, want) { + t.Errorf("got: %s; want: %s", got, want) + } + } + + b.Reset() + + // Mix writes and deletes in batch + b.Put([]byte("5"), nil) + b.Delete([]byte("1")) + b.Put([]byte("6"), nil) + b.Delete([]byte("3")) + b.Put([]byte("3"), nil) + + if err := b.Write(); err != nil { + t.Fatal(err) + } + + { + it := db.NewIterator(nil, nil) + if got, want := iterateKeys(it), []string{"2", "3", "4", "5", "6"}; !reflect.DeepEqual(got, want) { + t.Errorf("got: %s; want: %s", got, want) + } + } + }) + + t.Run("BatchReplay", func(t *testing.T) { + db := New() + defer db.Close() + + want := 
[]string{"1", "2", "3", "4"} + b := db.NewBatch() + for _, k := range want { + if err := b.Put([]byte(k), nil); err != nil { + t.Fatal(err) + } + } + + b2 := db.NewBatch() + if err := b.Replay(b2); err != nil { + t.Fatal(err) + } + + if err := b2.Replay(db); err != nil { + t.Fatal(err) + } + + it := db.NewIterator(nil, nil) + if got := iterateKeys(it); !reflect.DeepEqual(got, want) { + t.Errorf("got: %s; want: %s", got, want) + } + }) +} + +func iterateKeys(it ethdb.Iterator) []string { + keys := []string{} + for it.Next() { + keys = append(keys, string(it.Key())) + } + sort.Strings(keys) + it.Release() + return keys +} diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go new file mode 100644 index 0000000000..ed8d43d16f --- /dev/null +++ b/ethdb/leveldb/leveldb.go @@ -0,0 +1,539 @@ +// (c) 2021-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build !js +// +build !js + +// Package leveldb implements the key-value database layer based on LevelDB. +package leveldb + +import ( + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/metrics" +) + +const ( + // degradationWarnInterval specifies how often warning should be printed if the + // leveldb database cannot keep up with requested writes. + degradationWarnInterval = time.Minute + + // minCache is the minimum amount of memory in megabytes to allocate to leveldb + // read and write caching, split half and half. + minCache = 16 + + // minHandles is the minimum number of files handles to allocate to the open + // database files. + minHandles = 16 + + // metricsGatheringInterval specifies the interval to retrieve leveldb database + // compaction, io and pause stats to report to the user. + metricsGatheringInterval = 3 * time.Second +) + +// Database is a persistent key-value store. Apart from basic data storage +// functionality it also supports batch writes and iterating over the keyspace in +// binary-alphabetical order. 
+type Database struct { + fn string // filename for reporting + db *leveldb.DB // LevelDB instance + + compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction + compReadMeter metrics.Meter // Meter for measuring the data read during compaction + compWriteMeter metrics.Meter // Meter for measuring the data written during compaction + writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction + writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction + diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database + diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read + diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written + memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction + level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 + nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level + seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt + + quitLock sync.Mutex // Mutex protecting the quit channel access + quitChan chan chan error // Quit channel to stop the metrics collection before closing the database + + log log.Logger // Contextual logger tracking the database path +} + +// New returns a wrapped LevelDB object. The namespace is the prefix that the +// metrics reporting should use for surfacing internal stats. +func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { + return NewCustom(file, namespace, func(options *opt.Options) { + // Ensure we have some minimal caching and file guarantees + if cache < minCache { + cache = minCache + } + if handles < minHandles { + handles = minHandles + } + // Set default options + options.OpenFilesCacheCapacity = handles + options.BlockCacheCapacity = cache / 2 * opt.MiB + options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally + if readonly { + options.ReadOnly = true + } + }) +} + +// NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the +// metrics reporting should use for surfacing internal stats. +// The customize function allows the caller to modify the leveldb options. +func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) { + options := configureOptions(customize) + logger := log.New("database", file) + usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2 + logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()} + if options.ReadOnly { + logCtx = append(logCtx, "readonly", "true") + } + logger.Info("Allocated cache and file handles", logCtx...) 
+ + // Open the db and recover any potential corruptions + db, err := leveldb.OpenFile(file, options) + if _, corrupted := err.(*errors.ErrCorrupted); corrupted { + db, err = leveldb.RecoverFile(file, nil) + } + if err != nil { + return nil, err + } + // Assemble the wrapper with all the registered metrics + ldb := &Database{ + fn: file, + db: db, + log: logger, + quitChan: make(chan chan error), + } + ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil) + ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil) + ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil) + ldb.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil) + ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil) + ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil) + ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil) + ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil) + ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil) + ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil) + ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil) + ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil) + + // Start up the metrics gathering and return + go ldb.meter(metricsGatheringInterval) + return ldb, nil +} + +// configureOptions sets some default options, then runs the provided setter. +func configureOptions(customizeFn func(*opt.Options)) *opt.Options { + // Set default options + options := &opt.Options{ + Filter: filter.NewBloomFilter(10), + DisableSeeksCompaction: true, + } + // Allow caller to make custom modifications to the options + if customizeFn != nil { + customizeFn(options) + } + return options +} + +// Close stops the metrics collection, flushes any pending data to disk and closes +// all io accesses to the underlying key-value store. +func (db *Database) Close() error { + db.quitLock.Lock() + defer db.quitLock.Unlock() + + if db.quitChan != nil { + errc := make(chan error) + db.quitChan <- errc + if err := <-errc; err != nil { + db.log.Error("Metrics collection failed", "err", err) + } + db.quitChan = nil + } + return db.db.Close() +} + +// Has retrieves if a key is present in the key-value store. +func (db *Database) Has(key []byte) (bool, error) { + return db.db.Has(key, nil) +} + +// Get retrieves the given key if it's present in the key-value store. +func (db *Database) Get(key []byte) ([]byte, error) { + dat, err := db.db.Get(key, nil) + if err != nil { + return nil, err + } + return dat, nil +} + +// Put inserts the given value into the key-value store. +func (db *Database) Put(key []byte, value []byte) error { + return db.db.Put(key, value, nil) +} + +// Delete removes the key from the key-value store. +func (db *Database) Delete(key []byte) error { + return db.db.Delete(key, nil) +} + +// NewBatch creates a write-only key-value store that buffers changes to its host +// database until a final write is called. +func (db *Database) NewBatch() ethdb.Batch { + return &batch{ + db: db.db, + b: new(leveldb.Batch), + } +} + +// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. 
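A brief usage sketch for the wrapper above; the path, cache/handle sizing and metrics namespace are placeholder values:

```go
package main

import (
	"fmt"

	"github.com/tenderly/coreth/ethdb/leveldb"
)

func main() {
	// 16 MB cache, 16 file handles; the namespace prefixes exported metrics.
	db, err := leveldb.New("/tmp/example-ldb", 16, 16, "example/db/", false)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Buffer a few writes and flush them in one go.
	b := db.NewBatch()
	_ = b.Put([]byte("k1"), []byte("v1"))
	_ = b.Put([]byte("k2"), []byte("v2"))
	if err := b.Write(); err != nil {
		panic(err)
	}

	// Iterate everything under the "k" prefix in key order.
	it := db.NewIterator([]byte("k"), nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
}
```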
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+	return &batch{
+		db: db.db,
+		b:  leveldb.MakeBatch(size),
+	}
+}
+
+// NewIterator creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix, starting at a particular
+// initial key (or after, if it does not exist).
+func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
+	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
+}
+
+// Stat returns a particular internal stat of the database.
+func (db *Database) Stat(property string) (string, error) {
+	return db.db.GetProperty(property)
+}
+
+// Compact flattens the underlying data store for the given key range. In essence,
+// deleted and overwritten versions are discarded, and the data is rearranged to
+// reduce the cost of operations needed to access them.
+//
+// A nil start is treated as a key before all keys in the data store; a nil limit
+// is treated as a key after all keys in the data store. If both are nil, the
+// entire data store will be compacted.
+func (db *Database) Compact(start []byte, limit []byte) error {
+	return db.db.CompactRange(util.Range{Start: start, Limit: limit})
+}
+
+// Path returns the path to the database directory.
+func (db *Database) Path() string {
+	return db.fn
+}
+
+// meter periodically retrieves internal leveldb counters and reports them to
+// the metrics subsystem.
+//
+// This is what a LevelDB stats table looks like (currently):
+//	Compactions
+//	 Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
+//	-------+------------+---------------+---------------+---------------+---------------
+//	   0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
+//	   1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
+//	   2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
+//	   3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
+//
+// This is what the write delay looks like (currently):
+//	DelayN:5 Delay:406.604657ms Paused: false
+//
+// This is what the iostats look like (currently):
+//	Read(MB):3895.04860 Write(MB):3654.64712
+func (db *Database) meter(refresh time.Duration) {
+	// Create the counters to store current and previous compaction values
+	compactions := make([][]float64, 2)
+	for i := 0; i < 2; i++ {
+		compactions[i] = make([]float64, 4)
+	}
+	// Create storage for iostats.
+	var iostats [2]float64
+
+	// Create storage and warning log tracer for write delay.
+ var ( + delaystats [2]int64 + lastWritePaused time.Time + ) + + var ( + errc chan error + merr error + ) + + timer := time.NewTimer(refresh) + defer timer.Stop() + + // Iterate ad infinitum and collect the stats + for i := 1; errc == nil && merr == nil; i++ { + // Retrieve the database stats + stats, err := db.db.GetProperty("leveldb.stats") + if err != nil { + db.log.Error("Failed to read database stats", "err", err) + merr = err + continue + } + // Find the compaction table, skip the header + lines := strings.Split(stats, "\n") + for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" { + lines = lines[1:] + } + if len(lines) <= 3 { + db.log.Error("Compaction leveldbTable not found") + merr = errors.New("compaction leveldbTable not found") + continue + } + lines = lines[3:] + + // Iterate over all the leveldbTable rows, and accumulate the entries + for j := 0; j < len(compactions[i%2]); j++ { + compactions[i%2][j] = 0 + } + for _, line := range lines { + parts := strings.Split(line, "|") + if len(parts) != 6 { + break + } + for idx, counter := range parts[2:] { + value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64) + if err != nil { + db.log.Error("Compaction entry parsing failed", "err", err) + merr = err + continue + } + compactions[i%2][idx] += value + } + } + // Update all the requested meters + if db.diskSizeGauge != nil { + db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024)) + } + if db.compTimeMeter != nil { + db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000)) + } + if db.compReadMeter != nil { + db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024)) + } + if db.compWriteMeter != nil { + db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024)) + } + // Retrieve the write delay statistic + writedelay, err := db.db.GetProperty("leveldb.writedelay") + if err != nil { + db.log.Error("Failed to read database write delay statistic", "err", err) + merr = err + continue + } + var ( + delayN int64 + delayDuration string + duration time.Duration + paused bool + ) + if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil { + db.log.Error("Write delay statistic not found") + merr = err + continue + } + duration, err = time.ParseDuration(delayDuration) + if err != nil { + db.log.Error("Failed to parse delay duration", "err", err) + merr = err + continue + } + if db.writeDelayNMeter != nil { + db.writeDelayNMeter.Mark(delayN - delaystats[0]) + } + if db.writeDelayMeter != nil { + db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1]) + } + // If a warning that db is performing compaction has been displayed, any subsequent + // warnings will be withheld for one minute not to overwhelm the user. + if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 && + time.Now().After(lastWritePaused.Add(degradationWarnInterval)) { + db.log.Warn("Database compacting, degraded performance") + lastWritePaused = time.Now() + } + delaystats[0], delaystats[1] = delayN, duration.Nanoseconds() + + // Retrieve the database iostats. 
+		ioStats, err := db.db.GetProperty("leveldb.iostats")
+		if err != nil {
+			db.log.Error("Failed to read database iostats", "err", err)
+			merr = err
+			continue
+		}
+		var nRead, nWrite float64
+		parts := strings.Split(ioStats, " ")
+		if len(parts) < 2 {
+			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
+			merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
+			continue
+		}
+		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
+			db.log.Error("Bad syntax of read entry", "entry", parts[0])
+			merr = err
+			continue
+		}
+		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
+			db.log.Error("Bad syntax of write entry", "entry", parts[1])
+			merr = err
+			continue
+		}
+		if db.diskReadMeter != nil {
+			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
+		}
+		if db.diskWriteMeter != nil {
+			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
+		}
+		iostats[0], iostats[1] = nRead, nWrite
+
+		compCount, err := db.db.GetProperty("leveldb.compcount")
+		if err != nil {
+			db.log.Error("Failed to read database compaction count", "err", err)
+			merr = err
+			continue
+		}
+
+		var (
+			memComp       uint32
+			level0Comp    uint32
+			nonLevel0Comp uint32
+			seekComp      uint32
+		)
+		if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
+			db.log.Error("Compaction count statistic not found")
+			merr = err
+			continue
+		}
+		db.memCompGauge.Update(int64(memComp))
+		db.level0CompGauge.Update(int64(level0Comp))
+		db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
+		db.seekCompGauge.Update(int64(seekComp))
+
+		// Sleep a bit, then repeat the stats collection
+		select {
+		case errc = <-db.quitChan:
+			// Quit requested, stop hammering the database
+		case <-timer.C:
+			timer.Reset(refresh)
+			// Timeout, gather a new set of stats
+		}
+	}
+
+	if errc == nil {
+		errc = <-db.quitChan
+	}
+	errc <- merr
+}
+
+// batch is a write-only leveldb batch that commits changes to its host database
+// when Write is called. A batch cannot be used concurrently.
+type batch struct {
+	db   *leveldb.DB
+	b    *leveldb.Batch
+	size int
+}
+
+// Put inserts the given value into the batch for later committing.
+func (b *batch) Put(key, value []byte) error {
+	b.b.Put(key, value)
+	b.size += len(value)
+	return nil
+}
+
+// Delete inserts a key removal into the batch for later committing.
+func (b *batch) Delete(key []byte) error {
+	b.b.Delete(key)
+	b.size += len(key)
+	return nil
+}
+
+// ValueSize retrieves the amount of data queued up for writing.
+func (b *batch) ValueSize() int {
+	return b.size
+}
+
+// Write flushes any accumulated data to disk.
+func (b *batch) Write() error {
+	return b.db.Write(b.b, nil)
+}
+
+// Reset resets the batch for reuse.
+func (b *batch) Reset() {
+	b.b.Reset()
+	b.size = 0
+}
+
+// Replay replays the batch contents.
+func (b *batch) Replay(w ethdb.KeyValueWriter) error {
+	return b.b.Replay(&replayer{writer: w})
+}
+
+// replayer is a small wrapper to implement the correct replay methods.
+type replayer struct {
+	writer  ethdb.KeyValueWriter
+	failure error
+}
+
+// Put inserts the given value into the key-value data store.
+func (r *replayer) Put(key, value []byte) {
+	// If the replay already failed, stop executing ops
+	if r.failure != nil {
+		return
+	}
+	r.failure = r.writer.Put(key, value)
+}
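As an aside, the Sscanf-driven parsing in the meter loop above is easy to exercise in isolation. A minimal, self-contained sketch; the sample string is the one quoted in meter's doc comment, everything else is illustrative:

package main

import "fmt"

func main() {
	// Sample iostats line, as quoted in the meter doc comment above.
	ioStats := "Read(MB):3895.04860 Write(MB):3654.64712"
	var nRead, nWrite float64
	// Sscanf matches the literals verbatim and fills the two float verbs;
	// n reports how many verbs were successfully filled.
	n, err := fmt.Sscanf(ioStats, "Read(MB):%f Write(MB):%f", &nRead, &nWrite)
	if n != 2 || err != nil {
		panic(err)
	}
	fmt.Println(nRead, nWrite) // 3895.0486 3654.64712
}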
+// Delete removes the key from the key-value data store.
+func (r *replayer) Delete(key []byte) {
+	// If the replay already failed, stop executing ops
+	if r.failure != nil {
+		return
+	}
+	r.failure = r.writer.Delete(key)
+}
+
+// bytesPrefixRange returns a key range that satisfies
+// - the given prefix, and
+// - the given seek position
+func bytesPrefixRange(prefix, start []byte) *util.Range {
+	r := util.BytesPrefix(prefix)
+	r.Start = append(r.Start, start...)
+	return r
+}
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
new file mode 100644
index 0000000000..9e3dc970a5
--- /dev/null
+++ b/ethdb/memorydb/memorydb.go
@@ -0,0 +1,330 @@
+// (c) 2020-2021, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package memorydb implements the key-value database layer based on memory maps.
+package memorydb
+
+import (
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/tenderly/coreth/ethdb"
+)
+
+var (
+	// errMemorydbClosed is returned if a memory database was already closed at the
+	// invocation of a data access operation.
+	errMemorydbClosed = errors.New("database closed")
+
+	// errMemorydbNotFound is returned if a key is requested that is not found in
+	// the provided memory database.
+	errMemorydbNotFound = errors.New("not found")
+)
+
+// Database is an ephemeral key-value store. Apart from basic data storage
+// functionality it also supports batch writes and iterating over the keyspace in
+// binary-alphabetical order.
+type Database struct {
+	db   map[string][]byte
+	lock sync.RWMutex
+}
+
+// New returns a wrapped map with all the required database interface methods
+// implemented.
+func New() *Database {
+	return &Database{
+		db: make(map[string][]byte),
+	}
+}
+
+// NewWithCap returns a wrapped map pre-allocated to the provided capacity with
+// all the required database interface methods implemented.
+func NewWithCap(size int) *Database {
+	return &Database{
+		db: make(map[string][]byte, size),
+	}
+}
+
+// Close deallocates the internal map and ensures any consecutive data access op
+// fails with an error.
+func (db *Database) Close() error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	db.db = nil
+	return nil
+}
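For intuition, bytesPrefixRange above simply narrows goleveldb's BytesPrefix range so iteration begins at prefix+start. A small standalone sketch; the key names are made up for illustration:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// Iterate everything under the "acc-" prefix, but only from key
	// "acc-0005" (or the first key after it) onwards.
	r := util.BytesPrefix([]byte("acc-"))
	r.Start = append(r.Start, []byte("0005")...)
	// BytesPrefix computes the exclusive upper bound by incrementing the
	// last prefix byte: '-' (0x2D) becomes '.' (0x2E).
	fmt.Printf("start=%q limit=%q\n", r.Start, r.Limit) // start="acc-0005" limit="acc."
}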
+// Has retrieves if a key is present in the key-value store.
+func (db *Database) Has(key []byte) (bool, error) {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	if db.db == nil {
+		return false, errMemorydbClosed
+	}
+	_, ok := db.db[string(key)]
+	return ok, nil
+}
+
+// Get retrieves the given key if it's present in the key-value store.
+func (db *Database) Get(key []byte) ([]byte, error) {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	if db.db == nil {
+		return nil, errMemorydbClosed
+	}
+	if entry, ok := db.db[string(key)]; ok {
+		return common.CopyBytes(entry), nil
+	}
+	return nil, errMemorydbNotFound
+}
+
+// Put inserts the given value into the key-value store.
+func (db *Database) Put(key []byte, value []byte) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	if db.db == nil {
+		return errMemorydbClosed
+	}
+	db.db[string(key)] = common.CopyBytes(value)
+	return nil
+}
+
+// Delete removes the key from the key-value store.
+func (db *Database) Delete(key []byte) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	if db.db == nil {
+		return errMemorydbClosed
+	}
+	delete(db.db, string(key))
+	return nil
+}
+
+// NewBatch creates a write-only key-value store that buffers changes to its host
+// database until a final write is called.
+func (db *Database) NewBatch() ethdb.Batch {
+	return &batch{
+		db: db,
+	}
+}
+
+// NewBatchWithSize creates a write-only database batch; the size hint is
+// unused by the in-memory implementation.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+	return &batch{
+		db: db,
+	}
+}
+
+// NewIterator creates a binary-alphabetical iterator over a subset
+// of database content with a particular key prefix, starting at a particular
+// initial key (or after, if it does not exist).
+func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	var (
+		pr     = string(prefix)
+		st     = string(append(prefix, start...))
+		keys   = make([]string, 0, len(db.db))
+		values = make([][]byte, 0, len(db.db))
+	)
+	// Collect the keys from the memory database corresponding to the given prefix
+	// and start
+	for key := range db.db {
+		if !strings.HasPrefix(key, pr) {
+			continue
+		}
+		if key >= st {
+			keys = append(keys, key)
+		}
+	}
+	// Sort the items and retrieve the associated values
+	sort.Strings(keys)
+	for _, key := range keys {
+		values = append(values, db.db[key])
+	}
+	return &iterator{
+		index:  -1,
+		keys:   keys,
+		values: values,
+	}
+}
+
+// Stat returns a particular internal stat of the database.
+func (db *Database) Stat(property string) (string, error) {
+	return "", errors.New("unknown property")
+}
+
+// Compact is not supported on a memory database, but there's no need either as
+// a memory database doesn't waste space anyway.
+func (db *Database) Compact(start []byte, limit []byte) error {
+	return nil
+}
+
+// Len returns the number of entries currently present in the memory database.
+//
+// Note, this method is only used for testing (i.e. not public in general) and
+// does not have explicit checks for closed-ness to allow simpler testing code.
+func (db *Database) Len() int {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	return len(db.db)
+}
+
+// keyvalue is a key-value tuple tagged with a deletion field to allow creating
+// memory-database write batches.
+type keyvalue struct {
+	key    []byte
+	value  []byte
+	delete bool
+}
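A quick usage sketch of the store defined above, exercising Put/Get and the snapshot iterator. The import path follows this module; keys and values are illustrative:

package main

import (
	"fmt"

	"github.com/tenderly/coreth/ethdb/memorydb"
)

func main() {
	db := memorydb.New()
	defer db.Close()

	for _, k := range []string{"acc-1", "acc-2", "blk-1"} {
		if err := db.Put([]byte(k), []byte("v-"+k)); err != nil {
			panic(err)
		}
	}
	if v, err := db.Get([]byte("acc-2")); err == nil {
		fmt.Printf("acc-2 => %s\n", v) // acc-2 => v-acc-2
	}

	// The iterator copies the matching keys up front, in binary-alphabetical
	// order, so writes made after creation do not affect an open iterator.
	it := db.NewIterator([]byte("acc-"), nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s\n", it.Key()) // acc-1, then acc-2
	}
}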
+// batch is a write-only memory batch that commits changes to its host
+// database when Write is called. A batch cannot be used concurrently.
+type batch struct {
+	db     *Database
+	writes []keyvalue
+	size   int
+}
+
+// Put inserts the given value into the batch for later committing.
+func (b *batch) Put(key, value []byte) error {
+	b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value), false})
+	b.size += len(value)
+	return nil
+}
+
+// Delete inserts a key removal into the batch for later committing.
+func (b *batch) Delete(key []byte) error {
+	b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true})
+	b.size += len(key)
+	return nil
+}
+
+// ValueSize retrieves the amount of data queued up for writing.
+func (b *batch) ValueSize() int {
+	return b.size
+}
+
+// Write flushes any accumulated data to the memory database.
+func (b *batch) Write() error {
+	b.db.lock.Lock()
+	defer b.db.lock.Unlock()
+
+	for _, keyvalue := range b.writes {
+		if keyvalue.delete {
+			delete(b.db.db, string(keyvalue.key))
+			continue
+		}
+		b.db.db[string(keyvalue.key)] = keyvalue.value
+	}
+	return nil
+}
+
+// Reset resets the batch for reuse.
+func (b *batch) Reset() {
+	b.writes = b.writes[:0]
+	b.size = 0
+}
+
+// Replay replays the batch contents.
+func (b *batch) Replay(w ethdb.KeyValueWriter) error {
+	for _, keyvalue := range b.writes {
+		if keyvalue.delete {
+			if err := w.Delete(keyvalue.key); err != nil {
+				return err
+			}
+			continue
+		}
+		if err := w.Put(keyvalue.key, keyvalue.value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// iterator can walk over the (potentially partial) keyspace of a memory key
+// value store. Internally it is a deep copy of the entire iterated state,
+// sorted by keys.
+type iterator struct {
+	index  int
+	keys   []string
+	values [][]byte
+}
+
+// Next moves the iterator to the next key/value pair. It returns whether the
+// iterator is exhausted.
+func (it *iterator) Next() bool {
+	// Short circuit if iterator is already exhausted in the forward direction.
+	if it.index >= len(it.keys) {
+		return false
+	}
+	it.index += 1
+	return it.index < len(it.keys)
+}
+
+// Error returns any accumulated error. Exhausting all the key/value pairs
+// is not considered to be an error. A memory iterator cannot encounter errors.
+func (it *iterator) Error() error {
+	return nil
+}
+
+// Key returns the key of the current key/value pair, or nil if done. The caller
+// should not modify the contents of the returned slice, and its contents may
+// change on the next call to Next.
+func (it *iterator) Key() []byte {
+	// Short circuit if iterator is not in a valid position
+	if it.index < 0 || it.index >= len(it.keys) {
+		return nil
+	}
+	return []byte(it.keys[it.index])
+}
+
+// Value returns the value of the current key/value pair, or nil if done. The
+// caller should not modify the contents of the returned slice, and its contents
+// may change on the next call to Next.
+func (it *iterator) Value() []byte {
+	// Short circuit if iterator is not in a valid position
+	if it.index < 0 || it.index >= len(it.keys) {
+		return nil
+	}
+	return it.values[it.index]
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (it *iterator) Release() { + it.index, it.keys, it.values = -1, nil, nil +} diff --git a/go.mod b/go.mod index 31e62ddc39..e20f0c577b 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/ava-labs/coreth +module github.com/tenderly/coreth go 1.21.10 diff --git a/interfaces/interfaces.go b/interfaces/interfaces.go index 35b1855b11..9dd40631c7 100644 --- a/interfaces/interfaces.go +++ b/interfaces/interfaces.go @@ -32,7 +32,7 @@ import ( "errors" "math/big" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" ) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 3eee4bda66..9f284eccb9 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -28,7 +28,6 @@ package ethapi import ( "context" - "encoding/hex" "errors" "fmt" "math/big" @@ -36,20 +35,19 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/accounts/scwallet" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/gasestimator" - "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" - "github.com/ava-labs/coreth/trie" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/accounts/scwallet" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/eth/tracers/logger" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" + "github.com/tenderly/coreth/vmerrs" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" @@ -59,22 +57,19 @@ import ( "github.com/tyler-smith/go-bip39" ) -// estimateGasErrorRatio is the amount of overestimation eth_estimateGas is -// allowed to produce in order to speed up calculations. -const estimateGasErrorRatio = 0.015 - -// EthereumAPI provides an API to access Ethereum related information. -type EthereumAPI struct { +// PublicEthereumAPI provides an API to access Ethereum related information. +// It offers only methods that operate on public data that is freely available to anyone. +type PublicEthereumAPI struct { b Backend } -// NewEthereumAPI creates a new Ethereum protocol API. -func NewEthereumAPI(b Backend) *EthereumAPI { - return &EthereumAPI{b} +// NewPublicEthereumAPI creates a new Ethereum protocol API. +func NewPublicEthereumAPI(b Backend) *PublicEthereumAPI { + return &PublicEthereumAPI{b} } // GasPrice returns a suggestion for a gas price for legacy transactions. -func (s *EthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) { +func (s *PublicEthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) { gasPrice, err := s.b.SuggestPrice(ctx) if err != nil { return nil, err @@ -84,7 +79,7 @@ func (s *EthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) { // BaseFee returns an estimate for what the base fee will be on the next block if // it is produced now. 
-func (s *EthereumAPI) BaseFee(ctx context.Context) (*hexutil.Big, error) {
+func (s *PublicEthereumAPI) BaseFee(ctx context.Context) (*hexutil.Big, error) {
 	baseFee, err := s.b.EstimateBaseFee(ctx)
 	if err != nil {
 		return nil, err
@@ -93,7 +88,7 @@
 }
 
 // MaxPriorityFeePerGas returns a suggestion for a gas tip cap for dynamic fee transactions.
-func (s *EthereumAPI) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) {
+func (s *PublicEthereumAPI) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) {
 	tipcap, err := s.b.SuggestGasTipCap(ctx)
 	if err != nil {
 		return nil, err
@@ -108,9 +103,8 @@
 type feeHistoryResult struct {
 	GasUsedRatio []float64 `json:"gasUsedRatio"`
 }
 
-// FeeHistory returns the fee market history.
-func (s *EthereumAPI) FeeHistory(ctx context.Context, blockCount math.HexOrDecimal64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) {
-	oldest, reward, baseFee, gasUsed, err := s.b.FeeHistory(ctx, uint64(blockCount), lastBlock, rewardPercentiles)
+func (s *PublicEthereumAPI) FeeHistory(ctx context.Context, blockCount rpc.DecimalOrHex, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) {
+	oldest, reward, baseFee, gasUsed, err := s.b.FeeHistory(ctx, int(blockCount), lastBlock, rewardPercentiles)
 	if err != nil {
 		return nil, err
 	}
@@ -136,27 +130,18 @@
 	return results, nil
 }
 
-// Syncing allows the caller to determine whether the chain is syncing or not.
-// In geth, the response is either a map representing an ethereum.SyncProgress
-// struct or "false" (indicating the chain is not syncing).
-// In coreth, avalanchego prevents API calls unless bootstrapping is complete,
-// so we always return false here for API compatibility.
-func (s *EthereumAPI) Syncing() (interface{}, error) {
-	return false, nil
-}
-
-// TxPoolAPI offers and API for the transaction pool. It only operates on data that is non-confidential.
-type TxPoolAPI struct {
+// PublicTxPoolAPI offers an API for the transaction pool. It only operates on
+// data that is non-confidential.
+type PublicTxPoolAPI struct {
 	b Backend
 }
 
-// NewTxPoolAPI creates a new tx pool service that gives information about the transaction pool.
-func NewTxPoolAPI(b Backend) *TxPoolAPI {
-	return &TxPoolAPI{b}
+// NewPublicTxPoolAPI creates a new tx pool service that gives information about the transaction pool.
+func NewPublicTxPoolAPI(b Backend) *PublicTxPoolAPI {
+	return &PublicTxPoolAPI{b}
 }
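From the client side, the fee-history endpoint reshaped above is usually consumed through go-ethereum's ethclient. A hedged sketch; the endpoint URL is a hypothetical local C-Chain node, and the percentiles are arbitrary:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Hypothetical endpoint; any node exposing the eth namespace works.
	client, err := ethclient.Dial("http://localhost:9650/ext/bc/C/rpc")
	if err != nil {
		panic(err)
	}
	// Ask for 25th/75th percentile tips over the last 4 blocks,
	// ending at the latest block (nil).
	hist, err := client.FeeHistory(context.Background(), 4, nil, []float64{25, 75})
	if err != nil {
		panic(err)
	}
	fmt.Println("oldest block:", hist.OldestBlock)
	fmt.Println("base fees:", hist.BaseFee)
	fmt.Println("gas used ratios:", hist.GasUsedRatio)
}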
 // Content returns the transactions contained within the transaction pool.
-func (s *TxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction {
+func (s *PublicTxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction {
 	content := map[string]map[string]map[string]*RPCTransaction{
 		"pending": make(map[string]map[string]*RPCTransaction),
 		"queued":  make(map[string]map[string]*RPCTransaction),
@@ -168,7 +153,7 @@
 	for account, txs := range pending {
 		dump := make(map[string]*RPCTransaction)
 		for _, tx := range txs {
-			dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
+			dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
 		}
 		content["pending"][account.Hex()] = dump
 	}
@@ -176,7 +161,7 @@
 	for account, txs := range queue {
 		dump := make(map[string]*RPCTransaction)
 		for _, tx := range txs {
-			dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
+			dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
 		}
 		content["queued"][account.Hex()] = dump
 	}
@@ -184,7 +169,7 @@
 }
 
 // ContentFrom returns the transactions contained within the transaction pool
 // for the given address.
-func (s *TxPoolAPI) ContentFrom(addr common.Address) map[string]map[string]*RPCTransaction {
+func (s *PublicTxPoolAPI) ContentFrom(addr common.Address) map[string]map[string]*RPCTransaction {
 	content := make(map[string]map[string]*RPCTransaction, 2)
 	pending, queue := s.b.TxPoolContentFrom(addr)
 	curHeader := s.b.CurrentHeader()
@@ -193,14 +178,14 @@
 	// Build the pending transactions
 	dump := make(map[string]*RPCTransaction, len(pending))
 	for _, tx := range pending {
-		dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
+		dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
 	}
 	content["pending"] = dump
 
 	// Build the queued transactions
 	dump = make(map[string]*RPCTransaction, len(queue))
 	for _, tx := range queue {
-		dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
+		dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())
 	}
 	content["queued"] = dump
 
@@ -208,7 +193,7 @@
 }
 
 // Status returns the number of pending and queued transactions in the pool.
-func (s *TxPoolAPI) Status() map[string]hexutil.Uint {
+func (s *PublicTxPoolAPI) Status() map[string]hexutil.Uint {
 	pending, queue := s.b.Stats()
 	return map[string]hexutil.Uint{
 		"pending": hexutil.Uint(pending),
@@ -218,7 +203,7 @@
 }
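The Status method above maps straight onto the txpool_status RPC. A hypothetical client-side sketch using go-ethereum's generic rpc client; the endpoint URL is an assumption:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:9650/ext/bc/C/rpc") // hypothetical endpoint
	if err != nil {
		panic(err)
	}
	// The result shape mirrors Status's return type: hex-encoded counts
	// keyed by "pending" and "queued".
	var status map[string]hexutil.Uint
	if err := client.Call(&status, "txpool_status"); err != nil {
		panic(err)
	}
	fmt.Printf("pending=%d queued=%d\n", status["pending"], status["queued"])
}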
 // Inspect retrieves the content of the transaction pool and flattens it into an
 // easily inspectable list.
-func (s *TxPoolAPI) Inspect() map[string]map[string]map[string]string {
+func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
 	content := map[string]map[string]map[string]string{
 		"pending": make(map[string]map[string]string),
 		"queued":  make(map[string]map[string]string),
@@ -251,34 +236,34 @@
 	return content
 }
 
-// EthereumAccountAPI provides an API to access accounts managed by this node.
+// PublicAccountAPI provides an API to access accounts managed by this node.
 // It offers only methods that can retrieve accounts.
-type EthereumAccountAPI struct {
+type PublicAccountAPI struct {
 	am *accounts.Manager
 }
 
-// NewEthereumAccountAPI creates a new EthereumAccountAPI.
-func NewEthereumAccountAPI(am *accounts.Manager) *EthereumAccountAPI {
-	return &EthereumAccountAPI{am: am}
+// NewPublicAccountAPI creates a new PublicAccountAPI.
+func NewPublicAccountAPI(am *accounts.Manager) *PublicAccountAPI {
+	return &PublicAccountAPI{am: am}
 }
 
-// Accounts returns the collection of accounts this node manages.
-func (s *EthereumAccountAPI) Accounts() []common.Address {
+// Accounts returns the collection of accounts this node manages.
+func (s *PublicAccountAPI) Accounts() []common.Address {
 	return s.am.Accounts()
 }
 
-// PersonalAccountAPI provides an API to access accounts managed by this node.
+// PrivateAccountAPI provides an API to access accounts managed by this node.
 // It offers methods to create, (un)lock and list accounts. Some methods accept
 // passwords and are therefore considered private by default.
-type PersonalAccountAPI struct {
+type PrivateAccountAPI struct {
 	am        *accounts.Manager
 	nonceLock *AddrLocker
 	b         Backend
 }
 
-// NewPersonalAccountAPI create a new PersonalAccountAPI.
-func NewPersonalAccountAPI(b Backend, nonceLock *AddrLocker) *PersonalAccountAPI {
-	return &PersonalAccountAPI{
+// NewPrivateAccountAPI creates a new PrivateAccountAPI.
+func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
+	return &PrivateAccountAPI{
 		am:        b.AccountManager(),
 		nonceLock: nonceLock,
 		b:         b,
@@ -286,7 +271,7 @@
 }
 
 // ListAccounts will return a list of addresses for accounts this node manages.
-func (s *PersonalAccountAPI) ListAccounts() []common.Address {
+func (s *PrivateAccountAPI) ListAccounts() []common.Address {
 	return s.am.Accounts()
 }
 
@@ -300,7 +285,7 @@
 type rawWallet struct {
 }
 
 // ListWallets will return a list of wallets this node manages.
-func (s *PersonalAccountAPI) ListWallets() []rawWallet {
+func (s *PrivateAccountAPI) ListWallets() []rawWallet {
 	wallets := make([]rawWallet, 0) // return [] instead of nil if empty
 	for _, wallet := range s.am.Wallets() {
 		status, failure := wallet.Status()
@@ -322,7 +307,7 @@
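Structurally, the account APIs above are plain structs whose exported methods become RPC methods once registered under a namespace. A minimal, self-contained sketch of that mechanism using go-ethereum's rpc server; the demo type and namespace are invented for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

// demoAPI stands in for a namespace receiver such as PublicAccountAPI.
type demoAPI struct{}

// Version becomes callable as demo_version once registered.
func (demoAPI) Version() string { return "1.0" }

func main() {
	server := rpc.NewServer()
	// RegisterName exposes every suitable exported method of the receiver
	// under the given namespace, e.g. demo_version here.
	if err := server.RegisterName("demo", demoAPI{}); err != nil {
		panic(err)
	}
	fmt.Println("registered demo namespace")
}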
 // OpenWallet initiates a hardware wallet opening procedure, establishing a USB
 // connection and attempting to authenticate via the provided passphrase. Note,
 // the method may return an extra challenge requiring a second open (e.g. the
 // Trezor PIN matrix challenge).
-func (s *PersonalAccountAPI) OpenWallet(url string, passphrase *string) error {
+func (s *PrivateAccountAPI) OpenWallet(url string, passphrase *string) error {
 	wallet, err := s.am.Wallet(url)
 	if err != nil {
 		return err
@@ -334,9 +319,9 @@
 	return wallet.Open(pass)
 }
 
-// DeriveAccount requests an HD wallet to derive a new account, optionally pinning
+// DeriveAccount requests an HD wallet to derive a new account, optionally pinning
 // it for later reuse.
-func (s *PersonalAccountAPI) DeriveAccount(url string, path string, pin *bool) (accounts.Account, error) {
+func (s *PrivateAccountAPI) DeriveAccount(url string, path string, pin *bool) (accounts.Account, error) {
 	wallet, err := s.am.Wallet(url)
 	if err != nil {
 		return accounts.Account{}, err
@@ -352,20 +337,19 @@
 }
 
 // NewAccount will create a new account and returns the address for the new account.
-func (s *PersonalAccountAPI) NewAccount(password string) (common.AddressEIP55, error) {
+func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error) {
 	ks, err := fetchKeystore(s.am)
 	if err != nil {
-		return common.AddressEIP55{}, err
+		return common.Address{}, err
 	}
 	acc, err := ks.NewAccount(password)
 	if err == nil {
-		addrEIP55 := common.AddressEIP55(acc.Address)
-		log.Info("Your new key was generated", "address", addrEIP55.String())
+		log.Info("Your new key was generated", "address", acc.Address)
 		log.Warn("Please backup your key file!", "path", acc.URL.Path)
 		log.Warn("Please remember your password!")
-		return addrEIP55, nil
+		return acc.Address, nil
 	}
-	return common.AddressEIP55{}, err
+	return common.Address{}, err
 }
 
 // fetchKeystore retrieves the encrypted keystore from the account manager.
@@ -378,7 +362,7 @@
 
 // ImportRawKey stores the given hex encoded ECDSA key into the key directory,
 // encrypting it with the passphrase.
-func (s *PersonalAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
+func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
 	key, err := crypto.HexToECDSA(privkey)
 	if err != nil {
 		return common.Address{}, err
@@ -394,7 +378,7 @@
 
 // UnlockAccount will unlock the account associated with the given address with
 // the given password for duration seconds. If duration is nil it will use a
 // default of 300 seconds. It returns an indication if the account was unlocked.
-func (s *PersonalAccountAPI) UnlockAccount(ctx context.Context, addr common.Address, password string, duration *uint64) (bool, error) {
+func (s *PrivateAccountAPI) UnlockAccount(ctx context.Context, addr common.Address, password string, duration *uint64) (bool, error) {
 	// When the API is exposed over external RPC (http, ws, etc.), account
 	// unlocking is disabled unless the user explicitly allows it.
@@ -423,7 +407,7 @@
 }
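ImportRawKey above is a thin wrapper around crypto.HexToECDSA. A standalone sketch of that parsing step; the key is a well-known throwaway test value, never use it for real funds:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// 64 hex characters, no 0x prefix: exactly what personal_importRawKey expects.
	// Hypothetical throwaway key for illustration only.
	key, err := crypto.HexToECDSA("1111111111111111111111111111111111111111111111111111111111111111")
	if err != nil {
		panic(err)
	}
	fmt.Println("derived address:", crypto.PubkeyToAddress(key.PublicKey))
}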
 // LockAccount will lock the account associated with the given address when it's unlocked.
-func (s *PersonalAccountAPI) LockAccount(addr common.Address) bool {
+func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
 	if ks, err := fetchKeystore(s.am); err == nil {
 		return ks.Lock(addr) == nil
 	}
@@ -433,7 +417,7 @@
 
 // signTransaction sets defaults and signs the given transaction.
 // NOTE: the caller needs to ensure that the nonceLock is held, if applicable,
 // and release it after the transaction has been submitted to the tx pool.
-func (s *PersonalAccountAPI) signTransaction(ctx context.Context, args *TransactionArgs, passwd string) (*types.Transaction, error) {
+func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *TransactionArgs, passwd string) (*types.Transaction, error) {
 	// Look up the wallet containing the requested signer
 	account := accounts.Account{Address: args.from()}
 	wallet, err := s.am.Find(account)
@@ -453,9 +437,9 @@
 
 // SendTransaction will create a transaction from the given arguments and
 // tries to sign it with the key associated with args.From. If the given
 // passwd isn't able to decrypt the key it fails.
-func (s *PersonalAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) {
+func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) {
 	if args.Nonce == nil {
-		// Hold the mutex around signing to prevent concurrent assignment of
+		// Hold the address's mutex around signing to prevent concurrent assignment of
 		// the same nonce to multiple accounts.
 		s.nonceLock.LockAddr(args.from())
 		defer s.nonceLock.UnlockAddr(args.from())
@@ -472,20 +456,20 @@
 
 // SignTransaction will create a transaction from the given arguments and
 // tries to sign it with the key associated with args.From. If the given passwd isn't
 // able to decrypt the key it fails. The transaction is returned in RLP-form, not broadcast
 // to other nodes.
-func (s *PersonalAccountAPI) SignTransaction(ctx context.Context, args TransactionArgs, passwd string) (*SignTransactionResult, error) {
+func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args TransactionArgs, passwd string) (*SignTransactionResult, error) {
 	// No need to obtain the noncelock mutex, since we won't be sending this
 	// tx into the transaction pool, but right back to the user
 	if args.From == nil {
-		return nil, errors.New("sender not specified")
+		return nil, fmt.Errorf("sender not specified")
 	}
 	if args.Gas == nil {
-		return nil, errors.New("gas not specified")
+		return nil, fmt.Errorf("gas not specified")
 	}
 	if args.GasPrice == nil && (args.MaxFeePerGas == nil || args.MaxPriorityFeePerGas == nil) {
-		return nil, errors.New("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas")
+		return nil, fmt.Errorf("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas")
 	}
 	if args.Nonce == nil {
-		return nil, errors.New("nonce not specified")
+		return nil, fmt.Errorf("nonce not specified")
 	}
 	// Before actually signing the transaction, ensure the transaction fee is reasonable.
 	tx := args.toTransaction()
@@ -505,7 +489,7 @@
 }
 
 // Sign calculates an Ethereum ECDSA signature for:
 // keccak256("\x19Ethereum Signed Message:\n" + len(message) + message)
 //
 // Note, the produced signature conforms to the secp256k1 curve R, S and V values,
 // where the V value will be 27 or 28 for legacy reasons.
 //
 // The key used to calculate the signature is decrypted with the given password.
 //
 // https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign
-func (s *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) {
+func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) {
 	// Look up the wallet containing the requested signer
 	account := accounts.Account{Address: addr}
 
@@ -541,12 +525,12 @@
 // the V value must be 27 or 28 for legacy reasons.
 //
 // https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
-func (s *PersonalAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {
+func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {
 	if len(sig) != crypto.SignatureLength {
 		return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
 	}
 	if sig[crypto.RecoveryIDOffset] != 27 && sig[crypto.RecoveryIDOffset] != 28 {
-		return common.Address{}, errors.New("invalid Ethereum signature (V is not 27 or 28)")
+		return common.Address{}, fmt.Errorf("invalid Ethereum signature (V is not 27 or 28)")
 	}
 	sig[crypto.RecoveryIDOffset] -= 27 // Transform yellow paper V from 27/28 to 0/1
 
@@ -557,8 +541,14 @@
 	return crypto.PubkeyToAddress(*rpk), nil
 }
 
+// SignAndSendTransaction was renamed to SendTransaction. This method is deprecated
+// and will be removed in the future. Its primary goal is to give clients time to update.
+func (s *PrivateAccountAPI) SignAndSendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) {
+	return s.SendTransaction(ctx, args, passwd)
+}
+
 // InitializeWallet initializes a new wallet at the provided URL, by generating and returning a new private key.
-func (s *PersonalAccountAPI) InitializeWallet(ctx context.Context, url string) (string, error) {
+func (s *PrivateAccountAPI) InitializeWallet(ctx context.Context, url string) (string, error) {
 	wallet, err := s.am.Wallet(url)
 	if err != nil {
 		return "", err
@@ -580,12 +570,12 @@
 	case *scwallet.Wallet:
 		return mnemonic, wallet.Initialize(seed)
 	default:
-		return "", errors.New("specified wallet does not support initialization")
+		return "", fmt.Errorf("specified wallet does not support initialization")
 	}
 }
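The sign/recover round trip behind personal_sign and personal_ecRecover above can be reproduced locally with go-ethereum's crypto and accounts packages. A self-contained sketch; the helper name and message are invented:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// recoverSigner mirrors the EcRecover flow above: a 65-byte signature with
// V in {27, 28} is mapped back to {0, 1} before public key recovery.
func recoverSigner(data, sig []byte) (common.Address, error) {
	if len(sig) != crypto.SignatureLength {
		return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
	}
	cpy := make([]byte, len(sig))
	copy(cpy, sig)
	cpy[crypto.RecoveryIDOffset] -= 27 // yellow paper V (27/28) -> 0/1
	pub, err := crypto.SigToPub(accounts.TextHash(data), cpy)
	if err != nil {
		return common.Address{}, err
	}
	return crypto.PubkeyToAddress(*pub), nil
}

func main() {
	key, _ := crypto.GenerateKey()
	msg := []byte("hello")
	// TextHash applies the "\x19Ethereum Signed Message:\n" prefix before hashing.
	sig, _ := crypto.Sign(accounts.TextHash(msg), key)
	sig[crypto.RecoveryIDOffset] += 27 // present V as 27/28, like personal_sign does
	addr, err := recoverSigner(msg, sig)
	fmt.Println(addr == crypto.PubkeyToAddress(key.PublicKey), err) // true <nil>
}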
 // Unpair deletes a pairing between wallet and geth.
-func (s *PersonalAccountAPI) Unpair(ctx context.Context, url string, pin string) error {
+func (s *PrivateAccountAPI) Unpair(ctx context.Context, url string, pin string) error {
 	wallet, err := s.am.Wallet(url)
 	if err != nil {
 		return err
@@ -595,32 +585,32 @@
 	case *scwallet.Wallet:
 		return wallet.Unpair([]byte(pin))
 	default:
-		return errors.New("specified wallet does not support pairing")
+		return fmt.Errorf("specified wallet does not support pairing")
 	}
 }
 
-// BlockChainAPI provides an API to access Ethereum blockchain data.
-type BlockChainAPI struct {
+// PublicBlockChainAPI provides an API to access the Ethereum blockchain.
+// It offers only methods that operate on public data that is freely available to anyone.
+type PublicBlockChainAPI struct {
 	b Backend
 }
 
-// NewBlockChainAPI creates a new Ethereum blockchain API.
-func NewBlockChainAPI(b Backend) *BlockChainAPI {
-	return &BlockChainAPI{b}
+// NewPublicBlockChainAPI creates a new Ethereum blockchain API.
+func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI {
+	return &PublicBlockChainAPI{b}
 }
 
-// ChainId is the EIP-155 replay-protection chain id for the current Ethereum chain config.
-//
-// Note, this method does not conform to EIP-695 because the configured chain ID is always
-// returned, regardless of the current head block. We used to return an error when the chain
-// wasn't synced up to a block where EIP-155 is enabled, but this behavior caused issues
-// in CL clients.
-func (api *BlockChainAPI) ChainId() *hexutil.Big {
-	return (*hexutil.Big)(api.b.ChainConfig().ChainID)
+// ChainId is the EIP-155 replay-protection chain id for the current Ethereum chain config.
+func (s *PublicBlockChainAPI) ChainId() (*hexutil.Big, error) {
+	// If the current block is at or past the EIP-155 replay-protection fork
+	// block, return the chainID from the config.
+	if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) {
+		return (*hexutil.Big)(config.ChainID), nil
+	}
+	return nil, fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block")
 }
 
 // BlockNumber returns the block number of the chain head.
-func (s *BlockChainAPI) BlockNumber() hexutil.Uint64 {
+func (s *PublicBlockChainAPI) BlockNumber() hexutil.Uint64 {
 	header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available
 	return hexutil.Uint64(header.Number.Uint64())
 }
@@ -628,7 +618,7 @@
 
 // GetBalance returns the amount of wei for the given address in the state of the
 // given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta
 // block numbers are also allowed.
-func (s *BlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) {
+func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) {
 	state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
 	if state == nil || err != nil {
 		return nil, err
@@ -639,7 +629,7 @@
 
 // GetAssetBalance returns the amount of [assetID] for the given address in the state of the
 // given block number. The rpc.LatestBlockNumber, rpc.PendingBlockNumber, and
 // rpc.AcceptedBlockNumber meta block numbers are also allowed.
-func (s *BlockChainAPI) GetAssetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash, assetID ids.ID) (*hexutil.Big, error) { +func (s *PublicBlockChainAPI) GetAssetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash, assetID ids.ID) (*hexutil.Big, error) { state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err @@ -664,120 +654,60 @@ type StorageResult struct { Proof []string `json:"proof"` } -// proofList implements ethdb.KeyValueWriter and collects the proofs as -// hex-strings for delivery to rpc-caller. -type proofList []string - -func (n *proofList) Put(key []byte, value []byte) error { - *n = append(*n, hexutil.Encode(value)) - return nil -} - -func (n *proofList) Delete(key []byte) error { - panic("not supported") -} - // GetProof returns the Merkle-proof for a given account and optionally some storage keys. -func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) { - var ( - keys = make([]common.Hash, len(storageKeys)) - keyLengths = make([]int, len(storageKeys)) - storageProof = make([]StorageResult, len(storageKeys)) - ) - // Deserialize all keys. This prevents state access on invalid input. - for i, hexKey := range storageKeys { - var err error - keys[i], keyLengths[i], err = decodeHash(hexKey) - if err != nil { - return nil, err - } - } - statedb, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) - if statedb == nil || err != nil { +func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) { + state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) + if state == nil || err != nil { return nil, err } - codeHash := statedb.GetCodeHash(address) - storageRoot := statedb.GetStorageRoot(address) - if len(keys) > 0 { - var storageTrie state.Trie - if storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) { - id := trie.StorageTrieID(header.Root, crypto.Keccak256Hash(address.Bytes()), storageRoot) - st, err := trie.NewStateTrie(id, statedb.Database().TrieDB()) - if err != nil { - return nil, err - } - storageTrie = st - } - // Create the proofs for the storageKeys. - for i, key := range keys { - // Output key encoding is a bit special: if the input was a 32-byte hash, it is - // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the - // JSON-RPC spec for getProof. This behavior exists to preserve backwards - // compatibility with older client versions. - var outputKey string - if keyLengths[i] != 32 { - outputKey = hexutil.EncodeBig(key.Big()) - } else { - outputKey = hexutil.Encode(key[:]) - } - if storageTrie == nil { - storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}} - continue - } - var proof proofList - if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil { - return nil, err + storageTrie := state.StorageTrie(address) + storageHash := types.EmptyRootHash + codeHash := state.GetCodeHash(address) + storageProof := make([]StorageResult, len(storageKeys)) + + // if we have a storageTrie, (which means the account exists), we can update the storagehash + if storageTrie != nil { + storageHash = storageTrie.Hash() + } else { + // no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray. 
+ codeHash = crypto.Keccak256Hash(nil) + } + + // create the proof for the storageKeys + for i, key := range storageKeys { + if storageTrie != nil { + proof, storageError := state.GetStorageProof(address, common.HexToHash(key)) + if storageError != nil { + return nil, storageError } - value := (*hexutil.Big)(statedb.GetState(address, key).Big()) - storageProof[i] = StorageResult{outputKey, value, proof} + storageProof[i] = StorageResult{key, (*hexutil.Big)(state.GetState(address, common.HexToHash(key)).Big()), toHexSlice(proof)} + } else { + storageProof[i] = StorageResult{key, &hexutil.Big{}, []string{}} } } - // Create the accountProof. - tr, err := trie.NewStateTrie(trie.StateTrieID(header.Root), statedb.Database().TrieDB()) - if err != nil { - return nil, err - } - var accountProof proofList - if err := tr.Prove(crypto.Keccak256(address.Bytes()), &accountProof); err != nil { - return nil, err + + // create the accountProof + accountProof, proofErr := state.GetProof(address) + if proofErr != nil { + return nil, proofErr } + return &AccountResult{ Address: address, - AccountProof: accountProof, - Balance: (*hexutil.Big)(statedb.GetBalance(address)), + AccountProof: toHexSlice(accountProof), + Balance: (*hexutil.Big)(state.GetBalance(address)), CodeHash: codeHash, - Nonce: hexutil.Uint64(statedb.GetNonce(address)), - StorageHash: storageRoot, + Nonce: hexutil.Uint64(state.GetNonce(address)), + StorageHash: storageHash, StorageProof: storageProof, - }, statedb.Error() -} - -// decodeHash parses a hex-encoded 32-byte hash. The input may optionally -// be prefixed by 0x and can have a byte length up to 32. -func decodeHash(s string) (h common.Hash, inputLength int, err error) { - if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { - s = s[2:] - } - if (len(s) & 1) > 0 { - s = "0" + s - } - b, err := hex.DecodeString(s) - if err != nil { - return common.Hash{}, 0, errors.New("hex string invalid") - } - if len(b) > 32 { - return common.Hash{}, len(b), errors.New("hex string too long, want at most 32 bytes") - } - return common.BytesToHash(b), len(b), nil + }, state.Error() } // GetHeaderByNumber returns the requested canonical block header. -// - When blockNr is -1 the chain pending header is returned. -// - When blockNr is -2 the chain latest header is returned. -// - When blockNr is -3 the chain finalized header is returned. -// - When blockNr is -4 the chain safe header is returned. -func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { +// * When blockNr is -1 the chain head is returned. +// * When blockNr is -2 the pending chain head is returned. +func (s *PublicBlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { header, err := s.b.HeaderByNumber(ctx, number) if header != nil && err == nil { response := s.rpcMarshalHeader(ctx, header) @@ -794,7 +724,7 @@ func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockN } // GetHeaderByHash returns the requested header by hash. 
-func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) map[string]interface{} { +func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) map[string]interface{} { header, _ := s.b.HeaderByHash(ctx, hash) if header != nil { return s.rpcMarshalHeader(ctx, header) @@ -803,13 +733,11 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m } // GetBlockByNumber returns the requested canonical block. -// - When blockNr is -1 the chain pending block is returned. -// - When blockNr is -2 the chain latest block is returned. -// - When blockNr is -3 the chain finalized block is returned. -// - When blockNr is -4 the chain safe block is returned. -// - When fullTx is true all transactions in the block are returned, otherwise -// only the transaction hash is returned. -func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { +// * When blockNr is -1 the chain head is returned. +// * When blockNr is -2 the pending chain head is returned. +// * When fullTx is true all transactions in the block are returned, otherwise +// only the transaction hash is returned. +func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByNumber(ctx, number) if block != nil && err == nil { response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) @@ -827,7 +755,7 @@ func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNu // GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full // detail, otherwise only the transaction hash is returned. -func (s *BlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) { +func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByHash(ctx, hash) if block != nil { return s.rpcMarshalBlock(ctx, block, true, fullTx) @@ -836,7 +764,7 @@ func (s *BlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fu } // GetUncleByBlockNumberAndIndex returns the uncle block for the given block number and index. -func (s *BlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) { +func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) { block, err := s.b.BlockByNumber(ctx, blockNr) if block != nil { uncles := block.Uncles() @@ -851,7 +779,7 @@ func (s *BlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, block } // GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index. 
-func (s *BlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (map[string]interface{}, error) { +func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (map[string]interface{}, error) { block, err := s.b.BlockByHash(ctx, blockHash) if block != nil { uncles := block.Uncles() @@ -866,7 +794,7 @@ func (s *BlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHa } // GetUncleCountByBlockNumber returns number of uncles in the block for the given block number -func (s *BlockChainAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr rpc.BlockNumber) *hexutil.Uint { +func (s *PublicBlockChainAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr rpc.BlockNumber) *hexutil.Uint { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { n := hexutil.Uint(len(block.Uncles())) return &n @@ -875,7 +803,7 @@ func (s *BlockChainAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr } // GetUncleCountByBlockHash returns number of uncles in the block for the given block hash -func (s *BlockChainAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash common.Hash) *hexutil.Uint { +func (s *PublicBlockChainAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash common.Hash) *hexutil.Uint { if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil { n := hexutil.Uint(len(block.Uncles())) return &n @@ -884,7 +812,7 @@ func (s *BlockChainAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash } // GetCode returns the code stored at the given address in the state for the given block number. -func (s *BlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { +func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err @@ -896,47 +824,15 @@ func (s *BlockChainAPI) GetCode(ctx context.Context, address common.Address, blo // GetStorageAt returns the storage from the state at the given address, key and // block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block // numbers are also allowed. -func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, hexKey string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { +func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err } - key, _, err := decodeHash(hexKey) - if err != nil { - return nil, fmt.Errorf("unable to decode storage key: %s", err) - } - res := state.GetState(address, key) + res := state.GetState(address, common.HexToHash(key)) return res[:], state.Error() } -// GetBlockReceipts returns the block receipts for the given block hash or number or tag. -func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) { - block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash) - if block == nil || err != nil { - // When the block doesn't exist, the RPC method should return JSON null - // as per specification. 
- return nil, nil - } - receipts, err := s.b.GetReceipts(ctx, block.Hash()) - if err != nil { - return nil, err - } - txs := block.Transactions() - if len(txs) != len(receipts) { - return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts)) - } - - // Derive the sender. - signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time()) - - result := make([]map[string]interface{}, len(receipts)) - for i, receipt := range receipts { - result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) - } - - return result, nil -} - // OverrideAccount indicates the overriding fields of account during the execution // of a message call. // Note, state and stateDiff can't be specified at the same time. If state is @@ -986,88 +882,36 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { } } } - // Now finalize the changes. Finalize is normally performed between transactions. - // By using finalize, the overrides are semantically behaving as - // if they were created in a transaction just before the tracing occur. - state.Finalise(false) return nil } -// BlockOverrides is a set of header fields to override. -type BlockOverrides struct { - Number *hexutil.Big - Difficulty *hexutil.Big - Time *hexutil.Uint64 - GasLimit *hexutil.Uint64 - Coinbase *common.Address - BaseFee *hexutil.Big - BlobBaseFee *hexutil.Big -} - -// Apply overrides the given header fields into the given block context. -func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) { - if diff == nil { - return - } - if diff.Number != nil { - blockCtx.BlockNumber = diff.Number.ToInt() - } - if diff.Difficulty != nil { - blockCtx.Difficulty = diff.Difficulty.ToInt() - } - if diff.Time != nil { - blockCtx.Time = uint64(*diff.Time) - } - if diff.GasLimit != nil { - blockCtx.GasLimit = uint64(*diff.GasLimit) - } - if diff.Coinbase != nil { - blockCtx.Coinbase = *diff.Coinbase - } - if diff.BaseFee != nil { - blockCtx.BaseFee = diff.BaseFee.ToInt() - } - if diff.BlobBaseFee != nil { - blockCtx.BlobBaseFee = diff.BlobBaseFee.ToInt() - } -} - -// ChainContextBackend provides methods required to implement ChainContext. -type ChainContextBackend interface { - Engine() consensus.Engine - HeaderByNumber(context.Context, rpc.BlockNumber) (*types.Header, error) -} - -// ChainContext is an implementation of core.ChainContext. It's main use-case -// is instantiating a vm.BlockContext without having access to the BlockChain object. -type ChainContext struct { - b ChainContextBackend - ctx context.Context -} - -// NewChainContext creates a new ChainContext object. -func NewChainContext(ctx context.Context, backend ChainContextBackend) *ChainContext { - return &ChainContext{ctx: ctx, b: backend} -} - -func (context *ChainContext) Engine() consensus.Engine { - return context.b.Engine() -} +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { + defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) -func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.Header { - // This method is called to get the hash for a block number when executing the BLOCKHASH - // opcode. Hence no need to search for non-canonical blocks. 
-	header, err := context.b.HeaderByNumber(context.ctx, rpc.BlockNumber(number))
-	if err != nil || header.Hash() != hash {
-		return nil
+	state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+	if state == nil || err != nil {
+		return nil, err
 	}
-	return header
-}
-
-func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
 	if err := overrides.Apply(state); err != nil {
 		return nil, err
 	}
+	// If the request is for the pending block, override the block timestamp, number, and estimated
+	// base fee, so that the check runs as if it were run on a newly generated block.
+	if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber {
+		// Override header with a copy to ensure the original header is not modified
+		header = types.CopyHeader(header)
+		// Grab the hash of the unmodified header, so that the modified header can point to the
+		// prior block as its parent.
+		parentHash := header.Hash()
+		header.Time = uint64(time.Now().Unix())
+		header.ParentHash = parentHash
+		header.Number = new(big.Int).Add(header.Number, big.NewInt(1))
+		estimatedBaseFee, err := b.EstimateBaseFee(ctx)
+		if err != nil {
+			return nil, err
+		}
+		header.BaseFee = estimatedBaseFee
+	}
 
 	// Set up a context so it may be cancelled when the call has completed,
 	// or, in the case of unmetered gas, set up a context with a timeout.
@@ -1086,12 +930,10 @@
 	if err != nil {
 		return nil, err
 	}
-	blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil)
-	if blockOverrides != nil {
-		blockOverrides.Apply(&blockCtx)
+	evm, vmError, err := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true})
+	if err != nil {
+		return nil, err
 	}
-	evm := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx)
-
 	// Wait for the context to be done and cancel the evm. Even if the
 	// EVM has finished, cancelling may be done (repeatedly)
 	go func() {
@@ -1102,7 +944,7 @@
 	// Execute the message.
 	gp := new(core.GasPool).AddGas(math.MaxUint64)
 	result, err := core.ApplyMessage(evm, msg, gp)
-	if err := state.Error(); err != nil {
+	if err := vmError(); err != nil {
 		return nil, err
 	}
 
@@ -1111,38 +953,39 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
 		return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout)
 	}
 	if err != nil {
-		return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.GasLimit)
+		return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas())
 	}
 	return result, nil
 }
 
-func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
-	defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
-
-	state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
-	if state == nil || err != nil {
-		return nil, err
+func newRevertError(result *core.ExecutionResult) *revertError {
+	reason, errUnpack := abi.UnpackRevert(result.Revert())
+	err := errors.New("execution reverted")
+	if errUnpack == nil {
+		err = fmt.Errorf("execution reverted: %v", reason)
 	}
-
-	// If the request is for the pending block, override the block timestamp, number, and estimated
-	// base fee, so that the check runs as if it were run on a newly generated block.
-	if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber {
-		// Override header with a copy to ensure the original header is not modified
-		header = types.CopyHeader(header)
-		// Grab the hash of the unmodified header, so that the modified header can point to the
-		// prior block as its parent.
-		parentHash := header.Hash()
-		header.Time = uint64(time.Now().Unix())
-		header.ParentHash = parentHash
-		header.Number = new(big.Int).Add(header.Number, big.NewInt(1))
-		estimatedBaseFee, err := b.EstimateBaseFee(ctx)
-		if err != nil {
-			return nil, err
-		}
-		header.BaseFee = estimatedBaseFee
+	return &revertError{
+		error:  err,
+		reason: hexutil.Encode(result.Revert()),
 	}
+}
+
+// revertError is an API error that encompasses an EVM revert with JSON error
+// code and a binary data blob.
+type revertError struct {
+	error
+	reason string // revert reason hex encoded
+}
+
+// ErrorCode returns the JSON error code for a revert.
+// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
+func (e *revertError) ErrorCode() int {
+	return 3
+}
 
-	return doCall(ctx, b, args, state, header, overrides, blockOverrides, timeout, globalGasCap)
+// ErrorData returns the hex encoded revert reason.
+func (e *revertError) ErrorData() interface{} {
+	return e.reason
}

// Call executes the given transaction on the state for the given block number.
@@ -1151,75 +994,207 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash
//
// Note, this function doesn't make any changes in the state/blockchain and is
// useful to execute and retrieve values.
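
As an aside on the revertError type restored above: callers see JSON-RPC error code 3 with the raw return data hex-encoded in ErrorData, and abi.UnpackRevert recovers the reason string from the standard Error(string) encoding (4-byte selector 0x08c379a0 followed by an ABI-encoded string). A minimal, self-contained sketch of that decoding; decodeRevertReason and the hand-built payload are illustrative only, not part of this change:

package main

import (
	"fmt"
	"math/big"
)

// decodeRevertReason unpacks the standard Error(string) revert payload:
// 4-byte selector, 32-byte offset word, 32-byte length word, string bytes.
func decodeRevertReason(data []byte) (string, bool) {
	selector := []byte{0x08, 0xc3, 0x79, 0xa0}
	if len(data) < 4+64 || string(data[:4]) != string(selector) {
		return "", false
	}
	offset := new(big.Int).SetBytes(data[4 : 4+32]).Uint64()
	lenPos := 4 + offset
	if uint64(len(data)) < lenPos+32 {
		return "", false
	}
	strLen := new(big.Int).SetBytes(data[lenPos : lenPos+32]).Uint64()
	if uint64(len(data)) < lenPos+32+strLen {
		return "", false
	}
	return string(data[lenPos+32 : lenPos+32+strLen]), true
}

func main() {
	// ABI encoding of Error("out of tokens"), built by hand for the demo.
	payload := append([]byte{0x08, 0xc3, 0x79, 0xa0}, make([]byte, 64)...)
	payload[4+31] = 0x20 // offset = 32
	reason := []byte("out of tokens")
	payload[4+63] = byte(len(reason)) // length = 13
	word := make([]byte, 32)          // right-pad the string to a full word
	copy(word, reason)
	payload = append(payload, word...)
	if s, ok := decodeRevertReason(payload); ok {
		fmt.Println("execution reverted:", s)
	}
}
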
-func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) {
-	if blockNrOrHash == nil {
-		latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
-		blockNrOrHash = &latest
-	}
-	result, err := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
+func (s *PublicBlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) {
+	result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
 	if err != nil {
 		return nil, err
 	}
 	// If the result contains a revert reason, try to unpack and return it.
 	if len(result.Revert()) > 0 {
-		return nil, newRevertError(result.Revert())
+		return nil, newRevertError(result)
 	}
 	return result.Return(), result.Err
 }
 
-// DoEstimateGas returns the lowest possible gas limit that allows the transaction to run
-// successfully at block `blockNrOrHash`. It returns error if the transaction would revert, or if
-// there are unexpected failures. The gas limit is capped by both `args.Gas` (if non-nil &
-// non-zero) and `gasCap` (if non-zero).
-func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) {
-	// Retrieve the base state and mutate it with any overrides
-	state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
-	if state == nil || err != nil {
-		return 0, err
+func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) {
+	// Binary search the gas requirement, as it may be higher than the amount used
+	var (
+		lo  uint64 = params.TxGas - 1
+		hi  uint64
+		cap uint64
+	)
+	// Use zero address if sender unspecified.
+	if args.From == nil {
+		args.From = new(common.Address)
 	}
-	if err = overrides.Apply(state); err != nil {
-		return 0, err
+	// Determine the highest gas limit that can be used during the estimation.
+	if args.Gas != nil && uint64(*args.Gas) >= params.TxGas {
+		hi = uint64(*args.Gas)
+	} else {
+		// Retrieve the block to act as the gas ceiling
+		block, err := b.BlockByNumberOrHash(ctx, blockNrOrHash)
+		if err != nil {
+			return 0, err
+		}
+		if block == nil {
+			return 0, errors.New("block not found")
+		}
+		hi = block.GasLimit()
+	}
+	// Normalize the max fee per gas the call is willing to spend.
+	var feeCap *big.Int
+	if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) {
+		return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
+	} else if args.GasPrice != nil {
+		feeCap = args.GasPrice.ToInt()
+	} else if args.MaxFeePerGas != nil {
+		feeCap = args.MaxFeePerGas.ToInt()
+	} else {
+		feeCap = common.Big0
 	}
-	// Construct the gas estimator option from the user input
-	opts := &gasestimator.Options{
-		Config:     b.ChainConfig(),
-		Chain:      NewChainContext(ctx, b),
-		Header:     header,
-		State:      state,
-		ErrorRatio: estimateGasErrorRatio,
+	// Recap the highest gas limit with the account's available balance.
+	if feeCap.BitLen() != 0 {
+		state, _, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+		if err != nil {
+			return 0, err
+		}
+		balance := state.GetBalance(*args.From) // from can't be nil
+		available := new(big.Int).Set(balance)
+		if args.Value != nil {
+			if args.Value.ToInt().Cmp(available) >= 0 {
+				return 0, errors.New("insufficient funds for transfer")
+			}
+			available.Sub(available, args.Value.ToInt())
+		}
+		allowance := new(big.Int).Div(available, feeCap)
+
+		// If the allowance is larger than the maximum uint64, skip checking
+		if allowance.IsUint64() && hi > allowance.Uint64() {
+			transfer := args.Value
+			if transfer == nil {
+				transfer = new(hexutil.Big)
+			}
+			log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
+				"sent", transfer.ToInt(), "maxFeePerGas", feeCap, "fundable", allowance)
+			hi = allowance.Uint64()
+		}
	}
+	// Recap the highest gas allowance with specified gascap.
+	if gasCap != 0 && hi > gasCap {
+		log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
+		hi = gasCap
+	}
+	cap = hi
 
-	// If the user has not specified a gas limit, use the block gas limit
-	if args.Gas == nil {
-		args.Gas = new(hexutil.Uint64)
-		*args.Gas = hexutil.Uint64(header.GasLimit)
+	// Create a helper to check if a gas allowance results in an executable transaction
+	executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
+		args.Gas = (*hexutil.Uint64)(&gas)
+
+		result, err := DoCall(ctx, b, args, blockNrOrHash, nil, 0, gasCap)
+		if err != nil {
+			if errors.Is(err, core.ErrIntrinsicGas) {
+				return true, nil, nil // Special case, raise gas limit
+			}
+			return true, nil, err // Bail out
+		}
+		return result.Failed(), result, nil
 	}
-	// Run the gas estimation and wrap any revertals into a custom return
-	call, err := args.ToMessage(gasCap, header.BaseFee)
-	if err != nil {
-		return 0, err
+	// Execute the binary search and hone in on an executable gas limit
+	for lo+1 < hi {
+		mid := (hi + lo) / 2
+		failed, _, err := executable(mid)
+
+		// If the error is not nil (consensus error), it means the provided message
+		// call or transaction will never be accepted no matter how much gas it is
+		// assigned. Return the error directly, don't struggle any more.
+		if err != nil {
+			return 0, err
+		}
+		if failed {
+			lo = mid
+		} else {
+			hi = mid
+		}
 	}
-	estimate, revert, err := gasestimator.Estimate(ctx, call, opts, gasCap)
-	if err != nil {
-		if len(revert) > 0 {
-			return 0, newRevertError(revert)
+	// Reject the transaction as invalid if it still fails at the highest allowance
+	if hi == cap {
+		failed, result, err := executable(hi)
+		if err != nil {
+			return 0, err
+		}
+		if failed {
+			if result != nil && result.Err != vmerrs.ErrOutOfGas {
+				if len(result.Revert()) > 0 {
+					return 0, newRevertError(result)
+				}
+				return 0, result.Err
+			}
+			// Otherwise, the specified gas cap is too low
+			return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
 		}
-		return 0, err
 	}
-	return hexutil.Uint64(estimate), nil
+	return hexutil.Uint64(hi), nil
 }
 
-// EstimateGas returns the lowest possible gas limit that allows the transaction to run
-// successfully at block `blockNrOrHash`, or the latest block if `blockNrOrHash` is unspecified. It
-// returns error if the transaction would revert or if there are unexpected failures. The returned
-// value is capped by both `args.Gas` (if non-nil & non-zero) and the backend's RPCGasCap
-// configuration (if non-zero).
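
The loop restored above is a plain bisection over the gas limit: any amount below the true requirement fails, anything at or above it succeeds, so the window halves on every call and converges in roughly log2(blockGasLimit) executions. A standalone sketch of that invariant, with a stubbed predicate in place of a real EVM run (bisectGasLimit and the toy threshold are hypothetical names, not part of this diff):

package main

import "fmt"

// bisectGasLimit narrows (lo, hi] until hi is the smallest gas amount for
// which the call stops failing, mirroring the loop in DoEstimateGas.
func bisectGasLimit(lo, hi uint64, executable func(gas uint64) (failed bool, err error)) (uint64, error) {
	for lo+1 < hi {
		mid := lo + (hi-lo)/2
		failed, err := executable(mid)
		if err != nil {
			return 0, err // a consensus error: no gas amount will help
		}
		if failed {
			lo = mid
		} else {
			hi = mid
		}
	}
	return hi, nil
}

func main() {
	const need = 53_000 // toy threshold standing in for a real EVM execution
	gas, _ := bisectGasLimit(21_000-1, 8_000_000, func(gas uint64) (bool, error) {
		return gas < need, nil
	})
	fmt.Println("estimated gas:", gas) // 53000
}
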
-func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) { - bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) +// EstimateGas returns an estimate of the amount of gas needed to execute the +// given transaction against the current pending block. +func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { + bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash } - return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCGasCap()) + return DoEstimateGas(ctx, s.b, args, bNrOrHash, s.b.RPCGasCap()) +} + +// ExecutionResult groups all structured logs emitted by the EVM +// while replaying a transaction in debug mode as well as transaction +// execution status, the amount of gas used and the return value +type ExecutionResult struct { + Gas uint64 `json:"gas"` + Failed bool `json:"failed"` + ReturnValue string `json:"returnValue"` + StructLogs []StructLogRes `json:"structLogs"` +} + +// StructLogRes stores a structured log emitted by the EVM while replaying a +// transaction in debug mode +type StructLogRes struct { + Pc uint64 `json:"pc"` + Op string `json:"op"` + Gas uint64 `json:"gas"` + GasCost uint64 `json:"gasCost"` + Depth int `json:"depth"` + Error string `json:"error,omitempty"` + Stack *[]string `json:"stack,omitempty"` + Memory *[]string `json:"memory,omitempty"` + Storage *map[string]string `json:"storage,omitempty"` +} + +// FormatLogs formats EVM returned structured logs for json output +func FormatLogs(logs []logger.StructLog) []StructLogRes { + formatted := make([]StructLogRes, len(logs)) + for index, trace := range logs { + formatted[index] = StructLogRes{ + Pc: trace.Pc, + Op: trace.Op.String(), + Gas: trace.Gas, + GasCost: trace.GasCost, + Depth: trace.Depth, + Error: trace.ErrorString(), + } + if trace.Stack != nil { + stack := make([]string, len(trace.Stack)) + for i, stackValue := range trace.Stack { + stack[i] = stackValue.Hex() + } + formatted[index].Stack = &stack + } + if trace.Memory != nil { + memory := make([]string, 0, (len(trace.Memory)+31)/32) + for i := 0; i+32 <= len(trace.Memory); i += 32 { + memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + } + formatted[index].Memory = &memory + } + if trace.Storage != nil { + storage := make(map[string]string) + for i, storageValue := range trace.Storage { + storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) + } + formatted[index].Storage = &storage + } + } + return formatted } // RPCMarshalHeader converts the given header to the RPC output . 
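
Stepping back to the funds cap in DoEstimateGas above: with balance b, attached value v and fee cap f, the account can pay for at most (b - v) / f gas, and the search ceiling hi is only lowered when that allowance is smaller. A worked sketch of the arithmetic with made-up numbers:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	balance := big.NewInt(1_000_000_000_000_000_000) // 1 ether in wei
	value := big.NewInt(250_000_000_000_000_000)     // 0.25 ether sent along
	feeCap := big.NewInt(25_000_000_000)             // 25 gwei max fee per gas

	available := new(big.Int).Sub(balance, value)
	allowance := new(big.Int).Div(available, feeCap)
	fmt.Println("gas allowance:", allowance) // 30000000

	// As in DoEstimateGas, the search ceiling drops only when it exceeds
	// what the account can actually fund.
	hi := uint64(8_000_000)
	if allowance.IsUint64() && hi > allowance.Uint64() {
		hi = allowance.Uint64()
	}
	fmt.Println("search ceiling:", hi) // 8000000 (the allowance is larger here)
}
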
@@ -1236,6 +1211,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { "miner": head.Coinbase, "difficulty": (*hexutil.Big)(head.Difficulty), "extraData": hexutil.Bytes(head.Extra), + "size": hexutil.Uint64(head.Size()), "gasLimit": hexutil.Uint64(head.GasLimit), "gasUsed": hexutil.Uint64(head.GasUsed), "timestamp": hexutil.Uint64(head.Time), @@ -1243,6 +1219,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { "receiptsRoot": head.ReceiptHash, "extDataHash": head.ExtDataHash, } + if head.BaseFee != nil { result["baseFeePerGas"] = (*hexutil.Big)(head.BaseFee) } @@ -1252,39 +1229,34 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { if head.BlockGasCost != nil { result["blockGasCost"] = (*hexutil.Big)(head.BlockGasCost) } - if head.BlobGasUsed != nil { - result["blobGasUsed"] = hexutil.Uint64(*head.BlobGasUsed) - } - if head.ExcessBlobGas != nil { - result["excessBlobGas"] = hexutil.Uint64(*head.ExcessBlobGas) - } - if head.ParentBeaconRoot != nil { - result["parentBeaconBlockRoot"] = head.ParentBeaconRoot - } + return result } // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. -func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} { +func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) (map[string]interface{}, error) { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) fields["blockExtraData"] = hexutil.Bytes(block.ExtData()) if inclTx { - formatTx := func(idx int, tx *types.Transaction) interface{} { - return tx.Hash() + formatTx := func(tx *types.Transaction) (interface{}, error) { + return tx.Hash(), nil } if fullTx { - formatTx = func(idx int, tx *types.Transaction) interface{} { - return newRPCTransactionFromBlockIndex(block, uint64(idx), config) + formatTx = func(tx *types.Transaction) (interface{}, error) { + return newRPCTransactionFromBlockHash(block, tx.Hash(), config), nil } } txs := block.Transactions() transactions := make([]interface{}, len(txs)) + var err error for i, tx := range txs { - transactions[i] = formatTx(i, tx) + if transactions[i], err = formatTx(tx); err != nil { + return nil, err + } } fields["transactions"] = transactions } @@ -1294,12 +1266,13 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param uncleHashes[i] = uncle.Hash() } fields["uncles"] = uncleHashes - return fields + + return fields, nil } // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires -// a `BlockchainAPI`. -func (s *BlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Header) map[string]interface{} { +// a `PublicBlockchainAPI`. +func (s *PublicBlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Header) map[string]interface{} { fields := RPCMarshalHeader(header) // Note: Coreth enforces that the difficulty of a block is always 1, such that the total difficulty of a block // will be equivalent to its height. @@ -1308,47 +1281,56 @@ func (s *BlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Head } // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires -// a `BlockchainAPI`. 
-func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - fields := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) +// a `PublicBlockchainAPI`. +func (s *PublicBlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { + fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) + if err != nil { + return nil, err + } if inclTx { // Note: Coreth enforces that the difficulty of a block is always 1, such that the total difficulty of a block // will be equivalent to its height. fields["totalDifficulty"] = (*hexutil.Big)(b.Number()) } - return fields, nil + return fields, err } // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction type RPCTransaction struct { - BlockHash *common.Hash `json:"blockHash"` - BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` - GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` - MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"` - Hash common.Hash `json:"hash"` - Input hexutil.Bytes `json:"input"` - Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` - Value *hexutil.Big `json:"value"` - Type hexutil.Uint64 `json:"type"` - Accesses *types.AccessList `json:"accessList,omitempty"` - ChainID *hexutil.Big `json:"chainId,omitempty"` - BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` - YParity *hexutil.Uint64 `json:"yParity,omitempty"` + BlockHash *common.Hash `json:"blockHash"` + BlockNumber *hexutil.Big `json:"blockNumber"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` + GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` + Hash common.Hash `json:"hash"` + Input hexutil.Bytes `json:"input"` + Nonce hexutil.Uint64 `json:"nonce"` + To *common.Address `json:"to"` + TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` + Value *hexutil.Big `json:"value"` + Type hexutil.Uint64 `json:"type"` + Accesses *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` } // newRPCTransaction returns a transaction that will serialize to the RPC // representation, with the given location metadata set (if available). -func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, blockTime uint64, index uint64, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { - signer := types.MakeSigner(config, new(big.Int).SetUint64(blockNumber), blockTime) +func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { + // Determine the signer. For replay-protected transactions, use the most permissive + // signer, because we assume that signers are backwards-compatible with old + // transactions. 
For non-protected transactions, the homestead signer is used
+	// because the return value of ChainId is zero for those transactions.
+	var signer types.Signer
+	if tx.Protected() {
+		signer = types.LatestSignerForChainID(tx.ChainId())
+	} else {
+		signer = types.HomesteadSigner{}
+	}
 	from, _ := types.Sender(signer, tx)
 	v, r, s := tx.RawSignatureValues()
 	result := &RPCTransaction{
@@ -1370,82 +1352,36 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
 		result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
 		result.TransactionIndex = (*hexutil.Uint64)(&index)
 	}
-	switch tx.Type() {
-	case types.LegacyTxType:
-		// if a legacy transaction has an EIP-155 chain id, include it explicitly
-		if id := tx.ChainId(); id.Sign() != 0 {
-			result.ChainID = (*hexutil.Big)(id)
-		}
-	case types.AccessListTxType:
 		al := tx.AccessList()
-		yparity := hexutil.Uint64(v.Sign())
 		result.Accesses = &al
 		result.ChainID = (*hexutil.Big)(tx.ChainId())
-		result.YParity = &yparity
-	case types.DynamicFeeTxType:
 		al := tx.AccessList()
-		yparity := hexutil.Uint64(v.Sign())
 		result.Accesses = &al
 		result.ChainID = (*hexutil.Big)(tx.ChainId())
-		result.YParity = &yparity
 		result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
 		result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
 		// if the transaction has been mined, compute the effective gas price
 		if baseFee != nil && blockHash != (common.Hash{}) {
-			// price = min(gasTipCap + baseFee, gasFeeCap)
-			result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
+			// price = min(tip, gasFeeCap - baseFee) + baseFee
+			price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
+			result.GasPrice = (*hexutil.Big)(price)
 		} else {
 			result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
 		}
-
-	case types.BlobTxType:
-		al := tx.AccessList()
-		yparity := hexutil.Uint64(v.Sign())
-		result.Accesses = &al
-		result.ChainID = (*hexutil.Big)(tx.ChainId())
-		result.YParity = &yparity
-		result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
-		result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
-		// if the transaction has been mined, compute the effective gas price
-		if baseFee != nil && blockHash != (common.Hash{}) {
-			result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
-		} else {
-			result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
-		}
-		result.MaxFeePerBlobGas = (*hexutil.Big)(tx.BlobGasFeeCap())
-		result.BlobVersionedHashes = tx.BlobHashes()
 	}
 	return result
 }
 
-// effectiveGasPrice computes the transaction gas fee, based on the given basefee value.
-//
-// price = min(gasTipCap + baseFee, gasFeeCap)
-func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int {
-	fee := tx.GasTipCap()
-	fee = fee.Add(fee, baseFee)
-	if tx.GasFeeCapIntCmp(fee) < 0 {
-		return tx.GasFeeCap()
-	}
-	return fee
-}
-
-// NewRPCTransaction returns a pending transaction that will serialize to the RPC representation
-// Note: in go-ethereum this function is called NewRPCPendingTransaction.
-// In coreth, we have renamed it to NewRPCTransaction as it is used for accepted transactions as well.
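
The restored comment states the effective price as min(tip, gasFeeCap - baseFee) + baseFee while the code computes math.BigMin(gasTipCap + baseFee, gasFeeCap); the two forms agree whenever gasFeeCap >= baseFee, which holds for any includable transaction. A quick standalone check with illustrative values:

package main

import (
	"fmt"
	"math/big"
)

func min(a, b *big.Int) *big.Int {
	if a.Cmp(b) < 0 {
		return a
	}
	return b
}

func main() {
	tip := big.NewInt(2_000_000_000)      // 2 gwei priority fee
	feeCap := big.NewInt(30_000_000_000)  // 30 gwei max fee
	baseFee := big.NewInt(29_000_000_000) // 29 gwei base fee

	// Form used by the code: min(tip + baseFee, feeCap)
	a := min(new(big.Int).Add(tip, baseFee), feeCap)

	// Form used by the comment: min(tip, feeCap - baseFee) + baseFee
	b := new(big.Int).Add(min(tip, new(big.Int).Sub(feeCap, baseFee)), baseFee)

	fmt.Println(a, b) // both 30000000000: the fee cap binds
}
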
-func NewRPCTransaction(tx *types.Transaction, current *types.Header, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction {
-	var (
-		blockNumber = uint64(0)
-		blockTime   = uint64(0)
-	)
+// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation
+func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction {
+	blockNumber := uint64(0)
 	if current != nil {
 		blockNumber = current.Number.Uint64()
-		blockTime = current.Time
 	}
-	return newRPCTransaction(tx, common.Hash{}, blockNumber, blockTime, 0, baseFee, config)
+	return newRPCTransaction(tx, common.Hash{}, blockNumber, 0, baseFee, config)
 }
 
 // newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation.
@@ -1454,7 +1390,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *param
 	if index >= uint64(len(txs)) {
 		return nil
 	}
-	return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), b.Time(), index, b.BaseFee(), config)
+	return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee(), config)
 }
 
 // newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index.
@@ -1467,8 +1403,18 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By
 	return blob
 }
 
+// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
+func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash, config *params.ChainConfig) *RPCTransaction {
+	for idx, tx := range b.Transactions() {
+		if tx.Hash() == hash {
+			return newRPCTransactionFromBlockIndex(b, uint64(idx), config)
+		}
+	}
+	return nil
+}
+
 // accessListResult returns an optional accesslist
-// It's the result of the `debug_createAccessList` RPC call.
+// It's the result of the `debug_createAccessList` RPC call.
 // It contains an error if the transaction itself failed.
 type accessListResult struct {
 	Accesslist *types.AccessList `json:"accessList"`
@@ -1476,9 +1422,9 @@ type accessListResult struct {
 	GasUsed    hexutil.Uint64    `json:"gasUsed"`
 }
 
-// CreateAccessList creates an EIP-2930 type AccessList for the given transaction.
+// CreateAccessList creates an EIP-2930 type AccessList for the given transaction.
 // Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state.
-func (s *BlockChainAPI) CreateAccessList(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) {
+func (s *PublicBlockChainAPI) CreateAccessList(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) {
 	bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
 	if blockNrOrHash != nil {
 		bNrOrHash = *blockNrOrHash
@@ -1503,11 +1449,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
 	if db == nil || err != nil {
 		return nil, 0, nil, err
 	}
-	// If the gas amount is not set, default to RPC gas cap.
- if args.Gas == nil { - tmp := hexutil.Uint64(b.RPCGasCap()) - args.Gas = &tmp - } + // If the gas amount is not set, extract this as it will depend on access + // lists and we'll need to reestimate every time + nogas := args.Gas == nil // Ensure any missing fields are filled, extract the recipient and input data if err := args.setDefaults(ctx, b); err != nil { @@ -1520,7 +1464,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH to = crypto.CreateAddress(args.from(), uint64(*args.Nonce)) } // Retrieve the precompiles since they don't need to be added to the access list - precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, header.Time)) + precompiles := vm.ActivePrecompiles(b.ChainConfig().AvalancheRules(header.Number, new(big.Int).SetUint64(header.Time))) // Create an initial tracer prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles) @@ -1532,9 +1476,19 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH accessList := prevTracer.AccessList() log.Trace("Creating access list", "input", accessList) + // If no gas amount was specified, each unique access list needs it's own + // gas calculation. This is quite expensive, but we need to be accurate + // and it's convered by the sender only anyway. + if nogas { + args.Gas = nil + if err := args.setDefaults(ctx, b); err != nil { + return nil, 0, nil, err // shouldn't happen, just in case + } + } // Copy the original db so we don't modify it statedb := db.Copy() - // Set the accesslist to the last al + // Set the access list tracer to the last al + args.AccessList = &accessList msg, err := args.ToMessage(b.RPCGasCap(), header.BaseFee) if err != nil { @@ -1543,9 +1497,12 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH // Apply the transaction with the access list tracer tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles) - config := vm.Config{Tracer: tracer, NoBaseFee: true} - vmenv := b.GetEVM(ctx, msg, statedb, header, &config, nil) - res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) + config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} + vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config) + if err != nil { + return nil, 0, nil, err + } + res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) if err != nil { return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.toTransaction().Hash(), err) } @@ -1556,23 +1513,23 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH } } -// TransactionAPI exposes methods for reading and creating transaction data. -type TransactionAPI struct { +// PublicTransactionPoolAPI exposes methods for the RPC interface +type PublicTransactionPoolAPI struct { b Backend nonceLock *AddrLocker signer types.Signer } -// NewTransactionAPI creates a new RPC service with methods for interacting with transactions. -func NewTransactionAPI(b Backend, nonceLock *AddrLocker) *TransactionAPI { +// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool. +func NewPublicTransactionPoolAPI(b Backend, nonceLock *AddrLocker) *PublicTransactionPoolAPI { // The signer used by the API should always be the 'latest' known one because we expect // signers to be backwards-compatible with old transactions. 
signer := types.LatestSigner(b.ChainConfig()) - return &TransactionAPI{b, nonceLock, signer} + return &PublicTransactionPoolAPI{b, nonceLock, signer} } // GetBlockTransactionCountByNumber returns the number of transactions in the block with the given block number. -func (s *TransactionAPI) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) *hexutil.Uint { +func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) *hexutil.Uint { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { n := hexutil.Uint(len(block.Transactions())) return &n @@ -1581,7 +1538,7 @@ func (s *TransactionAPI) GetBlockTransactionCountByNumber(ctx context.Context, b } // GetBlockTransactionCountByHash returns the number of transactions in the block with the given hash. -func (s *TransactionAPI) GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) *hexutil.Uint { +func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) *hexutil.Uint { if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil { n := hexutil.Uint(len(block.Transactions())) return &n @@ -1590,7 +1547,7 @@ func (s *TransactionAPI) GetBlockTransactionCountByHash(ctx context.Context, blo } // GetTransactionByBlockNumberAndIndex returns the transaction for the given block number and index. -func (s *TransactionAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) *RPCTransaction { +func (s *PublicTransactionPoolAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { return newRPCTransactionFromBlockIndex(block, uint64(index), s.b.ChainConfig()) } @@ -1598,7 +1555,7 @@ func (s *TransactionAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context } // GetTransactionByBlockHashAndIndex returns the transaction for the given block hash and index. -func (s *TransactionAPI) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) *RPCTransaction { +func (s *PublicTransactionPoolAPI) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil { return newRPCTransactionFromBlockIndex(block, uint64(index), s.b.ChainConfig()) } @@ -1606,7 +1563,7 @@ func (s *TransactionAPI) GetTransactionByBlockHashAndIndex(ctx context.Context, } // GetRawTransactionByBlockNumberAndIndex returns the bytes of the transaction for the given block number and index. -func (s *TransactionAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) hexutil.Bytes { +func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) hexutil.Bytes { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { return newRPCRawTransactionFromBlockIndex(block, uint64(index)) } @@ -1614,7 +1571,7 @@ func (s *TransactionAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Cont } // GetRawTransactionByBlockHashAndIndex returns the bytes of the transaction for the given block hash and index. 
-func (s *TransactionAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) hexutil.Bytes { +func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) hexutil.Bytes { if block, _ := s.b.BlockByHash(ctx, blockHash); block != nil { return newRPCRawTransactionFromBlockIndex(block, uint64(index)) } @@ -1622,7 +1579,7 @@ func (s *TransactionAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Contex } // GetTransactionCount returns the number of transactions the given address has sent for the given block number -func (s *TransactionAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) { +func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) { // Ask transaction pool for the nonce which includes pending transactions if blockNr, ok := blockNrOrHash.Number(); ok && blockNr == rpc.PendingBlockNumber { nonce, err := s.b.GetPoolNonce(ctx, address) @@ -1641,7 +1598,7 @@ func (s *TransactionAPI) GetTransactionCount(ctx context.Context, address common } // GetTransactionByHash returns the transaction for the given hash -func (s *TransactionAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { +func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { // Try to return an already finalized transaction tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash) if err != nil { @@ -1652,12 +1609,12 @@ func (s *TransactionAPI) GetTransactionByHash(ctx context.Context, hash common.H if err != nil { return nil, err } - return newRPCTransaction(tx, blockHash, blockNumber, header.Time, index, header.BaseFee, s.b.ChainConfig()), nil + return newRPCTransaction(tx, blockHash, blockNumber, index, header.BaseFee, s.b.ChainConfig()), nil } // No finalized transaction, try to retrieve it from the pool if tx := s.b.GetPoolTransaction(hash); tx != nil { estimatedBaseFee, _ := s.b.EstimateBaseFee(ctx) - return NewRPCTransaction(tx, s.b.CurrentHeader(), estimatedBaseFee, s.b.ChainConfig()), nil + return newRPCPendingTransaction(tx, s.b.CurrentHeader(), estimatedBaseFee, s.b.ChainConfig()), nil } // Transaction unknown, return as such @@ -1665,7 +1622,7 @@ func (s *TransactionAPI) GetTransactionByHash(ctx context.Context, hash common.H } // GetRawTransactionByHash returns the bytes of the transaction for the given hash. -func (s *TransactionAPI) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { +func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { // Retrieve a finalized transaction, or a pooled otherwise tx, _, _, _, err := s.b.GetTransaction(ctx, hash) if err != nil { @@ -1682,11 +1639,9 @@ func (s *TransactionAPI) GetRawTransactionByHash(ctx context.Context, hash commo } // GetTransactionReceipt returns the transaction receipt for the given transaction hash. 
-func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { +func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash) - if tx == nil || err != nil { - // When the transaction doesn't exist, the RPC method should return JSON null - // as per specification. + if err != nil { return nil, nil } header, err := s.b.HeaderByHash(ctx, blockHash) @@ -1697,25 +1652,22 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. if err != nil { return nil, err } - if uint64(len(receipts)) <= index { + if len(receipts) <= int(index) { return nil, nil } receipt := receipts[index] // Derive the sender. - signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time) - return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil -} - -// marshalReceipt marshals a transaction receipt into a JSON object. -func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { + bigblock := new(big.Int).SetUint64(blockNumber) + timestamp := new(big.Int).SetUint64(header.Time) + signer := types.MakeSigner(s.b.ChainConfig(), bigblock, timestamp) from, _ := types.Sender(signer, tx) fields := map[string]interface{}{ "blockHash": blockHash, "blockNumber": hexutil.Uint64(blockNumber), - "transactionHash": tx.Hash(), - "transactionIndex": hexutil.Uint64(txIndex), + "transactionHash": hash, + "transactionIndex": hexutil.Uint64(index), "from": from, "to": tx.To(), "gasUsed": hexutil.Uint64(receipt.GasUsed), @@ -1724,9 +1676,18 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u "logs": receipt.Logs, "logsBloom": receipt.Bloom, "type": hexutil.Uint(tx.Type()), - "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } - + // Assign the effective gas price paid + if !s.b.ChainConfig().IsApricotPhase3(timestamp) { + fields["effectiveGasPrice"] = hexutil.Uint64(tx.GasPrice().Uint64()) + } else { + header, err := s.b.HeaderByHash(ctx, blockHash) + if err != nil { + return nil, err + } + gasPrice := new(big.Int).Add(header.BaseFee, tx.EffectiveGasTipValue(header.BaseFee)) + fields["effectiveGasPrice"] = hexutil.Uint64(gasPrice.Uint64()) + } // Assign receipt status or post state. if len(receipt.PostState) > 0 { fields["root"] = hexutil.Bytes(receipt.PostState) @@ -1736,21 +1697,15 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u if receipt.Logs == nil { fields["logs"] = []*types.Log{} } - - if tx.Type() == types.BlobTxType { - fields["blobGasUsed"] = hexutil.Uint64(receipt.BlobGasUsed) - fields["blobGasPrice"] = (*hexutil.Big)(receipt.BlobGasPrice) - } - // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation if receipt.ContractAddress != (common.Address{}) { fields["contractAddress"] = receipt.ContractAddress } - return fields + return fields, nil } // sign is a helper function that signs a transaction with the private key of the given address. 
-func (s *TransactionAPI) sign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
+func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
 	// Look up the wallet containing the requested signer
 	account := accounts.Account{Address: addr}
 
@@ -1769,7 +1724,7 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
 	if err := checkTxFee(tx.GasPrice(), tx.Gas(), b.RPCTxFeeCap()); err != nil {
 		return common.Hash{}, err
 	}
-	if !b.UnprotectedAllowed(tx) && !tx.Protected() {
+	if !b.UnprotectedAllowed() && !tx.Protected() {
 		// Ensure only eip155 signed transactions are submitted if EIP155Required is set.
 		return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC")
 	}
@@ -1777,8 +1732,8 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
 		return common.Hash{}, err
 	}
 	// Print a log with full tx details for manual investigations and interventions
-	head := b.CurrentBlock()
-	signer := types.MakeSigner(b.ChainConfig(), head.Number, head.Time)
+	currentBlock := b.CurrentBlock()
+	signer := types.MakeSigner(b.ChainConfig(), currentBlock.Number(), new(big.Int).SetUint64(currentBlock.Time()))
 	from, err := types.Sender(signer, tx)
 	if err != nil {
 		return common.Hash{}, err
@@ -1795,7 +1750,7 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
 
 // SendTransaction creates a transaction for the given argument, sign it and submit it to the
 // transaction pool.
-func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) {
+func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) {
 	// Look up the wallet containing the requested signer
 	account := accounts.Account{Address: args.from()}
 
@@ -1805,7 +1760,7 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr
 	}
 
 	if args.Nonce == nil {
-		// Hold the mutex around signing to prevent concurrent assignment of
+		// Hold the address's mutex around signing to prevent concurrent assignment of
 		// the same nonce to multiple accounts.
 		s.nonceLock.LockAddr(args.from())
 		defer s.nonceLock.UnlockAddr(args.from())
@@ -1828,7 +1783,7 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr
 // FillTransaction fills the defaults (nonce, gas, gasPrice or 1559 fields)
 // on a given unsigned transaction, and returns it to the caller for further
 // processing (signing + broadcast).
-func (s *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
+func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
 	// Set some sanity defaults and terminate on failure
 	if err := args.setDefaults(ctx, s.b); err != nil {
 		return nil, err
@@ -1844,7 +1799,7 @@ func (s *TransactionAPI) FillTransaction(ctx context.Context, args TransactionAr
 
 // SendRawTransaction will add the signed transaction to the transaction pool.
 // The sender is responsible for signing the transaction and using the correct nonce.
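
SubmitTransaction above rejects absurdly expensive transactions via checkTxFee, whose tail appears later in this diff; the check boils down to converting gasPrice * gas from wei into ether and comparing it to the float cap. A sketch of that conversion, assuming the usual 1e18 wei-per-ether factor (feeTooHigh is an illustrative stand-in, not the diff's exact helper body):

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var oneEther = new(big.Float).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))

// feeTooHigh reports whether gasPrice*gas, denominated in ether, exceeds cap.
func feeTooHigh(gasPrice *big.Int, gas uint64, cap float64) error {
	if cap == 0 {
		return nil // a zero cap disables the check
	}
	feeWei := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))
	feeEth := new(big.Float).Quo(new(big.Float).SetInt(feeWei), oneEther)
	if f, _ := feeEth.Float64(); f > cap {
		return errors.New("tx fee exceeds the configured cap")
	}
	return nil
}

func main() {
	gasPrice := big.NewInt(500_000_000_000)     // 500 gwei
	err := feeTooHigh(gasPrice, 5_000_000, 1.0) // fee = 2.5 ether > 1 ether cap
	fmt.Println(err)
}
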
-func (s *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) {
+func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) {
 	tx := new(types.Transaction)
 	if err := tx.UnmarshalBinary(input); err != nil {
 		return common.Hash{}, err
@@ -1853,7 +1808,7 @@ func (s *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.B
 }
 
 // Sign calculates an ECDSA signature for:
-// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message).
+// keccak256("\x19Ethereum Signed Message:\n" + len(message) + message).
 //
 // Note, the produced signature conforms to the secp256k1 curve R, S and V values,
 // where the V value will be 27 or 28 for legacy reasons.
@@ -1861,7 +1816,7 @@ func (s *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.B
 // The account associated with addr must be unlocked.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sign
-func (s *TransactionAPI) Sign(addr common.Address, data hexutil.Bytes) (hexutil.Bytes, error) {
+func (s *PublicTransactionPoolAPI) Sign(addr common.Address, data hexutil.Bytes) (hexutil.Bytes, error) {
 	// Look up the wallet containing the requested signer
 	account := accounts.Account{Address: addr}
 
@@ -1886,15 +1841,15 @@ type SignTransactionResult struct {
 // SignTransaction will sign the given transaction with the from account.
 // The node needs to have the private key of the account corresponding with
 // the given from address and it needs to be unlocked.
-func (s *TransactionAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
+func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
 	if args.Gas == nil {
-		return nil, errors.New("gas not specified")
+		return nil, fmt.Errorf("gas not specified")
 	}
 	if args.GasPrice == nil && (args.MaxPriorityFeePerGas == nil || args.MaxFeePerGas == nil) {
-		return nil, errors.New("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas")
+		return nil, fmt.Errorf("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas")
	}
 	if args.Nonce == nil {
-		return nil, errors.New("nonce not specified")
+		return nil, fmt.Errorf("nonce not specified")
 	}
 	if err := args.setDefaults(ctx, s.b); err != nil {
 		return nil, err
@@ -1917,7 +1872,7 @@ func (s *TransactionAPI) SignTransaction(ctx context.Context, args TransactionAr
 
 // PendingTransactions returns the transactions that are in the transaction pool
 // and have a from address that is one of the accounts this node manages.
-func (s *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) {
+func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, error) {
 	pending, err := s.b.GetPoolTransactions()
 	if err != nil {
 		return nil, err
@@ -1934,7 +1889,7 @@ func (s *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) {
 		from, _ := types.Sender(s.signer, tx)
 		if _, exists := accounts[from]; exists {
 			estimatedBaseFee, _ := s.b.EstimateBaseFee(context.Background())
-			transactions = append(transactions, NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()))
+			transactions = append(transactions, newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()))
 		}
 	}
 	return transactions, nil
@@ -1942,9 +1897,9 @@ func (s *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) {
 
 // Resend accepts an existing transaction and a new gas price and limit.
It will remove
 // the given transaction from the pool and reinsert it with the new gas price and limit.
-func (s *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) {
+func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) {
 	if sendArgs.Nonce == nil {
-		return common.Hash{}, errors.New("missing transaction nonce in transaction spec")
+		return common.Hash{}, fmt.Errorf("missing transaction nonce in transaction spec")
 	}
 	if err := sendArgs.setDefaults(ctx, s.b); err != nil {
 		return common.Hash{}, err
@@ -1992,129 +1947,103 @@ func (s *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, g
 	return common.Hash{}, fmt.Errorf("transaction %#x not found", matchTx.Hash())
 }
 
-// DebugAPI is the collection of Ethereum APIs exposed over the debugging
-// namespace.
-type DebugAPI struct {
+// PublicDebugAPI is the collection of Ethereum APIs exposed over the public
+// debugging endpoint.
+type PublicDebugAPI struct {
 	b Backend
 }
 
-// NewDebugAPI creates a new instance of DebugAPI.
-func NewDebugAPI(b Backend) *DebugAPI {
-	return &DebugAPI{b: b}
+// NewPublicDebugAPI creates a new API definition for the public debug methods
+// of the Ethereum service.
+func NewPublicDebugAPI(b Backend) *PublicDebugAPI {
+	return &PublicDebugAPI{b: b}
 }
 
-// GetRawHeader retrieves the RLP encoding for a single header.
-func (api *DebugAPI) GetRawHeader(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
-	var hash common.Hash
-	if h, ok := blockNrOrHash.Hash(); ok {
-		hash = h
-	} else {
-		block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash)
-		if err != nil {
-			return nil, err
-		}
-		hash = block.Hash()
-	}
-	header, _ := api.b.HeaderByHash(ctx, hash)
+// GetHeaderRlp retrieves the RLP encoded form of a single header.
+func (api *PublicDebugAPI) GetHeaderRlp(ctx context.Context, number uint64) (hexutil.Bytes, error) {
+	header, _ := api.b.HeaderByNumber(ctx, rpc.BlockNumber(number))
 	if header == nil {
-		return nil, fmt.Errorf("header #%d not found", hash)
+		return nil, fmt.Errorf("header #%d not found", number)
 	}
 	return rlp.EncodeToBytes(header)
 }
 
-// GetRawBlock retrieves the RLP encoded for a single block.
-func (api *DebugAPI) GetRawBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
-	var hash common.Hash
-	if h, ok := blockNrOrHash.Hash(); ok {
-		hash = h
-	} else {
-		block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash)
-		if err != nil {
-			return nil, err
-		}
-		hash = block.Hash()
-	}
-	block, _ := api.b.BlockByHash(ctx, hash)
+// GetBlockRlp retrieves the RLP encoded form of a single block.
+func (api *PublicDebugAPI) GetBlockRlp(ctx context.Context, number uint64) (hexutil.Bytes, error) {
+	block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
 	if block == nil {
-		return nil, fmt.Errorf("block #%d not found", hash)
+		return nil, fmt.Errorf("block #%d not found", number)
 	}
 	return rlp.EncodeToBytes(block)
 }
 
-// GetRawReceipts retrieves the binary-encoded receipts of a single block.
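
GetHeaderRlp and GetBlockRlp above are thin wrappers around rlp.EncodeToBytes, which encodes a struct's exported fields in declaration order. A toy round trip with a made-up two-field header (toyHeader is hypothetical; real headers carry many more fields):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// toyHeader stands in for a real header; RLP encodes the exported fields in order.
type toyHeader struct {
	Number uint64
	Extra  []byte
}

func main() {
	enc, err := rlp.EncodeToBytes(&toyHeader{Number: 42, Extra: []byte("hi")})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", enc) // c42a826869: a list whose 4-byte payload holds 42 and "hi"

	var dec toyHeader
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println(dec.Number, string(dec.Extra)) // 42 hi
}
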
-func (api *DebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) { - var hash common.Hash - if h, ok := blockNrOrHash.Hash(); ok { - hash = h - } else { - block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) - if err != nil { - return nil, err - } - hash = block.Hash() - } - receipts, err := api.b.GetReceipts(ctx, hash) - if err != nil { - return nil, err - } - result := make([]hexutil.Bytes, len(receipts)) - for i, receipt := range receipts { - b, err := receipt.MarshalBinary() - if err != nil { - return nil, err - } - result[i] = b +// PrintBlock retrieves a block and returns its pretty printed form. +func (api *PublicDebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) { + block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number)) + if block == nil { + return "", fmt.Errorf("block #%d not found", number) } - return result, nil + return spew.Sdump(block), nil } -// GetRawTransaction returns the bytes of the transaction for the given hash. -func (s *DebugAPI) GetRawTransaction(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { - // Retrieve a finalized transaction, or a pooled otherwise - tx, _, _, _, err := s.b.GetTransaction(ctx, hash) - if err != nil { - return nil, err - } - if tx == nil { - if tx = s.b.GetPoolTransaction(hash); tx == nil { - // Transaction not found anywhere, abort - return nil, nil - } +// PrivateDebugAPI is the collection of Ethereum APIs exposed over the private +// debugging endpoint. +type PrivateDebugAPI struct { + b Backend +} + +// NewPrivateDebugAPI creates a new API definition for the private debug methods +// of the Ethereum service. +func NewPrivateDebugAPI(b Backend) *PrivateDebugAPI { + return &PrivateDebugAPI{b: b} +} + +// ChaindbProperty returns leveldb properties of the key-value database. +func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) { + if property == "" { + property = "leveldb.stats" + } else if !strings.HasPrefix(property, "leveldb.") { + property = "leveldb." + property } - return tx.MarshalBinary() + return api.b.ChainDb().Stat(property) } -// PrintBlock retrieves a block and returns its pretty printed form. -func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) { - block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number)) - if block == nil { - return "", fmt.Errorf("block #%d not found", number) +// ChaindbCompact flattens the entire key-value database into a single level, +// removing all unused slots and merging all keys. +func (api *PrivateDebugAPI) ChaindbCompact() error { + for b := byte(0); b < 255; b++ { + log.Info("Compacting chain database", "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1)) + if err := api.b.ChainDb().Compact([]byte{b}, []byte{b + 1}); err != nil { + log.Error("Database compaction failed", "err", err) + return err + } } - return spew.Sdump(block), nil + return nil } -// NetAPI offers network related RPC methods -type NetAPI struct { +// PublicNetAPI offers network related RPC methods +type PublicNetAPI struct { + // net *p2p.Server networkVersion uint64 } -// NewNetAPI creates a new net API instance. -func NewNetAPI(networkVersion uint64) *NetAPI { - return &NetAPI{networkVersion} +// NewPublicNetAPI creates a new net API instance. +func NewPublicNetAPI(networkVersion uint64) *PublicNetAPI { + return &PublicNetAPI{networkVersion} } // Listening returns an indication if the node is listening for network connections. 
-func (s *NetAPI) Listening() bool { +func (s *PublicNetAPI) Listening() bool { return true // always listening } // PeerCount returns the number of connected peers -func (s *NetAPI) PeerCount() hexutil.Uint { +func (s *PublicNetAPI) PeerCount() hexutil.Uint { return hexutil.Uint(0) } // Version returns the current ethereum protocol version. -func (s *NetAPI) Version() string { +func (s *PublicNetAPI) Version() string { return fmt.Sprintf("%d", s.networkVersion) } @@ -2132,3 +2061,12 @@ func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error { } return nil } + +// toHexSlice creates a slice of hex-strings based on []byte. +func toHexSlice(b [][]byte) []string { + r := make([]string, len(b)) + for i := range b { + r[i] = hexutil.Encode(b[i]) + } + return r +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index d269db702f..be712f8e28 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -32,17 +32,17 @@ import ( "math/big" "time" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/bloombits" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/bloombits" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" ) @@ -53,33 +53,31 @@ type Backend interface { EstimateBaseFee(ctx context.Context) (*big.Int, error) SuggestPrice(ctx context.Context) (*big.Int, error) SuggestGasTipCap(ctx context.Context) (*big.Int, error) - FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) + FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs - - UnprotectedAllowed(tx *types.Transaction) bool // allows only for EIP155 transactions. + UnprotectedAllowed() bool // allows only for EIP155 transactions. 
// Blockchain API HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) CurrentHeader() *types.Header - CurrentBlock() *types.Header + CurrentBlock() *types.Block BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) - GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM + GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription - BadBlocks() ([]*types.Block, []*core.BadBlockReason) // Transaction pool API SendTx(ctx context.Context, signedTx *types.Transaction) error @@ -88,24 +86,22 @@ type Backend interface { GetPoolTransaction(txHash common.Hash) *types.Transaction GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) Stats() (pending int, queued int) - TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) - TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) + TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) + TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + // Filter API + BloomStatus() (uint64, uint64) + GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) + ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) + SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribeAcceptedLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription + ChainConfig() *params.ChainConfig Engine() consensus.Engine LastAcceptedBlock() *types.Block - - // This is copied from filters.Backend - // eth/filters needs to be initialized from this backend type, so methods needed by - // it must also be included here. 
- GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) - GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) - SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription - BloomStatus() (uint64, uint64) - ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) } func GetAPIs(apiBackend Backend) []rpc.API { @@ -113,32 +109,51 @@ func GetAPIs(apiBackend Backend) []rpc.API { return []rpc.API{ { Namespace: "eth", - Service: NewEthereumAPI(apiBackend), - Name: "internal-eth", + Version: "1.0", + Service: NewPublicEthereumAPI(apiBackend), + Public: true, + Name: "internal-public-eth", }, { Namespace: "eth", - Service: NewBlockChainAPI(apiBackend), - Name: "internal-blockchain", + Version: "1.0", + Service: NewPublicBlockChainAPI(apiBackend), + Public: true, + Name: "internal-public-blockchain", }, { Namespace: "eth", - Service: NewTransactionAPI(apiBackend, nonceLock), - Name: "internal-transaction", + Version: "1.0", + Service: NewPublicTransactionPoolAPI(apiBackend, nonceLock), + Public: true, + Name: "internal-public-transaction-pool", }, { Namespace: "txpool", - Service: NewTxPoolAPI(apiBackend), - Name: "internal-tx-pool", + Version: "1.0", + Service: NewPublicTxPoolAPI(apiBackend), + Public: true, + Name: "internal-public-tx-pool", + }, { + Namespace: "debug", + Version: "1.0", + Service: NewPublicDebugAPI(apiBackend), + Public: true, + Name: "internal-public-debug", }, { Namespace: "debug", - Service: NewDebugAPI(apiBackend), - Name: "internal-debug", + Version: "1.0", + Service: NewPrivateDebugAPI(apiBackend), + Name: "internal-private-debug", }, { Namespace: "eth", - Service: NewEthereumAccountAPI(apiBackend.AccountManager()), - Name: "internal-account", + Version: "1.0", + Service: NewPublicAccountAPI(apiBackend.AccountManager()), + Public: true, + Name: "internal-public-account", }, { Namespace: "personal", - Service: NewPersonalAccountAPI(apiBackend, nonceLock), - Name: "internal-personal", + Version: "1.0", + Service: NewPrivateAccountAPI(apiBackend, nonceLock), + Public: false, + Name: "internal-private-personal", }, } } diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 77d4716575..8c7f6bf3ec 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -33,10 +33,8 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" @@ -87,8 +85,56 @@ func (args *TransactionArgs) data() []byte { // setDefaults fills in default values for unspecified tx fields. 
func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { - if err := args.setFeeDefaults(ctx, b); err != nil { - return err + if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { + return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } + // After london, default to 1559 unless gasPrice is set + head := b.CurrentHeader() + // If user specifies both maxPriorityfee and maxFee, then we do not + // need to consult the chain for defaults. It's definitely a London tx. + if args.MaxPriorityFeePerGas == nil || args.MaxFeePerGas == nil { + // In this clause, user left some fields unspecified. + if b.ChainConfig().IsApricotPhase3(new(big.Int).SetUint64(head.Time)) && args.GasPrice == nil { + if args.MaxPriorityFeePerGas == nil { + tip, err := b.SuggestGasTipCap(ctx) + if err != nil { + return err + } + args.MaxPriorityFeePerGas = (*hexutil.Big)(tip) + } + if args.MaxFeePerGas == nil { + gasFeeCap := new(big.Int).Add( + (*big.Int)(args.MaxPriorityFeePerGas), + new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + ) + args.MaxFeePerGas = (*hexutil.Big)(gasFeeCap) + } + if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 { + return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas) + } + } else { + if args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil { + return errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet") + } + if args.GasPrice == nil { + price, err := b.SuggestGasTipCap(ctx) + if err != nil { + return err + } + if b.ChainConfig().IsApricotPhase3(new(big.Int).SetUint64(head.Time)) { + // The legacy tx gas price suggestion should not add 2x base fee + // because all fees are consumed, so it would result in a spiral + // upwards. + price.Add(price, head.BaseFee) + } + args.GasPrice = (*hexutil.Big)(price) + } + } + } else { + // Both maxPriorityfee and maxFee set by caller. Sanity-check their internal relation + if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 { + return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas) + } } if args.Value == nil { args.Value = new(hexutil.Big) @@ -122,110 +168,16 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { AccessList: args.AccessList, } pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap()) + estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) if err != nil { return err } args.Gas = &estimated log.Trace("Estimate gas usage automatically", "gas", args.Gas) } - // If chain id is provided, ensure it matches the local chain id. Otherwise, set the local - // chain id as the default. - want := b.ChainConfig().ChainID - if args.ChainID != nil { - if have := (*big.Int)(args.ChainID); have.Cmp(want) != 0 { - return fmt.Errorf("chainId does not match node's (have=%v, want=%v)", have, want) - } - } else { - args.ChainID = (*hexutil.Big)(want) - } - return nil -} - -type feeBackend interface { - SuggestGasTipCap(ctx context.Context) (*big.Int, error) - CurrentHeader() *types.Header - ChainConfig() *params.ChainConfig -} - -// setFeeDefaults fills in default fee values for unspecified tx fields. 
-func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b feeBackend) error { - // If both gasPrice and at least one of the EIP-1559 fee parameters are specified, error. - if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { - return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") - } - // If the tx has completely specified a fee mechanism, no default is needed. - // This allows users who are not yet synced past London to get defaults for - // other tx values. See https://github.com/ethereum/go-ethereum/pull/23274 - // for more information. - eip1559ParamsSet := args.MaxFeePerGas != nil && args.MaxPriorityFeePerGas != nil - - // Sanity check the EIP-1559 fee parameters if present. - if args.GasPrice == nil && eip1559ParamsSet { - if args.MaxFeePerGas.ToInt().Sign() == 0 { - return errors.New("maxFeePerGas must be non-zero") - } - if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 { - return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas) - } - return nil // No need to set anything, user already set MaxFeePerGas and MaxPriorityFeePerGas - } - // Sanity check the non-EIP-1559 fee parameters. - head := b.CurrentHeader() - isLondon := b.ChainConfig().IsApricotPhase3(head.Time) - if args.GasPrice != nil && !eip1559ParamsSet { - // Zero gas-price is not allowed after London fork - if args.GasPrice.ToInt().Sign() == 0 && isLondon { - return errors.New("gasPrice must be non-zero after london fork") - } - return nil // No need to set anything, user already set GasPrice - } - - // Now attempt to fill in default value depending on whether London is active or not. - if isLondon { - // London is active, set maxPriorityFeePerGas and maxFeePerGas. - if err := args.setApricotPhase3FeeDefault(ctx, head, b); err != nil { - return err - } - } else { - if args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil { - return errors.New("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active") - } - if args.GasPrice == nil { - price, err := b.SuggestGasTipCap(ctx) - if err != nil { - return err - } - args.GasPrice = (*hexutil.Big)(price) - } - } - return nil -} - -// setApricotPhase3FeeDefault fills in reasonable default fee values for unspecified fields. -func (args *TransactionArgs) setApricotPhase3FeeDefault(ctx context.Context, head *types.Header, b feeBackend) error { - // Set maxPriorityFeePerGas if it is missing. - if args.MaxPriorityFeePerGas == nil { - tip, err := b.SuggestGasTipCap(ctx) - if err != nil { - return err - } - args.MaxPriorityFeePerGas = (*hexutil.Big)(tip) - } - // Set maxFeePerGas if it is missing. - if args.MaxFeePerGas == nil { - // Set the max fee to be 2 times larger than the previous block's base fee. - // The additional slack allows the tx to not become invalidated if the base - // fee is rising. - gasFeeCap := new(big.Int).Add( - (*big.Int)(args.MaxPriorityFeePerGas), - new(big.Int).Mul(head.BaseFee, big.NewInt(2)), - ) - args.MaxFeePerGas = (*hexutil.Big)(gasFeeCap) - } - // Both EIP-1559 fee parameters are now set; sanity check them. 
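The post-ApricotPhase3 branch of the new setDefaults above fills a missing maxFeePerGas as suggestedTip + 2*parentBaseFee, leaving slack so the transaction stays valid while the base fee rises. A minimal, self-contained sketch of that arithmetic (the gwei values are hypothetical):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical inputs: a 2 gwei suggested tip and a 25 gwei parent base fee.
	tip := big.NewInt(2_000_000_000)
	baseFee := big.NewInt(25_000_000_000)

	// Mirrors the default above: gasFeeCap = tip + 2*baseFee.
	gasFeeCap := new(big.Int).Add(tip, new(big.Int).Mul(baseFee, big.NewInt(2)))
	fmt.Println(gasFeeCap) // 52000000000 (52 gwei)

	// setDefaults then rejects any combination with gasFeeCap < tip.
	fmt.Println(gasFeeCap.Cmp(tip) < 0) // false
}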
- if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 { - return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas) + if args.ChainID == nil { + id := (*hexutil.Big)(b.ChainConfig().ChainID) + args.ChainID = id } return nil } @@ -233,10 +185,10 @@ func (args *TransactionArgs) setApricotPhase3FeeDefault(ctx context.Context, hea // ToMessage converts the transaction arguments to the Message type used by the // core evm. This method is used in calls and traces that do not require a real // live transaction. -func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Message, error) { +func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) { // Reject invalid combinations of pre- and post-1559 fee styles if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { - return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") } // Set sender address or use zero address if none specified. addr := args.from() @@ -250,7 +202,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (* gas = uint64(*args.Gas) } if globalGasCap != 0 && globalGasCap < gas { - log.Info("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) + log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) gas = globalGasCap } var ( @@ -272,7 +224,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (* gasPrice = args.GasPrice.ToInt() gasFeeCap, gasTipCap = gasPrice, gasPrice } else { - // User specified 1559 gas fields (or none), use those + // User specified 1559 gas fields (or none), use those gasFeeCap = new(big.Int) if args.MaxFeePerGas != nil { gasFeeCap = args.MaxFeePerGas.ToInt() @@ -297,18 +249,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (* if args.AccessList != nil { accessList = *args.AccessList } - msg := &core.Message{ - From: addr, - To: args.To, - Value: value, - GasLimit: gas, - GasPrice: gasPrice, - GasFeeCap: gasFeeCap, - GasTipCap: gasTipCap, - Data: data, - AccessList: accessList, - SkipAccountChecks: true, - } + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true) return msg, nil } diff --git a/internal/shutdowncheck/shutdown_tracker.go b/internal/shutdowncheck/shutdown_tracker.go index 35382305c4..d82fef7bd7 100644 --- a/internal/shutdowncheck/shutdown_tracker.go +++ b/internal/shutdowncheck/shutdown_tracker.go @@ -29,9 +29,9 @@ import ( "time" - "github.com/ava-labs/coreth/core/rawdb" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) diff --git a/internal/version/vcs.go b/internal/version/vcs.go index 7016458999..6a9f174b8c 100644 --- a/internal/version/vcs.go +++ b/internal/version/vcs.go @@ -1,4 +1,4 @@ -// (c) 2023, Ava Labs, Inc. +// (c) 2019-2020, Ava Labs, Inc. // // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -8,7 +8,7 @@ // // Much love to the original authors for their work.
// ********** -// Copyright 2022 The go-ethereum Authors +// Copyright 2016 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,39 +24,28 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package version +package node import ( - "runtime/debug" - "time" + "github.com/tenderly/coreth/rpc" ) -// In go 1.18 and beyond, the go tool embeds VCS information into the build. - const ( - govcsTimeLayout = "2006-01-02T15:04:05Z" - ourTimeLayout = "20060102" + DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server + DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server + DefaultWSHost = "localhost" // Default host interface for the websocket RPC server + DefaultWSPort = 8546 // Default TCP port for the websocket RPC server + DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server + DefaultGraphQLPort = 8547 // Default TCP port for the GraphQL server ) -// buildInfoVCS returns VCS information of the build. -func buildInfoVCS(info *debug.BuildInfo) (s VCSInfo, ok bool) { - for _, v := range info.Settings { - switch v.Key { - case "vcs.revision": - s.Commit = v.Value - case "vcs.modified": - if v.Value == "true" { - s.Dirty = true - } - case "vcs.time": - t, err := time.Parse(govcsTimeLayout, v.Value) - if err == nil { - s.Date = t.Format(ourTimeLayout) - } - } - } - if s.Commit != "" && s.Date != "" { - ok = true - } - return +// DefaultConfig contains reasonable default settings. +var DefaultConfig = Config{ + HTTPPort: DefaultHTTPPort, + HTTPModules: []string{"net", "web3"}, + HTTPVirtualHosts: []string{"localhost"}, + HTTPTimeouts: rpc.DefaultHTTPTimeouts, + WSPort: DefaultWSPort, + WSModules: []string{"net", "web3"}, + GraphQLVirtualHosts: []string{"localhost"}, } diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go index 1d79cfbad7..4f17dcd60e 100644 --- a/metrics/prometheus/prometheus.go +++ b/metrics/prometheus/prometheus.go @@ -7,7 +7,7 @@ import ( "sort" "strings" - "github.com/ava-labs/coreth/metrics" + "github.com/tenderly/coreth/metrics" "github.com/prometheus/client_golang/prometheus" diff --git a/metrics/prometheus/prometheus_test.go b/metrics/prometheus/prometheus_test.go index 967e3f2602..97bb7d627d 100644 --- a/metrics/prometheus/prometheus_test.go +++ b/metrics/prometheus/prometheus_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/ava-labs/coreth/metrics" + "github.com/tenderly/coreth/metrics" ) func TestGatherer(t *testing.T) { diff --git a/miner/worker.go b/miner/worker.go index 9f617a454f..6d9fed0bff 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -38,31 +38,27 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc/eip4844" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/txpool" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile/precompileconfig" - "github.com/ava-labs/coreth/predicate" + "github.com/tenderly/coreth/consensus" + "github.com/tenderly/coreth/consensus/dummy" + 
"github.com/tenderly/coreth/consensus/misc" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) const ( - // Leaves 256 KBs for other sections of the block (limit is 2MB). - // This should suffice for atomic txs, proposervm header, and serialization overhead. - targetTxsSize = 1792 * units.KiB + targetTxsSize = 192 * units.KiB ) // environment is the worker's current environment and holds all of the current state information. type environment struct { - signer types.Signer + signer types.Signer + state *state.StateDB // apply state changes here tcount int // tx count in cycle gasPool *core.GasPool // available gas used to pack transactions @@ -71,17 +67,7 @@ type environment struct { header *types.Header txs []*types.Transaction receipts []*types.Receipt - sidecars []*types.BlobTxSidecar - blobs int - size uint64 - - rules params.Rules - predicateContext *precompileconfig.PredicateContext - // predicateResults contains the results of checking the predicates for each transaction in the miner. - // The results are accumulated as transactions are executed by the miner and set on the BlockContext. - // If a transaction is dropped, its results must explicitly be removed from predicateResults in the same - // way that the gas pool and state is reset. - predicateResults *predicate.Results + size common.StorageSize start time.Time // Time that block building began } @@ -100,11 +86,10 @@ type worker struct { pendingLogsFeed event.Feed // Subscriptions - mux *event.TypeMux // TODO replace - mu sync.RWMutex // The lock used to protect the coinbase and extra fields - coinbase common.Address - clock *mockable.Clock // Allows us mock the clock for testing - beaconRoot *common.Hash // TODO: set to empty hash, retained for upstream compatibility and future use + mux *event.TypeMux // TODO replace + mu sync.RWMutex // The lock used to protect the coinbase and extra fields + coinbase common.Address + clock *mockable.Clock // Allows us mock the clock for testing } func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, clock *mockable.Clock) *worker { @@ -113,11 +98,9 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus chainConfig: chainConfig, engine: engine, eth: eth, - chain: eth.BlockChain(), mux: mux, - coinbase: config.Etherbase, + chain: eth.BlockChain(), clock: clock, - beaconRoot: &common.Hash{}, } return worker @@ -131,60 +114,45 @@ func (w *worker) setEtherbase(addr common.Address) { } // commitNewWork generates several new sealing tasks based on the parent block. -func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateContext) (*types.Block, error) { +func (w *worker) commitNewWork() (*types.Block, error) { w.mu.RLock() defer w.mu.RUnlock() tstart := w.clock.Time() - timestamp := uint64(tstart.Unix()) + timestamp := tstart.Unix() parent := w.chain.CurrentBlock() // Note: in order to support asynchronous block production, blocks are allowed to have // the same timestamp as their parent. This allows more than one block to be produced // per second. 
- if parent.Time >= timestamp { - timestamp = parent.Time + if parent.Time() >= uint64(timestamp) { + timestamp = int64(parent.Time()) } var gasLimit uint64 - if w.chainConfig.IsCortina(timestamp) { - gasLimit = params.CortinaGasLimit - } else if w.chainConfig.IsApricotPhase1(timestamp) { + if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { gasLimit = params.ApricotPhase1GasLimit } else { // The gas limit is set in phase1 to ApricotPhase1GasLimit because the ceiling and floor were set to the same value // such that the gas limit converged to it. Since this is hardbaked now, we remove the ability to configure it. - gasLimit = core.CalcGasLimit(parent.GasUsed, parent.GasLimit, params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) + gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) } + num := parent.Number() header := &types.Header{ ParentHash: parent.Hash(), - Number: new(big.Int).Add(parent.Number, common.Big1), + Number: num.Add(num, common.Big1), GasLimit: gasLimit, Extra: nil, - Time: timestamp, + Time: uint64(timestamp), } - // Set BaseFee and Extra data field if we are post ApricotPhase3 - if w.chainConfig.IsApricotPhase3(timestamp) { + bigTimestamp := big.NewInt(timestamp) + if w.chainConfig.IsApricotPhase3(bigTimestamp) { var err error - header.Extra, header.BaseFee, err = dummy.CalcBaseFee(w.chainConfig, parent, timestamp) + header.Extra, header.BaseFee, err = dummy.CalcBaseFee(w.chainConfig, parent.Header(), uint64(timestamp)) if err != nil { return nil, fmt.Errorf("failed to calculate new base fee: %w", err) } } - // Apply EIP-4844, EIP-4788. - if w.chainConfig.IsCancun(header.Number, header.Time) { - var excessBlobGas uint64 - if w.chainConfig.IsCancun(parent.Number, parent.Time) { - excessBlobGas = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed) - } else { - // For the first post-fork block, both parent.data_gas_used and parent.excess_data_gas are evaluated as 0 - excessBlobGas = eip4844.CalcExcessBlobGas(0, 0) - } - header.BlobGasUsed = new(uint64) - header.ExcessBlobGas = &excessBlobGas - header.ParentBeaconRoot = w.beaconRoot - } - if w.coinbase == (common.Address{}) { return nil, errors.New("cannot mine without etherbase") } @@ -193,209 +161,141 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte return nil, fmt.Errorf("failed to prepare header for mining: %w", err) } - env, err := w.createCurrentEnvironment(predicateContext, parent, header, tstart) + env, err := w.createCurrentEnvironment(parent, header, tstart) if err != nil { return nil, fmt.Errorf("failed to create new current environment: %w", err) } - if header.ParentBeaconRoot != nil { - context := core.NewEVMBlockContext(header, w.chain, nil) - vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{}) - core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state) - } - // Ensure we always stop prefetcher after block building is complete. - defer func() { - if env.state == nil { - return - } - env.state.StopPrefetcher() - }() - // Configure any upgrades that should go into effect during this block. 
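As the gas-limit comment earlier in commitNewWork notes, once the floor and ceiling passed to core.CalcGasLimit are the same value, the computed limit converges to that value over successive blocks. The stand-in below is not the real CalcGasLimit (which also weighs parent gas used); it only sketches the convergence shape under that assumption:

package main

import "fmt"

// step moves the parent limit toward the desired limit by at most
// parentLimit/1024 - 1 per block, a simplified model of the bounded
// adjustment CalcGasLimit performs.
func step(parentLimit, desired uint64) uint64 {
	delta := parentLimit/1024 - 1
	switch {
	case parentLimit+delta < desired:
		return parentLimit + delta
	case parentLimit > desired+delta:
		return parentLimit - delta
	default:
		return desired
	}
}

func main() {
	limit := uint64(5_000_000)
	for i := 0; i < 1000; i++ {
		limit = step(limit, 8_000_000) // converge toward ApricotPhase1GasLimit
	}
	fmt.Println(limit) // 8000000
}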
- err = core.ApplyUpgrades(w.chainConfig, &parent.Time, types.NewBlockWithHeader(header), env.state) - if err != nil { - log.Error("failed to configure precompiles mining new block", "parent", parent.Hash(), "number", header.Number, "timestamp", header.Time, "err", err) - return nil, err + if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { + misc.ApplyDAOHardFork(env.state) } + // Configure any stateful precompiles that should go into effect during this block. + w.chainConfig.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time()), types.NewBlockWithHeader(header), env.state) - pending := w.eth.TxPool().PendingWithBaseFee(true, header.BaseFee) + // Fill the block with all available pending transactions. + pending := w.eth.TxPool().Pending(true) - // Split the pending transactions into locals and remotes. - localTxs, remoteTxs := make(map[common.Address][]*txpool.LazyTransaction), pending + // Split the pending transactions into locals and remotes + localTxs := make(map[common.Address]types.Transactions) + remoteTxs := pending for _, account := range w.eth.TxPool().Locals() { if txs := remoteTxs[account]; len(txs) > 0 { delete(remoteTxs, account) localTxs[account] = txs } } - - // Fill the block with all available pending transactions. if len(localTxs) > 0 { - txs := newTransactionsByPriceAndNonce(env.signer, localTxs, header.BaseFee) - w.commitTransactions(env, txs, header.Coinbase) + txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, header.BaseFee) + w.commitTransactions(env, txs, w.coinbase) } if len(remoteTxs) > 0 { - txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, header.BaseFee) - w.commitTransactions(env, txs, header.Coinbase) + txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, header.BaseFee) + w.commitTransactions(env, txs, w.coinbase) } return w.commit(env) } -func (w *worker) createCurrentEnvironment(predicateContext *precompileconfig.PredicateContext, parent *types.Header, header *types.Header, tstart time.Time) (*environment, error) { - state, err := w.chain.StateAt(parent.Root) +func (w *worker) createCurrentEnvironment(parent *types.Block, header *types.Header, tstart time.Time) (*environment, error) { + state, err := w.chain.StateAt(parent.Root()) if err != nil { return nil, err } - state.StartPrefetcher("miner", w.eth.BlockChain().CacheConfig().TriePrefetcherParallelism) return &environment{ - signer: types.MakeSigner(w.chainConfig, header.Number, header.Time), - state: state, - parent: parent, - header: header, - tcount: 0, - gasPool: new(core.GasPool).AddGas(header.GasLimit), - rules: w.chainConfig.Rules(header.Number, header.Time), - predicateContext: predicateContext, - predicateResults: predicate.NewResults(), - start: tstart, + signer: types.MakeSigner(w.chainConfig, header.Number, new(big.Int).SetUint64(header.Time)), + state: state, + parent: parent.Header(), + header: header, + tcount: 0, + gasPool: new(core.GasPool).AddGas(header.GasLimit), + start: tstart, }, nil } func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { - if tx.Type() == types.BlobTxType { - return w.commitBlobTransaction(env, tx, coinbase) - } - receipt, err := w.applyTransaction(env, tx, coinbase) + snap := env.state.Snapshot() + + receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil 
{ + env.state.RevertToSnapshot(snap) return nil, err } env.txs = append(env.txs, tx) env.receipts = append(env.receipts, receipt) - return receipt.Logs, nil -} + env.size += tx.Size() -func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { - sc := tx.BlobTxSidecar() - if sc == nil { - panic("blob transaction without blobs in miner") - } - // Checking against blob gas limit: It's kind of ugly to perform this check here, but there - // isn't really a better place right now. The blob gas limit is checked at block validation time - // and not during execution. This means core.ApplyTransaction will not return an error if the - // tx has too many blobs. So we have to explicitly check it here. - if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { - return nil, errors.New("max data blobs reached") - } - receipt, err := w.applyTransaction(env, tx, coinbase) - if err != nil { - return nil, err - } - env.txs = append(env.txs, tx.WithoutBlobTxSidecar()) - env.receipts = append(env.receipts, receipt) - env.sidecars = append(env.sidecars, sc) - env.blobs += len(sc.Blobs) - *env.header.BlobGasUsed += receipt.BlobGasUsed return receipt.Logs, nil } -// applyTransaction runs the transaction. If execution fails, state and gas pool are reverted. -func (w *worker) applyTransaction(env *environment, tx *types.Transaction, coinbase common.Address) (*types.Receipt, error) { - var ( - snap = env.state.Snapshot() - gp = env.gasPool.Gas() - blockContext vm.BlockContext - ) - - if env.rules.IsDurango { - results, err := core.CheckPredicates(env.rules, env.predicateContext, tx) - if err != nil { - log.Debug("Transaction predicate failed verification in miner", "tx", tx.Hash(), "err", err) - return nil, err - } - env.predicateResults.SetTxResults(tx.Hash(), results) - - blockContext = core.NewEVMBlockContextWithPredicateResults(env.header, w.chain, &coinbase, env.predicateResults) - } else { - blockContext = core.NewEVMBlockContext(env.header, w.chain, &coinbase) - } - - receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, blockContext, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) - if err != nil { - env.state.RevertToSnapshot(snap) - env.gasPool.SetGas(gp) - env.predicateResults.DeleteTxResults(tx.Hash()) - } - return receipt, err -} - -func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, coinbase common.Address) { +func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, coinbase common.Address) { for { - // If we don't have enough gas for any further transactions then we're done. + // If we don't have enough gas for any further transactions then we're done if env.gasPool.Gas() < params.TxGas { log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) break } - // Retrieve the next transaction and abort if all done. - ltx := txs.Peek() - if ltx == nil { - break - } - // If we don't have enough space for the next transaction, skip the account. 
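commitTransaction above is built on the StateDB snapshot/revert idiom: record a revision before applying a transaction and roll back to it on failure, so a failed transaction leaves no partial writes behind. A toy model of the idiom (journaledStore is a stand-in, not the real state.StateDB):

package main

import "fmt"

// journaledStore is a toy stand-in for state.StateDB's revision journal.
type journaledStore struct {
	data      map[string]string
	snapshots []map[string]string
}

// Snapshot records the current contents and returns a revision id.
func (s *journaledStore) Snapshot() int {
	cp := make(map[string]string, len(s.data))
	for k, v := range s.data {
		cp[k] = v
	}
	s.snapshots = append(s.snapshots, cp)
	return len(s.snapshots) - 1
}

// RevertToSnapshot restores the contents recorded at the given revision.
func (s *journaledStore) RevertToSnapshot(id int) {
	s.data = s.snapshots[id]
	s.snapshots = s.snapshots[:id]
}

func main() {
	store := &journaledStore{data: map[string]string{"nonce": "1"}}

	snap := store.Snapshot()
	store.data["nonce"] = "2" // speculative write, as during ApplyTransaction
	failed := true            // pretend execution returned an error
	if failed {
		store.RevertToSnapshot(snap) // discard the partial write
	}
	fmt.Println(store.data["nonce"]) // 1
}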
- if env.gasPool.Gas() < ltx.Gas { log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas) txs.Pop() continue } if left := uint64(params.MaxBlobGasPerBlock - env.blobs*params.BlobTxBlobGasPerBlob); left < ltx.BlobGas { log.Trace("Not enough blob gas left for transaction", "hash", ltx.Hash, "left", left, "needed", ltx.BlobGas) txs.Pop() continue } - // Transaction seems to fit, pull it up from the pool - tx := ltx.Resolve() + // Retrieve the next transaction and abort if all done + tx := txs.Peek() if tx == nil { - log.Trace("Ignoring evicted transaction", "hash", ltx.Hash) - txs.Pop() - continue + break } // Abort transaction if it won't fit in the block and continue to search for a smaller // transaction that will fit. if totalTxsSize := env.size + tx.Size(); totalTxsSize > targetTxsSize { log.Trace("Skipping transaction that would exceed target size", "hash", tx.Hash(), "totalTxsSize", totalTxsSize, "txSize", tx.Size()) + txs.Pop() continue } - // Error may be ignored here. The error has already been checked // during transaction acceptance in the transaction pool. + // + // We use the eip155 signer regardless of the current hf. from, _ := types.Sender(env.signer, tx) - // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring replay protected transaction", "hash", ltx.Hash, "eip155", w.chainConfig.EIP155Block) + log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) + txs.Pop() continue } - // Start executing the transaction - env.state.SetTxContext(tx.Hash(), env.tcount) + env.state.Prepare(tx.Hash(), env.tcount) _, err := w.commitTransaction(env, tx, coinbase) switch { + case errors.Is(err, core.ErrGasLimitReached): + // Pop the current out-of-gas transaction without shifting in the next from the account + log.Trace("Gas limit exceeded for current block", "sender", from) + txs.Pop() + case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "hash", ltx.Hash, "sender", from, "nonce", tx.Nonce()) + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) txs.Shift() + case errors.Is(err, core.ErrNonceTooHigh): + // Reorg notification data race between the transaction pool and miner, skip account + log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce()) + txs.Pop() + case errors.Is(err, nil): env.tcount++ txs.Shift() - default: - // Transaction is regarded as invalid, drop all consecutive transactions from - // the same sender because of `nonce-too-high` clause. - log.Debug("Transaction failed, account skipped", "hash", ltx.Hash, "err", err) + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) txs.Pop() + case errors.Is(err, vmerrs.ErrToAddrProhibitedSoft): + log.Warn("Tx dropped: failed verification", "tx", tx.Hash(), "sender", from, "data", tx.Data(), "err", err) + w.eth.TxPool().RemoveTx(tx.Hash()) + txs.Pop() + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain).
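The switch above drives types.TransactionsByPriceAndNonce: Shift advances to the same sender's next-nonce transaction, while Pop abandons the sender's remaining queue (as on ErrGasLimitReached or an unsupported type). A toy model of that consume loop, with a simplified per-sender queue in place of the real price-ordered heap:

package main

import "fmt"

// bySender is a toy stand-in for types.TransactionsByPriceAndNonce: one
// nonce-ordered queue per sender, consumed from the front.
type bySender struct {
	order  []string         // senders in (static) price order
	queues map[string][]int // pending nonces per sender
}

// Peek returns the best sender's next nonce, if any remain.
func (b *bySender) Peek() (string, int, bool) {
	for _, s := range b.order {
		if q := b.queues[s]; len(q) > 0 {
			return s, q[0], true
		}
	}
	return "", 0, false
}

// Shift moves on to the sender's next transaction.
func (b *bySender) Shift(sender string) { b.queues[sender] = b.queues[sender][1:] }

// Pop drops the sender and everything still queued for it.
func (b *bySender) Pop(sender string) { delete(b.queues, sender) }

func main() {
	txs := &bySender{
		order:  []string{"alice", "bob"},
		queues: map[string][]int{"alice": {1, 2}, "bob": {7}},
	}
	sender, nonce, _ := txs.Peek()
	fmt.Println(sender, nonce) // alice 1
	txs.Shift(sender)          // commit succeeded: alice's nonce 2 is up next
	txs.Pop("bob")             // e.g. bob's tx type unsupported: skip his queue
	sender, nonce, _ = txs.Peek()
	fmt.Println(sender, nonce) // alice 2
}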
+ log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + txs.Shift() } } } @@ -403,13 +303,6 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn // commit runs any post-transaction state modifications, assembles the final block // and commits new work if consensus engine is running. func (w *worker) commit(env *environment) (*types.Block, error) { - if env.rules.IsDurango { - predicateResultsBytes, err := env.predicateResults.Bytes() - if err != nil { - return nil, fmt.Errorf("failed to marshal predicate results: %w", err) - } - env.header.Extra = append(env.header.Extra, predicateResultsBytes...) - } // Deep copy receipts here to avoid interaction between different tasks. receipts := copyReceipts(env.receipts) block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.parent, env.state, env.txs, nil, receipts) @@ -452,12 +345,9 @@ func (w *worker) handleResult(env *environment, block *types.Block, createdAt ti } logs = append(logs, receipt.Logs...) } - fees := totalFees(block, receipts) - feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether)) - log.Info("Commit new mining work", "number", block.Number(), "hash", hash, - "uncles", 0, "txs", env.tcount, - "gas", block.GasUsed(), "fees", feesInEther, - "elapsed", common.PrettyDuration(time.Since(env.start))) + + log.Info("Commit new mining work", "number", block.Number(), "hash", hash, "uncles", 0, "txs", env.tcount, + "gas", block.GasUsed(), "fees", totalFees(block, receipts), "elapsed", common.PrettyDuration(time.Since(env.start))) // Note: the miner no longer emits a NewMinedBlock event. Instead the caller // is responsible for running any additional verification and then inserting @@ -475,19 +365,11 @@ func copyReceipts(receipts []*types.Receipt) []*types.Receipt { return result } -// totalFees computes total consumed miner fees in Wei. Block transactions and receipts have to have the same order. -func totalFees(block *types.Block, receipts []*types.Receipt) *big.Int { +// totalFees computes total consumed fees in ETH. Block transactions and receipts have to have the same order. 
+func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { feesWei := new(big.Int) for i, tx := range block.Transactions() { - var minerFee *big.Int - if baseFee := block.BaseFee(); baseFee != nil { - // Note in coreth the coinbase payment is (baseFee + effectiveGasTip) * gasUsed - minerFee = new(big.Int).Add(baseFee, tx.EffectiveGasTipValue(baseFee)) - } else { - // Prior to activation of EIP-1559, the coinbase payment was gasPrice * gasUsed - minerFee = tx.GasPrice() - } - feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee)) + feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice())) } - return feesWei + return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) } diff --git a/node/api.go b/node/api.go index f0be57313f..67b4ef5dcf 100644 --- a/node/api.go +++ b/node/api.go @@ -27,8 +27,8 @@ package node import ( - "github.com/ava-labs/coreth/internal/debug" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/internal/debug" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" ) diff --git a/node/config.go b/node/config.go index 58290afea7..7f33363a45 100644 --- a/node/config.go +++ b/node/config.go @@ -28,12 +28,14 @@ package node import ( "fmt" + "io/ioutil" "os" "path/filepath" - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/accounts/external" - "github.com/ava-labs/coreth/accounts/keystore" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/accounts/external" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/rpc" "github.com/ethereum/go-ethereum/log" ) @@ -50,7 +52,7 @@ type Config struct { // is created by New and destroyed when the node is stopped. KeyStoreDir string `toml:",omitempty"` - // ExternalSigner specifies an external URI for a clef-type signer. + // ExternalSigner specifies an external URI for a clef-type signer ExternalSigner string `toml:",omitempty"` // UseLightweightKDF lowers the memory and CPU requirements of the key store @@ -60,20 +62,115 @@ type Config struct { // InsecureUnlockAllowed allows user to unlock accounts in unsafe http environment. InsecureUnlockAllowed bool `toml:",omitempty"` - // BatchRequestLimit is the maximum number of requests in a batch. - BatchRequestLimit int `toml:",omitempty"` - - // BatchResponseMaxSize is the maximum number of bytes returned from a batched rpc call. - BatchResponseMaxSize int `toml:",omitempty"` + // HTTPHost is the host interface on which to start the HTTP RPC server. If this + // field is empty, no HTTP API endpoint will be started. + HTTPHost string + + // HTTPPort is the TCP port number on which to start the HTTP RPC server. The + // default zero value is valid and will pick a port number randomly (useful + // for ephemeral nodes). + HTTPPort int `toml:",omitempty"` + + // HTTPCors is the Cross-Origin Resource Sharing header to send to requesting + // clients. Please be aware that CORS is a browser enforced security, it's fully + // useless for custom HTTP clients. + HTTPCors []string `toml:",omitempty"` + + // HTTPVirtualHosts is the list of virtual hostnames which are allowed on incoming requests. + // This is by default {'localhost'}. Using this prevents attacks like + // DNS rebinding, which bypasses SOP by simply masquerading as being within the same + // origin.
These attacks do not utilize CORS, since they are not cross-domain. + // By explicitly checking the Host-header, the server will not allow requests + // made against the server with a malicious host domain. + // Requests using ip address directly are not affected + HTTPVirtualHosts []string `toml:",omitempty"` + + // HTTPModules is a list of API modules to expose via the HTTP RPC interface. + // If the module list is empty, all RPC API endpoints designated public will be + // exposed. + HTTPModules []string + + // HTTPTimeouts allows for customization of the timeout values used by the HTTP RPC + // interface. + HTTPTimeouts rpc.HTTPTimeouts + + // WSHost is the host interface on which to start the websocket RPC server. If + // this field is empty, no websocket API endpoint will be started. + WSHost string + + // WSPort is the TCP port number on which to start the websocket RPC server. The + // default zero value is valid and will pick a port number randomly (useful for + // ephemeral nodes). + WSPort int `toml:",omitempty"` + + // WSOrigins is the list of domains to accept websocket requests from. Please be + // aware that the server can only act upon the HTTP request the client sends and + // cannot verify the validity of the request header. + WSOrigins []string `toml:",omitempty"` + + // WSModules is a list of API modules to expose via the websocket RPC interface. + // If the module list is empty, all RPC API endpoints designated public will be + // exposed. + WSModules []string + + // WSExposeAll exposes all API modules via the WebSocket RPC interface rather + // than just the public ones. + // + // *WARNING* Only set this if the node is running in a trusted network, exposing + // private APIs to untrusted users is a major security risk. + WSExposeAll bool `toml:",omitempty"` + + // GraphQLCors is the Cross-Origin Resource Sharing header to send to requesting + // clients. Please be aware that CORS is a browser enforced security, it's fully + // useless for custom HTTP clients. + GraphQLCors []string `toml:",omitempty"` + + // GraphQLVirtualHosts is the list of virtual hostnames which are allowed on incoming requests. + // This is by default {'localhost'}. Using this prevents attacks like + // DNS rebinding, which bypasses SOP by simply masquerading as being within the same + // origin. These attacks do not utilize CORS, since they are not cross-domain. + // By explicitly checking the Host-header, the server will not allow requests + // made against the server with a malicious host domain. + // Requests using ip address directly are not affected + GraphQLVirtualHosts []string `toml:",omitempty"` CorethVersion string } +// HTTPEndpoint resolves an HTTP endpoint based on the configured host interface +// and port parameters. +func (c *Config) HTTPEndpoint() string { + if c.HTTPHost == "" { + return "" + } + return fmt.Sprintf("%s:%d", c.HTTPHost, c.HTTPPort) +} + +// DefaultHTTPEndpoint returns the HTTP endpoint used by default. +func DefaultHTTPEndpoint() string { + config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort} + return config.HTTPEndpoint() +} + +// WSEndpoint resolves a websocket endpoint based on the configured host interface +// and port parameters. +func (c *Config) WSEndpoint() string { + if c.WSHost == "" { + return "" + } + return fmt.Sprintf("%s:%d", c.WSHost, c.WSPort) +} + +// DefaultWSEndpoint returns the websocket endpoint used by default.
+func DefaultWSEndpoint() string { + config := &Config{WSHost: DefaultWSHost, WSPort: DefaultWSPort} + return config.WSEndpoint() +} + // ExtRPCEnabled returns the indicator whether node enables the external // RPC(http, ws or graphql). func (c *Config) ExtRPCEnabled() bool { - // In avalanche, we always disable the external RPC. - return false + return c.HTTPHost != "" || c.WSHost != "" } // KeyDirConfig determines the settings for keydirectory @@ -91,17 +188,17 @@ func (c *Config) KeyDirConfig() (string, error) { return keydir, err } -// GetKeyStoreDir retrieves the key directory and will create +// getKeyStoreDir retrieves the key directory and will create // an ephemeral one if necessary. -func (c *Config) GetKeyStoreDir() (string, bool, error) { - keydir, err := c.KeyDirConfig() +func getKeyStoreDir(conf *Config) (string, bool, error) { + keydir, err := conf.KeyDirConfig() if err != nil { return "", false, err } isEphemeral := false if keydir == "" { // There is no datadir. - keydir, err = os.MkdirTemp("", "coreth-keystore") + keydir, err = ioutil.TempDir("", "coreth-keystore") isEphemeral = true } @@ -123,7 +220,7 @@ func makeAccountManager(conf *Config) (*accounts.Manager, error) { scryptP = keystore.LightScryptP } - keydir, _, err := conf.GetKeyStoreDir() + keydir, _, err := getKeyStoreDir(conf) if err != nil { return nil, err } diff --git a/node/node.go b/node/node.go index 2cc4551c29..decf9b9a97 100644 --- a/node/node.go +++ b/node/node.go @@ -27,8 +27,8 @@ package node import ( - "github.com/ava-labs/coreth/accounts" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/accounts" + "github.com/tenderly/coreth/rpc" ) // Node is a container on which services can be registered. diff --git a/params/avalanche_params.go b/params/avalanche_params.go index 58cbcc658d..526ebbb870 100644 --- a/params/avalanche_params.go +++ b/params/avalanche_params.go @@ -19,8 +19,8 @@ const ( AvalancheAtomicTxFee = units.MilliAvax ApricotPhase1GasLimit uint64 = 8_000_000 - CortinaGasLimit uint64 = 15_000_000 + ApricotPhase3ExtraDataSize uint64 = 80 ApricotPhase3MinBaseFee int64 = 75_000_000_000 ApricotPhase3MaxBaseFee int64 = 225_000_000_000 ApricotPhase3InitialBaseFee int64 = 225_000_000_000 @@ -31,18 +31,22 @@ const ( ApricotPhase5TargetGas uint64 = 15_000_000 ApricotPhase5BaseFeeChangeDenominator uint64 = 36 - DynamicFeeExtraDataSize = 80 - RollupWindow uint64 = 10 - // The base cost to charge per atomic transaction. Added in Apricot Phase 5. AtomicTxBaseCost uint64 = 10_000 ) -// The atomic gas limit specifies the maximum amount of gas that can be consumed by the atomic -// transactions included in a block and is enforced as of ApricotPhase5. Prior to ApricotPhase5, -// a block included a single atomic transaction. As of ApricotPhase5, each block can include a set -// of atomic transactions where the cumulative atomic gas consumed is capped by the atomic gas limit, -// similar to the block gas limit. -// -// This value must always remain <= MaxUint64. -var AtomicGasLimit *big.Int = big.NewInt(100_000) +// Constants for message sizes +const ( + MaxCodeHashesPerRequest = 5 +) + +var ( + // The atomic gas limit specifies the maximum amount of gas that can be consumed by the atomic + // transactions included in a block and is enforced as of ApricotPhase5. Prior to ApricotPhase5, + // a block included a single atomic transaction.
As of ApricotPhase5, each block can include a set + // of atomic transactions where the cumulative atomic gas consumed is capped by the atomic gas limit, + // similar to the block gas limit. + // + // This value must always remain <= MaxUint64. + AtomicGasLimit *big.Int = big.NewInt(100_000) +) diff --git a/peer/peer_tracker.go b/peer/peer_tracker.go index 8070005ca6..4122419e20 100644 --- a/peer/peer_tracker.go +++ b/peer/peer_tracker.go @@ -10,12 +10,9 @@ import ( "github.com/ava-labs/avalanchego/ids" utils_math "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" - + "github.com/tenderly/coreth/metrics" "github.com/ethereum/go-ethereum/log" - - "github.com/ava-labs/coreth/metrics" ) const ( @@ -33,7 +30,7 @@ const ( // information we track on a given peer type peerInfo struct { - version *version.Application + version version.Application bandwidth utils_math.Averager } @@ -44,9 +41,9 @@ type peerInfo struct { type peerTracker struct { peers map[ids.NodeID]*peerInfo // all peers we are connected to numTrackedPeers metrics.Gauge - trackedPeers set.Set[ids.NodeID] // peers that we have sent a request to + trackedPeers ids.NodeIDSet // peers that we have sent a request to numResponsivePeers metrics.Gauge - responsivePeers set.Set[ids.NodeID] // peers that responded to the last request they were sent + responsivePeers ids.NodeIDSet // peers that responded to the last request they were sent bandwidthHeap utils_math.AveragerHeap // tracks bandwidth peers are responding with averageBandwidthMetric metrics.GaugeFloat64 averageBandwidth utils_math.Averager @@ -56,9 +53,9 @@ func NewPeerTracker() *peerTracker { return &peerTracker{ peers: make(map[ids.NodeID]*peerInfo), numTrackedPeers: metrics.GetOrRegisterGauge("net_tracked_peers", nil), - trackedPeers: make(set.Set[ids.NodeID]), + trackedPeers: make(ids.NodeIDSet), numResponsivePeers: metrics.GetOrRegisterGauge("net_responsive_peers", nil), - responsivePeers: make(set.Set[ids.NodeID]), + responsivePeers: make(ids.NodeIDSet), bandwidthHeap: utils_math.NewMaxAveragerHeap(), averageBandwidthMetric: metrics.GetOrRegisterGaugeFloat64("net_average_bandwidth", nil), averageBandwidth: utils_math.NewAverager(0, bandwidthHalflife, time.Now()), @@ -95,7 +92,7 @@ func (p *peerTracker) getResponsivePeer() (ids.NodeID, utils_math.Averager, bool return nodeID, peer.bandwidth, true } -func (p *peerTracker) GetAnyPeer(minVersion *version.Application) (ids.NodeID, bool) { +func (p *peerTracker) GetAnyPeer(minVersion version.Application) (ids.NodeID, bool) { if p.shouldTrackNewPeer() { for nodeID := range p.peers { // if minVersion is specified and peer's version is less, skip @@ -162,7 +159,7 @@ func (p *peerTracker) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { } // Connected should be called when [nodeID] connects to this node -func (p *peerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Application) { +func (p *peerTracker) Connected(nodeID ids.NodeID, nodeVersion version.Application) { if peer := p.peers[nodeID]; peer != nil { // Peer is already connected, update the version if it has changed. 
// Log a warning message since the consensus engine should never call Connected on a peer diff --git a/peer/stats/stats.go b/peer/stats/stats.go index 4f4cdb6f48..287824e9cc 100644 --- a/peer/stats/stats.go +++ b/peer/stats/stats.go @@ -6,7 +6,7 @@ package stats import ( "time" - "github.com/ava-labs/coreth/metrics" + "github.com/tenderly/coreth/metrics" ) // RequestHandlerStats provides the interface for metrics for both app requests and cross chain requests. diff --git a/peer/waiting_handler.go b/peer/waiting_handler.go index cf625131ed..68d4a4aa0e 100644 --- a/peer/waiting_handler.go +++ b/peer/waiting_handler.go @@ -4,9 +4,8 @@ package peer import ( - "context" - - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/avalanchego/ids" + "github.com/tenderly/coreth/plugin/evm/message" ) var _ message.ResponseHandler = &waitingResponseHandler{} @@ -20,38 +19,21 @@ type waitingResponseHandler struct { failed bool // whether the original request is failed } -// newWaitingResponseHandler returns new instance of the waitingResponseHandler -func newWaitingResponseHandler() *waitingResponseHandler { - return &waitingResponseHandler{ - // Make buffer length 1 so that OnResponse can complete - // even if no goroutine is waiting on the channel (i.e. - // the context of a request is cancelled.) - responseChan: make(chan []byte, 1), - } -} - // OnResponse passes the response bytes to the responseChan and closes the channel -func (w *waitingResponseHandler) OnResponse(response []byte) error { +func (w *waitingResponseHandler) OnResponse(_ ids.NodeID, _ uint32, response []byte) error { w.responseChan <- response close(w.responseChan) return nil } // OnFailure sets the failed flag to true and closes the channel -func (w *waitingResponseHandler) OnFailure() error { +func (w *waitingResponseHandler) OnFailure(ids.NodeID, uint32) error { w.failed = true close(w.responseChan) return nil } -func (waitingHandler *waitingResponseHandler) WaitForResult(ctx context.Context) ([]byte, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case response := <-waitingHandler.responseChan: - if waitingHandler.failed { - return nil, ErrRequestFailed - } - return response, nil - } +// newWaitingResponseHandler returns new instance of the waitingResponseHandler +func newWaitingResponseHandler() *waitingResponseHandler { + return &waitingResponseHandler{responseChan: make(chan []byte)} } diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic_syncer.go index d68d61d597..928ab6a858 100644 --- a/plugin/evm/atomic_syncer.go +++ b/plugin/evm/atomic_syncer.go @@ -9,37 +9,31 @@ import ( "encoding/binary" "fmt" - "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/tenderly/coreth/plugin/evm/message" + syncclient "github.com/tenderly/coreth/sync/client" "github.com/ethereum/go-ethereum/common" - - "github.com/ava-labs/coreth/plugin/evm/message" - syncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/trie" -) - -var ( - _ Syncer = &atomicSyncer{} - _ syncclient.LeafSyncTask = &atomicSyncerLeafTask{} ) // atomicSyncer is used to sync the atomic trie from the network. The CallbackLeafSyncer // is responsible for orchestrating the sync while atomicSyncer is responsible for maintaining // the state of progress and writing the actual atomic trie to the trieDB. 
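Leaf keys in the atomic trie are a big-endian 8-byte height followed by the blockchain ID, which is why the syncer can resume from any height by packing the height and zero-padding the rest, as addZeroes does below. A self-contained sketch of that key layout (the 8-byte height matches wrappers.LongLen in the surrounding code; the 32-byte ID length is an assumption):

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	heightLen       = 8  // wrappers.LongLen in the code below
	hashLen         = 32 // assumed blockchain ID length
	atomicKeyLength = heightLen + hashLen
)

// startKey mirrors addZeroes: big-endian height, then zeros, producing the
// smallest possible key for that height.
func startKey(height uint64) []byte {
	key := make([]byte, atomicKeyLength)
	binary.BigEndian.PutUint64(key[:heightLen], height)
	return key
}

func main() {
	key := startKey(42)
	fmt.Println(len(key))                                 // 40
	fmt.Println(binary.BigEndian.Uint64(key[:heightLen])) // 42
}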
type atomicSyncer struct { - db *versiondb.Database - atomicTrie AtomicTrie - trie *trie.Trie // used to update the atomic trie + atomicTrie *atomicTrie targetRoot common.Hash targetHeight uint64 // syncer is used to sync leaves from the network. syncer *syncclient.CallbackLeafSyncer - // lastHeight is the greatest height for which key / values - // were last inserted into the [atomicTrie] - lastHeight uint64 + // nextHeight is the height which key / values + // are being inserted into [atomicTrie] for + nextHeight uint64 + + // nextCommit is the next height at which the atomic trie + // should be committed. + nextCommit uint64 } // addZeroes adds [common.HashLength] zeros to [height] and returns the result as []byte @@ -50,102 +44,72 @@ func addZeroes(height uint64) []byte { return packer.Bytes } -func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { - atomicTrie := atomicBackend.AtomicTrie() - lastCommittedRoot, lastCommit := atomicTrie.LastCommitted() - trie, err := atomicTrie.OpenTrie(lastCommittedRoot) - if err != nil { - return nil, err - } +func newAtomicSyncer(client syncclient.LeafClient, atomicTrie *atomicTrie, targetRoot common.Hash, targetHeight uint64) *atomicSyncer { + _, lastCommit := atomicTrie.LastCommitted() - atomicSyncer := &atomicSyncer{ - db: atomicBackend.db, + return &atomicSyncer{ atomicTrie: atomicTrie, - trie: trie, targetRoot: targetRoot, targetHeight: targetHeight, - lastHeight: lastCommit, + nextCommit: lastCommit + atomicTrie.commitHeightInterval, + nextHeight: lastCommit + 1, + syncer: syncclient.NewCallbackLeafSyncer(client), } - tasks := make(chan syncclient.LeafSyncTask, 1) - tasks <- &atomicSyncerLeafTask{atomicSyncer: atomicSyncer} - close(tasks) - atomicSyncer.syncer = syncclient.NewCallbackLeafSyncer(client, tasks, requestSize) - return atomicSyncer, nil } // Start begins syncing the target atomic root. -func (s *atomicSyncer) Start(ctx context.Context) error { - s.syncer.Start(ctx, 1, s.onSyncFailure) - return nil +func (s *atomicSyncer) Start(ctx context.Context) { + s.syncer.Start(ctx, 1, &syncclient.LeafSyncTask{ + NodeType: message.AtomicTrieNode, + Root: s.targetRoot, + Start: addZeroes(s.nextHeight), + OnLeafs: s.onLeafs, + OnFinish: s.onFinish, + OnSyncFailure: s.onSyncFailure, + }) } // onLeafs is the callback for the leaf syncer, which will insert the key-value pairs into the trie. -func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error { +func (s *atomicSyncer) onLeafs(_ common.Hash, keys [][]byte, values [][]byte) ([]*syncclient.LeafSyncTask, error) { for i, key := range keys { if len(key) != atomicKeyLength { - return fmt.Errorf("unexpected key len (%d) in atomic trie sync", len(key)) + return nil, fmt.Errorf("unexpected key len (%d) in atomic trie sync", len(key)) } // key = height + blockchainID height := binary.BigEndian.Uint64(key[:wrappers.LongLen]) - if height > s.lastHeight { - // If this key belongs to a new height, we commit - // the trie at the previous height before adding this key. - root, nodes, err := s.trie.Commit(false) - if err != nil { - return err - } - if err := s.atomicTrie.InsertTrie(nodes, root); err != nil { - return err - } - // AcceptTrie commits the trieDB and returns [isCommit] as true - // if we have reached or crossed a commit interval.
- isCommit, err := s.atomicTrie.AcceptTrie(s.lastHeight, root) - if err != nil { - return err - } - if isCommit { - // Flush pending changes to disk to preserve progress and - // free up memory if the trieDB was committed. - if err := s.db.Commit(); err != nil { - return err - } + + // Commit the trie and update [nextCommit] if we are crossing a commit interval + if height > s.nextCommit { + if err := s.atomicTrie.commit(s.nextCommit); err != nil { + return nil, err } - // Trie must be re-opened after committing (not safe for re-use after commit) - trie, err := s.atomicTrie.OpenTrie(root) - if err != nil { - return err + if err := s.atomicTrie.db.Commit(); err != nil { + return nil, err } - s.trie = trie - s.lastHeight = height + s.nextCommit += s.atomicTrie.commitHeightInterval } - if err := s.trie.Update(key, values[i]); err != nil { - return err + if err := s.atomicTrie.trie.TryUpdate(key, values[i]); err != nil { + return nil, err } } - return nil + return nil, nil } // onFinish is called when sync for this trie is complete. // commit the trie to disk and perform the final checks that we synced the target root correctly. -func (s *atomicSyncer) onFinish() error { +func (s *atomicSyncer) onFinish(_ common.Hash) error { // commit the trie on finish - root, nodes, err := s.trie.Commit(false) - if err != nil { - return err - } - if err := s.atomicTrie.InsertTrie(nodes, root); err != nil { - return err - } - if _, err := s.atomicTrie.AcceptTrie(s.targetHeight, root); err != nil { + if err := s.atomicTrie.commit(s.targetHeight); err != nil { return err } - if err := s.db.Commit(); err != nil { + if err := s.atomicTrie.db.Commit(); err != nil { return err } // the root of the trie should always match the targetRoot since we already verified the proofs, // here we check the root mainly for correctness of the atomicTrie's pointers and it should never fail. + root, _ := s.atomicTrie.LastCommitted() if s.targetRoot != root { return fmt.Errorf("synced root (%s) does not match expected (%s) for atomic trie ", root, s.targetRoot) } @@ -160,18 +124,3 @@ func (s *atomicSyncer) onSyncFailure(error) error { // Done returns a channel which produces any error that occurred during syncing or nil on success. 
func (s *atomicSyncer) Done() <-chan error { return s.syncer.Done() }
-
-type atomicSyncerLeafTask struct {
-	atomicSyncer *atomicSyncer
-}
-
-func (a *atomicSyncerLeafTask) Start() []byte                  { return addZeroes(a.atomicSyncer.lastHeight + 1) }
-func (a *atomicSyncerLeafTask) End() []byte                    { return nil }
-func (a *atomicSyncerLeafTask) NodeType() message.NodeType     { return message.AtomicTrieNode }
-func (a *atomicSyncerLeafTask) OnFinish(context.Context) error { return a.atomicSyncer.onFinish() }
-func (a *atomicSyncerLeafTask) OnStart() (bool, error)         { return false, nil }
-func (a *atomicSyncerLeafTask) Root() common.Hash              { return a.atomicSyncer.targetRoot }
-func (a *atomicSyncerLeafTask) Account() common.Hash           { return common.Hash{} }
-func (a *atomicSyncerLeafTask) OnLeafs(keys, vals [][]byte) error {
-	return a.atomicSyncer.onLeafs(keys, vals)
-}
diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic_syncer_test.go
index 07e07fe46a..31819ce45e 100644
--- a/plugin/evm/atomic_syncer_test.go
+++ b/plugin/evm/atomic_syncer_test.go
@@ -4,7 +4,6 @@ package evm

import (
-	"bytes"
	"context"
	"fmt"
	"math/rand"
@@ -12,17 +11,15 @@ import (
	"github.com/stretchr/testify/assert"

-	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/database/versiondb"
-	"github.com/ava-labs/coreth/core/rawdb"
-	"github.com/ava-labs/coreth/plugin/evm/message"
-	syncclient "github.com/ava-labs/coreth/sync/client"
-	"github.com/ava-labs/coreth/sync/handlers"
-	handlerstats "github.com/ava-labs/coreth/sync/handlers/stats"
-	"github.com/ava-labs/coreth/sync/syncutils"
-	"github.com/ava-labs/coreth/trie"
+	"github.com/tenderly/coreth/ethdb/memorydb"
+	"github.com/tenderly/coreth/plugin/evm/message"
+	syncclient "github.com/tenderly/coreth/sync/client"
+	"github.com/tenderly/coreth/sync/handlers"
+	handlerstats "github.com/tenderly/coreth/sync/handlers/stats"
+	"github.com/tenderly/coreth/trie"
	"github.com/ethereum/go-ethereum/common"
)
@@ -50,23 +47,21 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui
	)
	clientDB := versiondb.New(memdb.New())
-	repo, err := NewAtomicTxRepository(clientDB, message.Codec, 0, nil)
+
+	repo, err := NewAtomicTxRepository(clientDB, message.Codec, 0)
	if err != nil {
		t.Fatal("could not initialize atomic tx repository", err)
	}
-	atomicBackend, err := NewAtomicBackend(clientDB, testSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval)
+	atomicTrie, err := newAtomicTrie(clientDB, testSharedMemory(), nil, repo, message.Codec, 0, commitInterval)
	if err != nil {
-		t.Fatal("could not initialize atomic backend", err)
+		t.Fatal("could not initialize atomic trie", err)
	}

	// For each checkpoint, replace the leafsIntercept to shut off the syncer at the correct point and force resume from the checkpoint's
	// next trie.
	for i, checkpoint := range checkpoints {
		// Create syncer targeting the current [syncTrie].
- syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize) - if err != nil { - t.Fatal(err) - } + syncer := newAtomicSyncer(mockClient, atomicTrie, targetRoot, targetHeight) mockClient.GetLeafsIntercept = func(_ message.LeafsRequest, leafsResponse message.LeafsResponse) (message.LeafsResponse, error) { // If this request exceeds the desired number of leaves, intercept the request with an error if numLeaves+len(leafsResponse.Keys) > checkpoint.leafCutoff { @@ -90,10 +85,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui } // Create syncer targeting the current [targetRoot]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize) - if err != nil { - t.Fatal(err) - } + syncer := newAtomicSyncer(mockClient, atomicTrie, targetRoot, targetHeight) // Update intercept to only count the leaves mockClient.GetLeafsIntercept = func(_ message.LeafsRequest, leafsResponse message.LeafsResponse) (message.LeafsResponse, error) { @@ -111,50 +103,22 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui // we re-initialise trie DB for asserting the trie to make sure any issues with unflushed writes // are caught here as this will only pass if all trie nodes have been written to the underlying DB - atomicTrie := atomicBackend.AtomicTrie() clientTrieDB := atomicTrie.TrieDB() - syncutils.AssertTrieConsistency(t, targetRoot, serverTrieDB, clientTrieDB, nil) - - // check all commit heights are created correctly - hasher := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)) - assert.NoError(t, err) - - serverTrie, err := trie.New(trie.TrieID(targetRoot), serverTrieDB) - assert.NoError(t, err) - addAllKeysWithPrefix := func(prefix []byte) error { - nodeIt, err := serverTrie.NodeIterator(prefix) - if err != nil { - return err - } - it := trie.NewIterator(nodeIt) - for it.Next() { - if !bytes.HasPrefix(it.Key, prefix) { - return it.Err - } - err := hasher.Update(it.Key, it.Value) - assert.NoError(t, err) - } - return it.Err - } + trie.AssertTrieConsistency(t, targetRoot, serverTrieDB, clientTrieDB, nil) - for height := uint64(0); height <= targetHeight; height++ { - err := addAllKeysWithPrefix(database.PackUInt64(height)) + // check all commit heights are created + for height := atomicTrie.commitHeightInterval; height <= targetHeight; height += atomicTrie.commitHeightInterval { + root, err := atomicTrie.Root(height) assert.NoError(t, err) - - if height%commitInterval == 0 { - expected := hasher.Hash() - root, err := atomicTrie.Root(height) - assert.NoError(t, err) - assert.Equal(t, expected, root) - } + assert.NotZero(t, root) } } func TestAtomicSyncer(t *testing.T) { rand.Seed(1) targetHeight := 10 * uint64(commitInterval) - serverTrieDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) - root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, int(targetHeight), atomicKeyLength) + serverTrieDB := trie.NewDatabase(memorydb.New()) + root, _, _ := trie.GenerateTrie(t, serverTrieDB, int(targetHeight), atomicKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight, root, nil, int64(targetHeight)) } @@ -162,9 +126,9 @@ func TestAtomicSyncer(t *testing.T) { func TestAtomicSyncerResume(t *testing.T) { rand.Seed(1) targetHeight := 10 * uint64(commitInterval) - serverTrieDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) + serverTrieDB := trie.NewDatabase(memorydb.New()) numTrieKeys := int(targetHeight) - 1 // no atomic ops for genesis - root, _, _ := 
syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys, atomicKeyLength) + root, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys, atomicKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight, root, []atomicSyncTestCheckpoint{ { @@ -179,15 +143,14 @@ func TestAtomicSyncerResume(t *testing.T) { func TestAtomicSyncerResumeNewRootCheckpoint(t *testing.T) { rand.Seed(1) targetHeight1 := 10 * uint64(commitInterval) - serverTrieDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) + serverTrieDB := trie.NewDatabase(memorydb.New()) numTrieKeys1 := int(targetHeight1) - 1 // no atomic ops for genesis - root1, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys1, atomicKeyLength) + root1, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys1, atomicKeyLength) + rand.Seed(1) // seed rand again to get the same leafs in GenerateTrie targetHeight2 := 20 * uint64(commitInterval) numTrieKeys2 := int(targetHeight2) - 1 // no atomic ops for genesis - root2, _, _ := syncutils.FillTrie( - t, numTrieKeys1, numTrieKeys2, atomicKeyLength, serverTrieDB, root1, - ) + root2, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys2, atomicKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight1, root1, []atomicSyncTestCheckpoint{ { diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go index 3b068b648d..19b1062c7e 100644 --- a/plugin/evm/atomic_trie.go +++ b/plugin/evm/atomic_trie.go @@ -4,41 +4,37 @@ package evm import ( + "encoding/binary" "fmt" "time" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/hashdb" - "github.com/ava-labs/coreth/trie/trienode" + "github.com/tenderly/coreth/core/types" + syncclient "github.com/tenderly/coreth/sync/client" + "github.com/tenderly/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) const ( - progressLogFrequency = 30 * time.Second + trieCommitSizeCap = 10 * units.MiB + progressLogUpdate = 30 * time.Second atomicKeyLength = wrappers.LongLen + common.HashLength - sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates - - atomicTrieTipBufferSize = 1 // No need to support a buffer of previously accepted tries for the atomic trie - atomicTrieMemoryCap = 64 * units.MiB + sharedMemoryApplyBatchSize = 2000 // specifies the number of atomic operations to batch progress updates ) var ( _ AtomicTrie = &atomicTrie{} lastCommittedKey = []byte("atomicTrieLastCommittedBlock") appliedSharedMemoryCursorKey = []byte("atomicTrieLastAppliedToSharedMemory") - heightMapRepairKey = []byte("atomicTrieHeightMapRepair") ) // AtomicTrie maintains an index of atomic operations by blockchainIDs for every block @@ -47,12 +43,9 @@ var ( // are the atomic operations applied to shared memory while processing the block accepted // at the corresponding height. type AtomicTrie interface { - // OpenTrie returns a modifiable instance of the atomic trie backed by trieDB - // opened at hash. 
-	OpenTrie(hash common.Hash) (*trie.Trie, error)
-
-	// UpdateTrie updates [tr] to inlude atomicOps for height.
-	UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error
+	// Index indexes the given atomicOps at the specified block height.
+	// The atomic trie is committed if the block height is a multiple of the commit interval.
+	Index(height uint64, atomicOps map[ids.ID]*atomic.Requests) error

	// Iterator returns an AtomicTrieIterator to iterate the trie at the given
	// root hash starting at [cursor].
@@ -69,26 +62,23 @@ type AtomicTrie interface {
	// common.Hash{} instead
	Root(height uint64) (common.Hash, error)

-	// LastAcceptedRoot returns the most recent accepted root of the atomic trie,
-	// or the root it was initialized to if no new tries were accepted yet.
-	LastAcceptedRoot() common.Hash
-
-	// InsertTrie updates the trieDB with the provided node set and adds a reference
-	// to root in the trieDB. Once InsertTrie is called, it is expected either
-	// AcceptTrie or RejectTrie be called for the same root.
-	InsertTrie(nodes *trienode.NodeSet, root common.Hash) error
-
-	// AcceptTrie marks root as the last accepted atomic trie root, and
-	// commits the trie to persistent storage if height is divisible by
-	// the commit interval. Returns true if the trie was committed.
-	AcceptTrie(height uint64, root common.Hash) (bool, error)
-
-	// RejectTrie dereferences root from the trieDB, freeing memory.
-	RejectTrie(root common.Hash) error
-
-	// RepairHeightMap repairs the height map of the atomic trie by iterating
-	// over all leaves in the trie and committing the trie at every commit interval.
-	RepairHeightMap(to uint64) (bool, error)
+	// ApplyToSharedMemory applies the atomic operations that have been indexed into the trie
+	// but not yet applied to shared memory for heights less than or equal to [lastAcceptedBlock].
+	// This executes operations in the range [cursorHeight+1, lastAcceptedBlock].
+	// The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie
+	// the range of operations that were added to the trie without being executed on shared memory.
+	ApplyToSharedMemory(lastAcceptedBlock uint64) error
+
+	// MarkApplyToSharedMemoryCursor marks the atomic trie as containing atomic ops that
+	// have not been executed on shared memory starting at [previousLastAcceptedHeight+1].
+	// This is used when state sync syncs the atomic trie, such that the atomic operations
+	// from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync
+	// will not have been executed on shared memory.
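	// An illustrative state-sync flow for these two methods (hypothetical
	// caller, not part of this change):
	//
	//	// before syncing: record that ops in (previousLastAcceptedHeight, target]
	//	// will land in the trie without being executed on shared memory
	//	_ = trie.MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight)
	//	// ... state sync populates the atomic trie ...
	//	_ = trie.ApplyToSharedMemory(lastAcceptedHeight) // replay the skipped range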
+	MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error
+
+	// Syncer creates and returns a new Syncer object that can be used to sync the
+	// state of the atomic trie from peers
+	Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64) Syncer
}

// AtomicTrieIterator is a stateful iterator that iterates the leaves of an AtomicTrie
@@ -101,9 +91,6 @@ type AtomicTrieIterator interface {
	// returned []byte can be freely modified
	Key() []byte

-	// Value returns the current database value that the iterator is iterating
-	Value() []byte
-
	// BlockNumber returns the current block number
	BlockNumber() uint64

@@ -119,32 +106,47 @@ type AtomicTrieIterator interface {
}

// atomicTrie implements the AtomicTrie interface
+// using the eth trie.Trie implementation
type atomicTrie struct {
-	commitInterval      uint64            // commit interval, same as commitHeightInterval by default
-	metadataDB          database.Database // Underlying database containing the atomic trie metadata
-	trieDB              *trie.Database    // Trie database
-	lastCommittedRoot   common.Hash       // trie root of the most recent commit
-	lastCommittedHeight uint64            // index height of the most recent commit
-	lastAcceptedRoot    common.Hash       // most recent trie root passed to accept trie or the root of the atomic trie on intialization.
-	codec               codec.Manager
-	memoryCap           common.StorageSize
-	tipBuffer           *core.BoundedBuffer[common.Hash]
+	commitHeightInterval uint64              // commit interval, same as the default commitHeightInterval unless overridden in tests
+	db                   *versiondb.Database // Underlying database
+	bonusBlocks          map[uint64]ids.ID   // Map of height to blockID for blocks to skip indexing
+	metadataDB           database.Database   // Underlying database containing the atomic trie metadata
+	atomicTrieDB         database.Database   // Underlying database containing the atomic trie
+	trieDB               *trie.Database      // Trie database
+	trie                 *trie.Trie          // Atomic trie.Trie mapping key (height+blockchainID) and value (codec serialized atomic.Requests)
+	repo                 AtomicTxRepository
+	lastCommittedHash    common.Hash // trie root hash of the most recent commit
+	lastCommittedHeight  uint64      // index height of the most recent commit
+	codec                codec.Manager
+	log                  log.Logger // struct logger
+	sharedMemory         atomic.SharedMemory
+}
+
+// NewAtomicTrie returns a new instance of an atomicTrie with the default commitHeightInterval.
+// Initializes the trie before returning it.
+// If the cursor set by MarkApplyToSharedMemoryCursor exists, the atomic operations are applied synchronously
+// during initialization (blocks until ApplyToSharedMemory completes).
+func NewAtomicTrie(
+	db *versiondb.Database, sharedMemory atomic.SharedMemory,
+	bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64,
+) (AtomicTrie, error) {
+	return newAtomicTrie(db, sharedMemory, bonusBlocks, repo, codec, lastAcceptedHeight, commitHeightInterval)
}

// newAtomicTrie returns a new instance of an atomicTrie with a configurable commitHeightInterval, used in testing.
// Initializes the trie before returning it.
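// Hypothetical construction sketch (the argument names follow the signature
// above; the commit interval value here is arbitrary, not a documented default):
//
//	atomicTrie, err := NewAtomicTrie(db, sharedMemory, bonusBlocks, repo, codec, lastAcceptedHeight, 4096)
//	if err != nil {
//		return err // includes any failure to finish a pending ApplyToSharedMemory
//	}
//	root, height := atomicTrie.LastCommitted()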
func newAtomicTrie( - atomicTrieDB database.Database, metadataDB database.Database, - codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, + db *versiondb.Database, sharedMemory atomic.SharedMemory, + bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, codec codec.Manager, + lastAcceptedHeight uint64, commitHeightInterval uint64, ) (*atomicTrie, error) { + atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, db) + metadataDB := prefixdb.New(atomicTrieMetaDBPrefix, db) root, height, err := lastCommittedRootIfExists(metadataDB) if err != nil { return nil, err } - // initialize to EmptyRootHash if there is no committed root. - if root == (common.Hash{}) { - root = types.EmptyRootHash - } // If the last committed height is above the last accepted height, then we fall back to // the last commit below the last accepted height. if height > lastAcceptedHeight { @@ -155,30 +157,42 @@ func newAtomicTrie( } } - trieDB := trie.NewDatabase( - rawdb.NewDatabase(Database{atomicTrieDB}), + triedb := trie.NewDatabaseWithConfig( + Database{atomicTrieDB}, &trie.Config{ - HashDB: &hashdb.Config{ - CleanCacheSize: 64 * units.MiB, // Allocate 64MB of memory for clean cache - }, + Cache: 64, // Allocate 64MB of memory for clean cache + Preimages: false, // Keys are not hashed, so there is no need for preimages }, ) + t, err := trie.New(root, triedb) + if err != nil { + return nil, err + } - return &atomicTrie{ - commitInterval: commitHeightInterval, - metadataDB: metadataDB, - trieDB: trieDB, - codec: codec, - lastCommittedRoot: root, - lastCommittedHeight: height, - tipBuffer: core.NewBoundedBuffer(atomicTrieTipBufferSize, trieDB.Dereference), - memoryCap: atomicTrieMemoryCap, - // Initialize lastAcceptedRoot to the last committed root. - // If there were further blocks processed (ahead of the commit interval), - // AtomicBackend will call InsertTrie/AcceptTrie on atomic ops - // for those blocks. - lastAcceptedRoot: root, - }, nil + atomicTrie := &atomicTrie{ + commitHeightInterval: commitHeightInterval, + db: db, + bonusBlocks: bonusBlocks, + atomicTrieDB: atomicTrieDB, + metadataDB: metadataDB, + trieDB: triedb, + trie: t, + repo: repo, + codec: codec, + lastCommittedHash: root, + lastCommittedHeight: height, + log: log.New("c", "atomicTrie"), + sharedMemory: sharedMemory, + } + + // We call ApplyToSharedMemory here to ensure that if the node was shut down in the middle + // of applying atomic operations from state sync, we finish the operation to ensure we never + // return an atomic trie that is out of sync with shared memory. + // In normal operation, the cursor is not set, such that this call will be a no-op. 
+	if err := atomicTrie.ApplyToSharedMemory(lastAcceptedHeight); err != nil {
+		return nil, err
+	}
+	return atomicTrie, atomicTrie.initialize(lastAcceptedHeight)
}

// lastCommittedRootIfExists returns the last committed trie root and height if it exists
@@ -193,13 +207,10 @@ func lastCommittedRootIfExists(db database.Database) (common.Hash, uint64, error
		return common.Hash{}, 0, nil
	case err != nil:
		return common.Hash{}, 0, err
+	case len(lastCommittedHeightBytes) != wrappers.LongLen:
+		return common.Hash{}, 0, fmt.Errorf("expected value of lastCommittedKey to be %d but was %d", wrappers.LongLen, len(lastCommittedHeightBytes))
	}
-
-	height, err := database.ParseUInt64(lastCommittedHeightBytes)
-	if err != nil {
-		return common.Hash{}, 0, fmt.Errorf("expected value at lastCommittedKey to be a valid uint64: %w", err)
-	}
-
+	height := binary.BigEndian.Uint64(lastCommittedHeightBytes)
	hash, err := db.Get(lastCommittedHeightBytes)
	if err != nil {
		return common.Hash{}, 0, fmt.Errorf("committed hash does not exist for committed height: %d: %w", height, err)
@@ -212,20 +223,203 @@ func nearestCommitHeight(blockNumber uint64, commitInterval uint64) uint64 {
	return blockNumber - (blockNumber % commitInterval)
}

-func (a *atomicTrie) OpenTrie(root common.Hash) (*trie.Trie, error) {
-	return trie.New(trie.TrieID(root), a.trieDB)
+// initialize populates the atomic trie using the atomic repository height index,
+// iterating from the last indexed height to lastAcceptedBlockNumber and making a single commit at the
+// most recent height divisible by the commitInterval.
+// Subsequent updates to this trie are made using the Index call as blocks are accepted.
+// Note: this method assumes no atomic txs are applied at genesis.
+func (a *atomicTrie) initialize(lastAcceptedBlockNumber uint64) error {
+	start := time.Now()
+	a.log.Info("initializing atomic trie", "lastAcceptedBlockNumber", lastAcceptedBlockNumber)
+	// finalCommitHeight is the highest block that can be committed, i.e. the highest height divisible by a.commitHeightInterval.
+	// Txs from heights greater than finalCommitHeight are to be included in the trie corresponding to the block at
+	// finalCommitHeight+a.commitHeightInterval, which has not been accepted yet.
+	finalCommitHeight := nearestCommitHeight(lastAcceptedBlockNumber, a.commitHeightInterval)
+	uncommittedOpsMap := make(map[uint64]map[ids.ID]*atomic.Requests, lastAcceptedBlockNumber-finalCommitHeight)
+
+	// iterate by height, from [a.lastCommittedHeight+1] to [lastAcceptedBlockNumber]
+	iter := a.repo.IterateByHeight(a.lastCommittedHeight + 1)
+	defer iter.Release()
+
+	preCommitBlockIndexed := 0
+	postCommitTxIndexed := 0
+	lastUpdate := time.Now()
+
+	// keep track of the latest generated trie's root and height.
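+	// Worked example of this bookkeeping, assuming a commit interval of 4096:
+	// with lastAcceptedBlockNumber = 10000, nearestCommitHeight gives
+	// finalCommitHeight = 10000 - (10000 % 4096) = 8192. Heights <= 8192 are
+	// written to the trie directly (and committed at 8192), while heights
+	// 8193..10000 are buffered in uncommittedOpsMap and replayed after that commit.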
+	lastHash := common.Hash{}
+	lastHeight := a.lastCommittedHeight
+	for iter.Next() {
+		// Get the height and transactions for this iteration (from the key and value, respectively).
+		// Iterate over the transactions, indexing them if the height is <= the final commit height;
+		// otherwise, add the atomic operations from the transaction to the uncommittedOpsMap.
+		height := binary.BigEndian.Uint64(iter.Key())
+		txs, err := ExtractAtomicTxs(iter.Value(), true, a.codec)
+		if err != nil {
+			return err
+		}
+
+		// combine atomic operations from all transactions at this block height
+		combinedOps, err := mergeAtomicOps(txs)
+		if err != nil {
+			return err
+		}
+
+		if _, skipBonusBlock := a.bonusBlocks[height]; skipBonusBlock {
+			// If [height] is a bonus block, do not index the atomic operations into the trie
+		} else if height > finalCommitHeight {
+			// if height is greater than the final commit height, add it to the map so that we can write it later
+			// this is to ensure we have all the data before the commit height so that we can commit the
+			// trie
+			uncommittedOpsMap[height] = combinedOps
+		} else {
+			if err := a.updateTrie(height, combinedOps); err != nil {
+				return err
+			}
+			preCommitBlockIndexed++
+		}
+
+		if time.Since(lastUpdate) > progressLogUpdate {
+			a.log.Info("imported entries into atomic trie pre-commit", "heightsIndexed", preCommitBlockIndexed)
+			lastUpdate = time.Now()
+		}
+
+		// if height has reached or skipped over the next commit interval,
+		// keep track of progress and keep the commit size under trieCommitSizeCap
+		commitHeight := nearestCommitHeight(height, a.commitHeightInterval)
+		if lastHeight < commitHeight {
+			hash, _, err := a.trie.Commit(nil)
+			if err != nil {
+				return err
+			}
+			// Dereference lastHash to avoid writing more intermediate
+			// trie nodes than needed to disk, while keeping the commit
+			// size under trieCommitSizeCap (approximately).
+			// Check [lastHash != hash] here to avoid dereferencing the
+			// trie root in case there were no atomic txs since the
+			// last commit.
+			if (lastHash != common.Hash{} && lastHash != hash) {
+				a.trieDB.Dereference(lastHash)
+			}
+			storage, _ := a.trieDB.Size()
+			if storage > trieCommitSizeCap {
+				a.log.Info("committing atomic trie progress", "storage", storage)
+				if err := a.commit(commitHeight); err != nil {
+					return err
+				}
+				// Flush any remaining changes that have not been committed yet in the versiondb.
+				if err := a.db.Commit(); err != nil {
+					return err
+				}
+			}
+			lastHash = hash
+			lastHeight = commitHeight
+		}
+	}
+	if err := iter.Error(); err != nil {
+		return err
+	}
+
+	// Note: we should never create a commit at the genesis block (should not contain any atomic txs)
+	if lastAcceptedBlockNumber == 0 {
+		return nil
+	}
+	// now that all heights less than or equal to [finalCommitHeight] have been processed,
+	// commit the trie. If [a.lastCommittedHeight] is already the same as
+	// [finalCommitHeight] (or higher, which can occur if we resume a state sync)
+	// we skip this commit.
+	if finalCommitHeight > a.lastCommittedHeight {
+		if err := a.commit(finalCommitHeight); err != nil {
+			return err
+		}
+		// Flush any remaining changes that have not been committed yet in the versiondb.
+		if err := a.db.Commit(); err != nil {
+			return err
+		}
+	}
+
+	// process uncommitted ops for heights > finalCommitHeight
+	for height, ops := range uncommittedOpsMap {
+		if err := a.updateTrie(height, ops); err != nil {
+			return fmt.Errorf("failed to update trie at height %d: %w", height, err)
+		}
+
+		postCommitTxIndexed++
+		if time.Since(lastUpdate) > progressLogUpdate {
+			a.log.Info("imported entries into atomic trie post-commit", "entriesIndexed", postCommitTxIndexed)
+			lastUpdate = time.Now()
+		}
+	}
+
+	a.log.Info(
+		"finished initializing atomic trie",
+		"lastAcceptedBlockNumber", lastAcceptedBlockNumber,
+		"preCommitEntriesIndexed", preCommitBlockIndexed,
+		"postCommitEntriesIndexed", postCommitTxIndexed,
+		"lastCommittedHash", a.lastCommittedHash,
+		"lastCommittedHeight", a.lastCommittedHeight,
+		"time", time.Since(start),
+	)
+	return nil
+}
+
+// Index updates the trie with entries in atomicOps.
+// [height] must be at least lastCommittedHeight and at most (lastCommittedHeight+commitInterval).
+// This function updates the following:
+// - heightBytes => trie root hash (if the trie was committed)
+// - lastCommittedBlock => height (if the trie was committed)
+func (a *atomicTrie) Index(height uint64, atomicOps map[ids.ID]*atomic.Requests) error {
+	if err := a.validateIndexHeight(height); err != nil {
+		return err
+	}
+
+	if err := a.updateTrie(height, atomicOps); err != nil {
+		return err
+	}
+
+	if height%a.commitHeightInterval == 0 {
+		return a.commit(height)
+	}
+
+	return nil
+}
+
+// validateIndexHeight returns an error if [height] is not currently valid to be indexed.
+func (a *atomicTrie) validateIndexHeight(height uint64) error {
+	// Do not allow a height that we have already passed to be indexed
	if height < a.lastCommittedHeight {
+		return fmt.Errorf("height %d must be after last committed height %d", height, a.lastCommittedHeight)
+	}
+
+	// Do not allow a height that is more than a commit interval ahead
+	// of the current index
+	nextCommitHeight := a.lastCommittedHeight + a.commitHeightInterval
+	if height > nextCommitHeight {
+		return fmt.Errorf("height %d not within the next commit height %d", height, nextCommitHeight)
+	}
+
+	return nil
}

-// commit calls commit on the underlying trieDB and updates metadata pointers.
-func (a *atomicTrie) commit(height uint64, root common.Hash) error {
-	if err := a.trieDB.Commit(root, false); err != nil {
+// commit calls commit on the trie to generate a root, commits the underlying trieDB, and updates the
+// metadata pointers.
+// assumes that the caller is aware of the commit rules, i.e. the height being within commitInterval.
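+// For reference, a successful commit leaves the following metadata behind
+// (written by updateLastCommitted below), where heightBytes is the big-endian height:
+//
+//	metadataDB[heightBytes]      = root hash  // height -> committed trie root
+//	metadataDB[lastCommittedKey] = heightBytes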
+func (a *atomicTrie) commit(height uint64) error {
+	hash, _, err := a.trie.Commit(nil)
+	if err != nil {
+		return err
+	}
+
+	a.log.Info("committed atomic trie", "hash", hash.String(), "height", height)
+	if err := a.trieDB.Commit(hash, false, nil); err != nil {
+		return err
+	}
+
+	if err := a.updateLastCommitted(hash, height); err != nil {
		return err
	}
-	log.Info("committed atomic trie", "root", root.String(), "height", height)
-	return a.updateLastCommitted(root, height)
+	return nil
}

-func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error {
+func (a *atomicTrie) updateTrie(height uint64, atomicOps map[ids.ID]*atomic.Requests) error {
	for blockchainID, requests := range atomicOps {
		valueBytes, err := a.codec.Marshal(codecVersion, requests)
		if err != nil {
@@ -238,7 +432,7 @@ func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[id
		keyPacker := wrappers.Packer{Bytes: make([]byte, atomicKeyLength)}
		keyPacker.PackLong(height)
		keyPacker.PackFixedBytes(blockchainID[:])
-		if err := trie.Update(keyPacker.Bytes, valueBytes); err != nil {
+		if err := a.trie.TryUpdate(keyPacker.Bytes, valueBytes); err != nil {
			return err
		}
	}
@@ -248,13 +442,14 @@ func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[id

// LastCommitted returns the last committed trie hash and last committed height
func (a *atomicTrie) LastCommitted() (common.Hash, uint64) {
-	return a.lastCommittedRoot, a.lastCommittedHeight
+	return a.lastCommittedHash, a.lastCommittedHeight
}

// updateLastCommitted adds [height] -> [root] to the index and marks it as the last committed
// root/height pair.
func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error {
-	heightBytes := database.PackUInt64(height)
+	heightBytes := make([]byte, wrappers.LongLen)
+	binary.BigEndian.PutUint64(heightBytes, height)

	// now save the trie hash against the height it was committed at
	if err := a.metadataDB.Put(heightBytes, root[:]); err != nil {
@@ -266,7 +461,7 @@ func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error
		return err
	}

-	a.lastCommittedRoot = root
+	a.lastCommittedHash = root
	a.lastCommittedHeight = height
	return nil
}

// Iterator returns a types.AtomicTrieIterator that iterates the trie from the given
// atomic trie root, starting at the specified [cursor].
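// Example use of the iterator, sketched from the methods it exposes
// (hypothetical caller, not part of this change):
//
//	it, err := a.Iterator(root, nil) // nil cursor iterates from the first leaf
//	if err != nil {
//		return err
//	}
//	for it.Next() {
//		height, ops := it.BlockNumber(), it.AtomicOps()
//		_, _ = height, ops // apply or inspect the operations
//	}
//	if err := it.Error(); err != nil {
//		return err
//	}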
func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (AtomicTrieIterator, error) { - t, err := trie.New(trie.TrieID(root), a.trieDB) + t, err := trie.New(root, a.trieDB) if err != nil { return nil, err } - nodeIt, err := t.NodeIterator(cursor) - if err != nil { - return nil, err - } - iter := trie.NewIterator(nodeIt) + iter := trie.NewIterator(t.NodeIterator(cursor)) return NewAtomicTrieIterator(iter, a.codec), iter.Err } @@ -308,7 +499,9 @@ func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { return types.EmptyRootHash, nil } - heightBytes := database.PackUInt64(height) + heightBytes := make([]byte, wrappers.LongLen) + binary.BigEndian.PutUint64(heightBytes, height) + hash, err := metadataDB.Get(heightBytes) switch { case err == database.ErrNotFound: @@ -319,62 +512,108 @@ func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { return common.BytesToHash(hash), nil } -func (a *atomicTrie) LastAcceptedRoot() common.Hash { - return a.lastAcceptedRoot -} - -func (a *atomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error { - if nodes != nil { - if err := a.trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { - return err - } +// ApplyToSharedMemory applies the atomic operations that have been indexed into the trie +// but not yet applied to shared memory for heights less than or equal to [lastAcceptedBlock]. +// This executes operations in the range [cursorHeight+1, lastAcceptedBlock]. +// The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie +// the range of operations that were added to the trie without being executed on shared memory. +func (a *atomicTrie) ApplyToSharedMemory(lastAcceptedBlock uint64) error { + sharedMemoryCursor, err := a.metadataDB.Get(appliedSharedMemoryCursorKey) + if err == database.ErrNotFound { + return nil + } else if err != nil { + return err } - a.trieDB.Reference(root, common.Hash{}) - // The use of [Cap] in [insertTrie] prevents exceeding the configured memory - // limit (and OOM) in case there is a large backlog of processing (unaccepted) blocks. - if _, nodeSize, _ := a.trieDB.Size(); nodeSize <= a.memoryCap { - return nil + log.Info("applying atomic operations to shared memory", "root", a.lastCommittedHash, "lastAcceptedBlock", lastAcceptedBlock, "startHeight", binary.BigEndian.Uint64(sharedMemoryCursor[:wrappers.LongLen])) + + it, err := a.Iterator(a.lastCommittedHash, sharedMemoryCursor) + if err != nil { + return err } - if err := a.trieDB.Cap(a.memoryCap - ethdb.IdealBatchSize); err != nil { - return fmt.Errorf("failed to cap atomic trie for root %s: %w", root, err) + lastUpdate := time.Now() + putRequests, removeRequests := 0, 0 + totalPutRequests, totalRemoveRequests := 0, 0 + + // value of sharedMemoryCursor is either a uint64 signifying the + // height iteration should begin at or is a uint64+blockchainID + // specifying the last atomic operation that was applied to shared memory. + // To avoid applying the same operation twice, we call [it.Next()] in the + // latter case. + if len(sharedMemoryCursor) > wrappers.LongLen { + it.Next() } - return nil -} + batchOps := make(map[ids.ID]*atomic.Requests) + for it.Next() { + height := it.BlockNumber() + atomicOps := it.AtomicOps() -// AcceptTrie commits the triedb at [root] if needed and returns true if a commit -// was performed. 
-func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { - hasCommitted := false - // Because we do not accept the trie at every height, we may need to - // populate roots at prior commit heights that were skipped. - for nextCommitHeight := a.lastCommittedHeight + a.commitInterval; nextCommitHeight < height; nextCommitHeight += a.commitInterval { - if err := a.commit(nextCommitHeight, a.lastAcceptedRoot); err != nil { - return false, err + if height > lastAcceptedBlock { + log.Warn("Found height above last accepted block while applying operations to shared memory", "height", height, "lastAcceptedBlock", lastAcceptedBlock) + break } - hasCommitted = true - } - - // Attempt to dereference roots at least [tipBufferSize] old - // - // Note: It is safe to dereference roots that have been committed to disk - // (they are no-ops). - a.tipBuffer.Insert(root) - // Commit this root if we have reached the [commitInterval]. - if height%a.commitInterval == 0 { - if err := a.commit(height, root); err != nil { - return false, err + putRequests += len(atomicOps.PutRequests) + removeRequests += len(atomicOps.RemoveRequests) + totalPutRequests += len(atomicOps.PutRequests) + totalRemoveRequests += len(atomicOps.RemoveRequests) + if time.Since(lastUpdate) > 10*time.Second { + log.Info("atomic trie iteration", "height", height, "puts", totalPutRequests, "removes", totalRemoveRequests) + lastUpdate = time.Now() } - hasCommitted = true + mergeAtomicOpsToMap(batchOps, it.BlockchainID(), atomicOps) + + if putRequests+removeRequests > sharedMemoryApplyBatchSize { + // Update the cursor to the key of the atomic operation being executed on shared memory. + // If the node shuts down in the middle of this function call, ApplyToSharedMemory will + // resume operation starting at the key immediately following [it.Key()]. + if err = a.metadataDB.Put(appliedSharedMemoryCursorKey, it.Key()); err != nil { + return err + } + batch, err := a.db.CommitBatch() + if err != nil { + return err + } + // calling [sharedMemory.Apply] updates the last applied pointer atomically with the shared memory operation. + if err = a.sharedMemory.Apply(batchOps, batch); err != nil { + return err + } + putRequests, removeRequests = 0, 0 + batchOps = make(map[ids.ID]*atomic.Requests) + } + } + if err := it.Error(); err != nil { + return err + } + + if err = a.metadataDB.Delete(appliedSharedMemoryCursorKey); err != nil { + return err + } + batch, err := a.db.CommitBatch() + if err != nil { + return err + } + if err = a.sharedMemory.Apply(batchOps, batch); err != nil { + return err } + log.Info("finished applying atomic operations", "puts", totalPutRequests, "removes", totalRemoveRequests) + return nil +} - a.lastAcceptedRoot = root - return hasCommitted, nil +// MarkApplyToSharedMemoryCursor marks the atomic trie as containing atomic ops that +// have not been executed on shared memory starting at [previousLastAcceptedHeight+1]. +// This is used when state sync syncs the atomic trie, such that the atomic operations +// from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync +// will not have been executed on shared memory. +func (a *atomicTrie) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error { + // Set the cursor to [previousLastAcceptedHeight+1] so that we begin the iteration at the + // first item that has not been applied to shared memory. 
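+	// Note on the cursor format used by ApplyToSharedMemory above: the stored
+	// value is either a bare 8-byte height (a fresh cursor, as written below)
+	// or a full 40-byte trie key (height + blockchainID) recorded mid-batch,
+	// in which case the entry at that key has already been applied and
+	// iteration skips it via it.Next().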
+	return database.PutUInt64(a.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1)
}

-func (a *atomicTrie) RejectTrie(root common.Hash) error {
-	a.trieDB.Dereference(root)
-	return nil
+// Syncer creates and returns a new Syncer object that can be used to sync the
+// state of the atomic trie from peers
+func (a *atomicTrie) Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64) Syncer {
+	return newAtomicSyncer(client, a, targetRoot, targetHeight)
}
diff --git a/plugin/evm/atomic_trie_iterator.go b/plugin/evm/atomic_trie_iterator.go
index 2bdf90b581..749d35efae 100644
--- a/plugin/evm/atomic_trie_iterator.go
+++ b/plugin/evm/atomic_trie_iterator.go
@@ -12,7 +12,7 @@ import (
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/wrappers"

-	"github.com/ava-labs/coreth/trie"
+	"github.com/tenderly/coreth/trie"
	"github.com/ethereum/go-ethereum/common"
)
diff --git a/plugin/evm/block_builder_test.go b/plugin/evm/block_builder_test.go
new file mode 100644
index 0000000000..8864a68d54
--- /dev/null
+++ b/plugin/evm/block_builder_test.go
@@ -0,0 +1,92 @@
+// (c) 2019-2021, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package evm
+
+import (
+	"math/big"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/tenderly/coreth/params"
+
+	"github.com/ava-labs/avalanchego/snow"
+)
+
+func TestBlockBuilderShutsDown(t *testing.T) {
+	shutdownChan := make(chan struct{})
+	wg := &sync.WaitGroup{}
+	config := *params.TestChainConfig
+	// Set ApricotPhase4BlockTimestamp one hour in the future so that it will
+	// create a goroutine waiting for an hour before shutting down the
+	// buildBlockTimer.
+	config.ApricotPhase4BlockTimestamp = big.NewInt(time.Now().Add(time.Hour).Unix())
+	builder := &blockBuilder{
+		ctx:          snow.DefaultContextTest(),
+		chainConfig:  &config,
+		shutdownChan: shutdownChan,
+		shutdownWg:   wg,
+	}
+
+	builder.handleBlockBuilding()
+	// Close [shutdownChan] and ensure that the wait group finishes in a reasonable
+	// amount of time.
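+	// attemptAwait is assumed to be defined elsewhere in this package; a minimal
+	// version consistent with how these tests use it might look like:
+	//
+	//	func attemptAwait(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) {
+	//		done := make(chan struct{})
+	//		go func() {
+	//			wg.Wait()
+	//			close(done)
+	//		}()
+	//		select {
+	//		case <-done:
+	//		case <-time.After(timeout):
+	//			t.Fatal("timed out waiting for wait group")
+	//		}
+	//	}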
+	close(shutdownChan)
+	attemptAwait(t, wg, 5*time.Second)
+}
+
+func TestBlockBuilderSkipsTimerInitialization(t *testing.T) {
+	shutdownChan := make(chan struct{})
+	wg := &sync.WaitGroup{}
+	builder := &blockBuilder{
+		ctx:          snow.DefaultContextTest(),
+		chainConfig:  params.TestChainConfig,
+		shutdownChan: shutdownChan,
+		shutdownWg:   wg,
+	}
+
+	builder.handleBlockBuilding()
+	// The wait group should finish immediately since no goroutine
+	// should be created when all prices should be set from the start
+	attemptAwait(t, wg, time.Millisecond)
+}
+
+func TestBlockBuilderStopsTimer(t *testing.T) {
+	shutdownChan := make(chan struct{})
+	wg := &sync.WaitGroup{}
+	config := *params.TestChainConfig
+	// Set ApricotPhase4BlockTimestamp one second in the future so that it will
+	// create a goroutine that waits until that time before stopping the timer
+	config.ApricotPhase4BlockTimestamp = big.NewInt(time.Now().Add(1 * time.Second).Unix())
+	builder := &blockBuilder{
+		ctx:          snow.DefaultContextTest(),
+		chainConfig:  &config,
+		shutdownChan: shutdownChan,
+		shutdownWg:   wg,
+	}
+
+	builder.handleBlockBuilding()
+
+	if builder.buildBlockTimer == nil {
+		t.Fatal("expected block timer to be non-nil")
+	}
+	builder.buildBlockLock.Lock()
+	builder.buildStatus = conditionalBuild
+	builder.buildBlockLock.Unlock()
+
+	// With ApricotPhase4 set slightly in the future, the builder should create a
+	// goroutine to sleep until it's time to update and mark the wait group as done when it has
+	// completed the update.
+	attemptAwait(t, wg, 5*time.Second)
+
+	if builder.buildBlockTimer == nil {
+		t.Fatal("expected block timer to be non-nil")
+	}
+	if builder.buildStatus != mayBuild {
+		t.Fatalf("expected build status to be %d but got %d", mayBuild, builder.buildStatus)
+	}
+	if !builder.isAP4 {
+		t.Fatal("expected isAP4 to be true")
+	}
+}
diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go
index 38f2bc29fa..49cf13c5c4 100644
--- a/plugin/evm/block_verification.go
+++ b/plugin/evm/block_verification.go
@@ -4,7 +4,6 @@ package evm

import (
-	"errors"
	"fmt"
	"math/big"

@@ -12,85 +11,169 @@ import (
	safemath "github.com/ava-labs/avalanchego/utils/math"

-	"github.com/ava-labs/coreth/constants"
-	"github.com/ava-labs/coreth/core/types"
-	"github.com/ava-labs/coreth/params"
-	"github.com/ava-labs/coreth/trie"
+	"github.com/tenderly/coreth/core/types"
+	"github.com/tenderly/coreth/params"
+	"github.com/tenderly/coreth/trie"
+
+	coreth "github.com/tenderly/coreth/chain"
)

var (
+	phase0BlockValidator     = blockValidatorPhase0{}
	apricotPhase0MinGasPrice = big.NewInt(params.LaunchMinGasPrice)
+	phase1BlockValidator     = blockValidatorPhase1{}
	apricotPhase1MinGasPrice = big.NewInt(params.ApricotPhase1MinGasPrice)
+	phase3BlockValidator     = blockValidatorPhase3{}
+	phase4BlockValidator     = blockValidatorPhase4{}
+	phase5BlockValidator     = blockValidatorPhase5{}
)

type BlockValidator interface {
-	SyntacticVerify(b *Block, rules params.Rules) error
+	SyntacticVerify(b *Block) error
}

-type blockValidator struct {
+type blockValidatorPhase0 struct {
	extDataHashes map[common.Hash]common.Hash
}

-func NewBlockValidator(extDataHashes map[common.Hash]common.Hash) BlockValidator {
-	return &blockValidator{
-		extDataHashes: extDataHashes,
-	}
-}
-
-func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error {
+func (v blockValidatorPhase0) SyntacticVerify(b *Block) error {
	if b == nil || b.ethBlock == nil {
		return errInvalidBlock
	}
-	ethHeader := b.ethBlock.Header()
	blockHash := b.ethBlock.Hash()
-
-	if !rules.IsApricotPhase1 {
-		if v.extDataHashes != nil {
-			extData := b.ethBlock.ExtData()
-			extDataHash := types.CalcExtDataHash(extData)
-			// If there is no extra data, check that there is no extra data in the hash map either to ensure we do not
-			// have a block that is unexpectedly missing extra data.
-			expectedExtDataHash, ok := v.extDataHashes[blockHash]
-			if len(extData) == 0 {
-				if ok {
-					return fmt.Errorf("found block with unexpected missing extra data (%s, %d), expected extra data hash: %s", blockHash, b.Height(), expectedExtDataHash)
-				}
-			} else {
-				// If there is extra data, check to make sure that the extra data hash matches the expected extra data hash for this
-				// block
-				if extDataHash != expectedExtDataHash {
-					return fmt.Errorf("extra data hash in block (%s, %d): %s, did not match the expected extra data hash: %s", blockHash, b.Height(), extDataHash, expectedExtDataHash)
-				}
+	if v.extDataHashes != nil {
+		extData := b.ethBlock.ExtData()
+		extDataHash := types.CalcExtDataHash(extData)
+		// If there is no extra data, check that there is no extra data in the hash map either to ensure we do not
+		// have a block that is unexpectedly missing extra data.
+		expectedExtDataHash, ok := v.extDataHashes[blockHash]
+		if len(extData) == 0 {
+			if ok {
+				return fmt.Errorf("found block with unexpected missing extra data (%s, %d), expected extra data hash: %s", blockHash, b.Height(), expectedExtDataHash)
+			}
+		} else {
+			// If there is extra data, check to make sure that the extra data hash matches the expected extra data hash for this
+			// block
+			if extDataHash != expectedExtDataHash {
+				return fmt.Errorf("extra data hash in block (%s, %d): %s, did not match the expected extra data hash: %s", blockHash, b.Height(), extDataHash, expectedExtDataHash)
			}
		}
	}
-	// Skip verification of the genesis block since it should already be marked as accepted.
+ // Skip verification of the genesis block since it + // should already be marked as accepted if blockHash == b.vm.genesisHash { return nil } - // Verify the ExtDataHash field - if rules.IsApricotPhase1 { - if hash := types.CalcExtDataHash(b.ethBlock.ExtData()); ethHeader.ExtDataHash != hash { - return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) - } - } else { - if ethHeader.ExtDataHash != (common.Hash{}) { - return fmt.Errorf( - "expected ExtDataHash to be empty but got %x", - ethHeader.ExtDataHash, - ) + // Perform block and header sanity checks + ethHeader := b.ethBlock.Header() + if ethHeader.Number == nil || !ethHeader.Number.IsUint64() { + return errInvalidBlock + } + if ethHeader.Difficulty == nil || !ethHeader.Difficulty.IsUint64() || + ethHeader.Difficulty.Uint64() != 1 { + return fmt.Errorf( + "expected difficulty to be 1 but got %v: %w", + ethHeader.Difficulty, errInvalidDifficulty, + ) + } + if ethHeader.Nonce.Uint64() != 0 { + return fmt.Errorf( + "expected nonce to be 0 but got %d: %w", + ethHeader.Nonce.Uint64(), errInvalidNonce, + ) + } + if ethHeader.MixDigest != (common.Hash{}) { + return fmt.Errorf( + "expected MixDigest to be empty but got %x: %w", + ethHeader.MixDigest, errInvalidMixDigest, + ) + } + if ethHeader.ExtDataHash != (common.Hash{}) { + return fmt.Errorf( + "expected ExtDataHash to be empty but got %x: %w", + ethHeader.ExtDataHash, errInvalidExtDataHash, + ) + } + headerExtraDataSize := uint64(len(ethHeader.Extra)) + if headerExtraDataSize > params.MaximumExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be <= %d but got %d: %w", + params.MaximumExtraDataSize, headerExtraDataSize, errHeaderExtraDataTooBig, + ) + } + if b.ethBlock.Version() != 0 { + return fmt.Errorf( + "expected block version to be 0 but got %d: %w", + b.ethBlock.Version(), errInvalidBlockVersion, + ) + } + + // Check that the tx hash in the header matches the body + txsHash := types.DeriveSha(b.ethBlock.Transactions(), new(trie.Trie)) + if txsHash != ethHeader.TxHash { + return errTxHashMismatch + } + // Check that the uncle hash in the header matches the body + uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) + if uncleHash != ethHeader.UncleHash { + return errUncleHashMismatch + } + // Coinbase must be zero on C-Chain + if b.ethBlock.Coinbase() != coreth.BlackholeAddr { + return errInvalidBlock + } + // Block must not have any uncles + if len(b.ethBlock.Uncles()) > 0 { + return errUnclesUnsupported + } + // Block must not be empty + txs := b.ethBlock.Transactions() + if len(txs) == 0 && len(b.atomicTxs) == 0 { + return errEmptyBlock + } + + // Make sure that all the txs have the correct fee set. 
+ for _, tx := range txs { + if tx.GasPrice().Cmp(apricotPhase0MinGasPrice) < 0 { + return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.LaunchMinGasPrice) } } + // Make sure the block isn't too far in the future + blockTimestamp := b.ethBlock.Time() + if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) + } + return nil +} + +type blockValidatorPhase1 struct{} + +func (blockValidatorPhase1) SyntacticVerify(b *Block) error { + if b == nil || b.ethBlock == nil { + return errInvalidBlock + } + + // Skip verification of the genesis block since it + // should already be marked as accepted + if b.ethBlock.Hash() == b.vm.genesisHash { + return nil + } + // Perform block and header sanity checks - if !ethHeader.Number.IsUint64() { - return fmt.Errorf("invalid block number: %v", ethHeader.Number) + ethHeader := b.ethBlock.Header() + if ethHeader.Number == nil || !ethHeader.Number.IsUint64() { + return errInvalidBlock } - if !ethHeader.Difficulty.IsUint64() || ethHeader.Difficulty.Cmp(common.Big1) != 0 { - return fmt.Errorf("invalid difficulty: %d", ethHeader.Difficulty) + if ethHeader.Difficulty == nil || !ethHeader.Difficulty.IsUint64() || + ethHeader.Difficulty.Uint64() != 1 { + return fmt.Errorf( + "expected difficulty to be 1 but got %v: %w", + ethHeader.Difficulty, errInvalidDifficulty, + ) } if ethHeader.Nonce.Uint64() != 0 { return fmt.Errorf( @@ -98,195 +181,428 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { ethHeader.Nonce.Uint64(), errInvalidNonce, ) } - + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d in apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, ethHeader.GasLimit, + ) + } if ethHeader.MixDigest != (common.Hash{}) { - return fmt.Errorf("invalid mix digest: %v", ethHeader.MixDigest) + return fmt.Errorf( + "expected MixDigest to be empty but got %x: %w", + ethHeader.MixDigest, errInvalidMixDigest, + ) + } + if hash := types.CalcExtDataHash(b.ethBlock.ExtData()); ethHeader.ExtDataHash != hash { + return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) + } + headerExtraDataSize := uint64(len(ethHeader.Extra)) + if headerExtraDataSize > 0 { + return fmt.Errorf( + "expected header ExtraData to be <= 0 but got %d: %w", + headerExtraDataSize, errHeaderExtraDataTooBig, + ) + } + if b.ethBlock.Version() != 0 { + return fmt.Errorf( + "expected block version to be 0 but got %d: %w", + b.ethBlock.Version(), errInvalidBlockVersion, + ) } - // Enforce static gas limit after ApricotPhase1 (prior to ApricotPhase1 it's handled in processing). 
- if rules.IsCortina { - if ethHeader.GasLimit != params.CortinaGasLimit { - return fmt.Errorf( - "expected gas limit to be %d after cortina but got %d", - params.CortinaGasLimit, ethHeader.GasLimit, - ) - } - } else if rules.IsApricotPhase1 { - if ethHeader.GasLimit != params.ApricotPhase1GasLimit { - return fmt.Errorf( - "expected gas limit to be %d after apricot phase 1 but got %d", - params.ApricotPhase1GasLimit, ethHeader.GasLimit, - ) - } + // Check that the tx hash in the header matches the body + txsHash := types.DeriveSha(b.ethBlock.Transactions(), new(trie.Trie)) + if txsHash != ethHeader.TxHash { + return errTxHashMismatch + } + // Check that the uncle hash in the header matches the body + uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) + if uncleHash != ethHeader.UncleHash { + return errUncleHashMismatch + } + // Coinbase must be zero on C-Chain + if b.ethBlock.Coinbase() != coreth.BlackholeAddr { + return errInvalidBlock + } + // Block must not have any uncles + if len(b.ethBlock.Uncles()) > 0 { + return errUnclesUnsupported + } + // Block must not be empty + txs := b.ethBlock.Transactions() + if len(txs) == 0 && len(b.atomicTxs) == 0 { + return errEmptyBlock } - // Check that the size of the header's Extra data field is correct for [rules]. - headerExtraDataSize := len(ethHeader.Extra) - switch { - case rules.IsDurango: - if headerExtraDataSize < params.DynamicFeeExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be len >= %d but got %d", - params.DynamicFeeExtraDataSize, len(ethHeader.Extra), - ) - } - case rules.IsApricotPhase3: - if headerExtraDataSize != params.DynamicFeeExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be len %d but got %d", - params.DynamicFeeExtraDataSize, headerExtraDataSize, - ) - } - case rules.IsApricotPhase1: - if headerExtraDataSize != 0 { - return fmt.Errorf( - "expected header ExtraData to be 0 but got %d", - headerExtraDataSize, - ) - } - default: - if uint64(headerExtraDataSize) > params.MaximumExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be <= %d but got %d", - params.MaximumExtraDataSize, headerExtraDataSize, - ) + // Make sure that all the txs have the correct fee set. 
+ for _, tx := range txs { + if tx.GasPrice().Cmp(apricotPhase1MinGasPrice) < 0 { + return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.ApricotPhase1MinGasPrice) } } + // Make sure the block isn't too far in the future + blockTimestamp := b.ethBlock.Time() + if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) + } + return nil +} + +type blockValidatorPhase3 struct{} + +func (blockValidatorPhase3) SyntacticVerify(b *Block) error { + if b == nil || b.ethBlock == nil { + return errInvalidBlock + } + + // Skip verification of the genesis block since it + // should already be marked as accepted + if b.ethBlock.Hash() == b.vm.genesisHash { + return nil + } + + // Perform block and header sanity checks + ethHeader := b.ethBlock.Header() + if ethHeader.Number == nil || !ethHeader.Number.IsUint64() { + return errInvalidBlock + } + if ethHeader.Difficulty == nil || !ethHeader.Difficulty.IsUint64() || + ethHeader.Difficulty.Uint64() != 1 { + return fmt.Errorf( + "expected difficulty to be 1 but got %v: %w", + ethHeader.Difficulty, errInvalidDifficulty, + ) + } + if ethHeader.Nonce.Uint64() != 0 { + return fmt.Errorf( + "expected nonce to be 0 but got %d: %w", + ethHeader.Nonce.Uint64(), errInvalidNonce, + ) + } + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d in apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, ethHeader.GasLimit, + ) + } + if ethHeader.MixDigest != (common.Hash{}) { + return fmt.Errorf( + "expected MixDigest to be empty but got %x: %w", + ethHeader.MixDigest, errInvalidMixDigest, + ) + } + if hash := types.CalcExtDataHash(b.ethBlock.ExtData()); ethHeader.ExtDataHash != hash { + return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) + } + if headerExtraDataSize := uint64(len(ethHeader.Extra)); headerExtraDataSize != params.ApricotPhase3ExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be %d but got %d: %w", + params.ApricotPhase3ExtraDataSize, headerExtraDataSize, errHeaderExtraDataTooBig, + ) + } + if ethHeader.BaseFee == nil { + return errNilBaseFeeApricotPhase3 + } + if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { + return fmt.Errorf("too large base fee: bitlen %d", bfLen) + } if b.ethBlock.Version() != 0 { - return fmt.Errorf("invalid version: %d", b.ethBlock.Version()) + return fmt.Errorf( + "expected block version to be 0 but got %d: %w", + b.ethBlock.Version(), errInvalidBlockVersion, + ) } // Check that the tx hash in the header matches the body - txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil)) + txsHash := types.DeriveSha(b.ethBlock.Transactions(), new(trie.Trie)) if txsHash != ethHeader.TxHash { - return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash) + return errTxHashMismatch } // Check that the uncle hash in the header matches the body uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) if uncleHash != ethHeader.UncleHash { - return fmt.Errorf("invalid uncle hash %v does not match calculated uncle hash %v", ethHeader.UncleHash, uncleHash) + return errUncleHashMismatch } - // Coinbase must match the BlackholeAddr on C-Chain - if ethHeader.Coinbase != constants.BlackholeAddr { - return fmt.Errorf("invalid coinbase %v does not match 
required blackhole address %v", ethHeader.Coinbase, constants.BlackholeAddr) + // Coinbase must be zero on C-Chain + if b.ethBlock.Coinbase() != coreth.BlackholeAddr { + return errInvalidBlock } // Block must not have any uncles if len(b.ethBlock.Uncles()) > 0 { return errUnclesUnsupported } - // Block must not be empty txs := b.ethBlock.Transactions() if len(txs) == 0 && len(b.atomicTxs) == 0 { return errEmptyBlock } - // Enforce minimum gas prices here prior to dynamic fees going into effect. - switch { - case !rules.IsApricotPhase1: - // If we are in ApricotPhase0, enforce each transaction has a minimum gas price of at least the LaunchMinGasPrice - for _, tx := range b.ethBlock.Transactions() { - if tx.GasPrice().Cmp(apricotPhase0MinGasPrice) < 0 { - return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.LaunchMinGasPrice) - } - } - case !rules.IsApricotPhase3: - // If we are prior to ApricotPhase3, enforce each transaction has a minimum gas price of at least the ApricotPhase1MinGasPrice - for _, tx := range b.ethBlock.Transactions() { - if tx.GasPrice().Cmp(apricotPhase1MinGasPrice) < 0 { - return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.ApricotPhase1MinGasPrice) - } - } + // Make sure the block isn't too far in the future + blockTimestamp := b.ethBlock.Time() + if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) + } + return nil +} + +type blockValidatorPhase4 struct{} + +func (blockValidatorPhase4) SyntacticVerify(b *Block) error { + if b == nil || b.ethBlock == nil { + return errInvalidBlock + } + + // Skip verification of the genesis block since it + // should already be marked as accepted + if b.ethBlock.Hash() == b.vm.genesisHash { + return nil + } + + // Perform block and header sanity checks + ethHeader := b.ethBlock.Header() + if ethHeader.Number == nil || !ethHeader.Number.IsUint64() { + return errInvalidBlock + } + if ethHeader.Difficulty == nil || !ethHeader.Difficulty.IsUint64() || + ethHeader.Difficulty.Uint64() != 1 { + return fmt.Errorf( + "expected difficulty to be 1 but got %v: %w", + ethHeader.Difficulty, errInvalidDifficulty, + ) + } + if ethHeader.Nonce.Uint64() != 0 { + return fmt.Errorf( + "expected nonce to be 0 but got %d: %w", + ethHeader.Nonce.Uint64(), errInvalidNonce, + ) + } + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d in apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, ethHeader.GasLimit, + ) + } + if ethHeader.MixDigest != (common.Hash{}) { + return fmt.Errorf( + "expected MixDigest to be empty but got %x: %w", + ethHeader.MixDigest, errInvalidMixDigest, + ) + } + if hash := types.CalcExtDataHash(b.ethBlock.ExtData()); ethHeader.ExtDataHash != hash { + return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) + } + if headerExtraDataSize := uint64(len(ethHeader.Extra)); headerExtraDataSize != params.ApricotPhase3ExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be %d but got %d: %w", + params.ApricotPhase3ExtraDataSize, headerExtraDataSize, errHeaderExtraDataTooBig, + ) + } + if ethHeader.BaseFee == nil { + return errNilBaseFeeApricotPhase3 + } + if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { + return fmt.Errorf("too large base fee: bitlen 
%d", bfLen) + } + if b.ethBlock.Version() != 0 { + return fmt.Errorf( + "expected block version to be 0 but got %d: %w", + b.ethBlock.Version(), errInvalidBlockVersion, + ) + } + + // Check that the tx hash in the header matches the body + txsHash := types.DeriveSha(b.ethBlock.Transactions(), new(trie.Trie)) + if txsHash != ethHeader.TxHash { + return errTxHashMismatch + } + // Check that the uncle hash in the header matches the body + uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) + if uncleHash != ethHeader.UncleHash { + return errUncleHashMismatch + } + // Coinbase must be zero on C-Chain + if b.ethBlock.Coinbase() != coreth.BlackholeAddr { + return errInvalidBlock + } + // Block must not have any uncles + if len(b.ethBlock.Uncles()) > 0 { + return errUnclesUnsupported + } + // Block must not be empty + txs := b.ethBlock.Transactions() + if len(txs) == 0 && len(b.atomicTxs) == 0 { + return errEmptyBlock } // Make sure the block isn't too far in the future - // TODO: move this to only be part of semantic verification. blockTimestamp := b.ethBlock.Time() if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) } - // Ensure BaseFee is non-nil as of ApricotPhase3. - if rules.IsApricotPhase3 { - if ethHeader.BaseFee == nil { - return errNilBaseFeeApricotPhase3 + // Make sure ExtDataGasUsed is not nil and correct + if ethHeader.ExtDataGasUsed == nil { + return errNilExtDataGasUsedApricotPhase4 + } + if !ethHeader.ExtDataGasUsed.IsUint64() { + return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) + } + var totalGasUsed uint64 + for _, atomicTx := range b.atomicTxs { + // We perform this check manually here to avoid the overhead of having to + // reparse the atomicTx in `CalcExtDataGasUsed`. + gasUsed, err := atomicTx.GasUsed(false) + if err != nil { + return err } - // TODO: this should be removed as 256 is the maximum possible bit length of a big int - if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { - return fmt.Errorf("too large base fee: bitlen %d", bfLen) + totalGasUsed, err = safemath.Add64(totalGasUsed, gasUsed) + if err != nil { + return err } } - // If we are in ApricotPhase4, ensure that ExtDataGasUsed is populated correctly. - if rules.IsApricotPhase4 { - // Make sure ExtDataGasUsed is not nil and correct - if ethHeader.ExtDataGasUsed == nil { - return errNilExtDataGasUsedApricotPhase4 - } - if rules.IsApricotPhase5 { - if ethHeader.ExtDataGasUsed.Cmp(params.AtomicGasLimit) == 1 { - return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) - } - } else { - if !ethHeader.ExtDataGasUsed.IsUint64() { - return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) - } - } - var totalGasUsed uint64 - for _, atomicTx := range b.atomicTxs { - // We perform this check manually here to avoid the overhead of having to - // reparse the atomicTx in `CalcExtDataGasUsed`. 
- fixedFee := rules.IsApricotPhase5 // Charge the atomic tx fixed fee as of ApricotPhase5 - gasUsed, err := atomicTx.GasUsed(fixedFee) - if err != nil { - return err - } - totalGasUsed, err = safemath.Add64(totalGasUsed, gasUsed) - if err != nil { - return err - } - } + switch { + case ethHeader.ExtDataGasUsed.Cmp(new(big.Int).SetUint64(totalGasUsed)) != 0: + return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", ethHeader.ExtDataGasUsed, totalGasUsed) + + // Make sure BlockGasCost is not nil + // NOTE: ethHeader.BlockGasCost correctness is checked in header verification + case ethHeader.BlockGasCost == nil: + return errNilBlockGasCostApricotPhase4 + case !ethHeader.BlockGasCost.IsUint64(): + return fmt.Errorf("too large blockGasCost: %d", ethHeader.BlockGasCost) + } + return nil +} - switch { - case ethHeader.ExtDataGasUsed.Cmp(new(big.Int).SetUint64(totalGasUsed)) != 0: - return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", ethHeader.ExtDataGasUsed, totalGasUsed) +type blockValidatorPhase5 struct{} - // Make sure BlockGasCost is not nil - // NOTE: ethHeader.BlockGasCost correctness is checked in header verification - case ethHeader.BlockGasCost == nil: - return errNilBlockGasCostApricotPhase4 - case !ethHeader.BlockGasCost.IsUint64(): - return fmt.Errorf("too large blockGasCost: %d", ethHeader.BlockGasCost) - } +func (blockValidatorPhase5) SyntacticVerify(b *Block) error { + if b == nil || b.ethBlock == nil { + return errInvalidBlock } - // Verify the existence / non-existence of excessBlobGas - cancun := rules.IsCancun - if !cancun && ethHeader.ExcessBlobGas != nil { - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *ethHeader.ExcessBlobGas) + // Skip verification of the genesis block since it + // should already be marked as accepted + if b.ethBlock.Hash() == b.vm.genesisHash { + return nil } - if !cancun && ethHeader.BlobGasUsed != nil { - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *ethHeader.BlobGasUsed) + + // Perform block and header sanity checks + ethHeader := b.ethBlock.Header() + if ethHeader.Number == nil || !ethHeader.Number.IsUint64() { + return errInvalidBlock } - if cancun && ethHeader.ExcessBlobGas == nil { - return errors.New("header is missing excessBlobGas") + if ethHeader.Difficulty == nil || !ethHeader.Difficulty.IsUint64() || + ethHeader.Difficulty.Uint64() != 1 { + return fmt.Errorf( + "expected difficulty to be 1 but got %v: %w", + ethHeader.Difficulty, errInvalidDifficulty, + ) + } + if ethHeader.Nonce.Uint64() != 0 { + return fmt.Errorf( + "expected nonce to be 0 but got %d: %w", + ethHeader.Nonce.Uint64(), errInvalidNonce, + ) + } + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d in apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, ethHeader.GasLimit, + ) + } + if ethHeader.MixDigest != (common.Hash{}) { + return fmt.Errorf( + "expected MixDigest to be empty but got %x: %w", + ethHeader.MixDigest, errInvalidMixDigest, + ) + } + if hash := types.CalcExtDataHash(b.ethBlock.ExtData()); ethHeader.ExtDataHash != hash { + return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) + } + if headerExtraDataSize := uint64(len(ethHeader.Extra)); headerExtraDataSize != params.ApricotPhase3ExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be %d but got %d: %w", + params.ApricotPhase3ExtraDataSize, headerExtraDataSize, errHeaderExtraDataTooBig, + ) } - if cancun && ethHeader.BlobGasUsed == nil { - 
return errors.New("header is missing blobGasUsed") + if ethHeader.BaseFee == nil { + return errNilBaseFeeApricotPhase3 } - if !cancun && ethHeader.ParentBeaconRoot != nil { - return fmt.Errorf("invalid parentBeaconRoot: have %x, expected nil", *ethHeader.ParentBeaconRoot) + if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { + return fmt.Errorf("too large base fee: bitlen %d", bfLen) } - // TODO: decide what to do after Cancun - // currently we are enforcing it to be empty hash - if cancun { - switch { - case ethHeader.ParentBeaconRoot == nil: - return errors.New("header is missing parentBeaconRoot") - case *ethHeader.ParentBeaconRoot != (common.Hash{}): - return fmt.Errorf("invalid parentBeaconRoot: have %x, expected empty hash", ethHeader.ParentBeaconRoot) + if b.ethBlock.Version() != 0 { + return fmt.Errorf( + "expected block version to be 0 but got %d: %w", + b.ethBlock.Version(), errInvalidBlockVersion, + ) + } + + // Check that the tx hash in the header matches the body + txsHash := types.DeriveSha(b.ethBlock.Transactions(), new(trie.Trie)) + if txsHash != ethHeader.TxHash { + return errTxHashMismatch + } + // Check that the uncle hash in the header matches the body + uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) + if uncleHash != ethHeader.UncleHash { + return errUncleHashMismatch + } + // Coinbase must be zero on C-Chain + if b.ethBlock.Coinbase() != coreth.BlackholeAddr { + return errInvalidBlock + } + // Block must not have any uncles + if len(b.ethBlock.Uncles()) > 0 { + return errUnclesUnsupported + } + // Block must not be empty + txs := b.ethBlock.Transactions() + if len(txs) == 0 && len(b.atomicTxs) == 0 { + return errEmptyBlock + } + + // Make sure the block isn't too far in the future + blockTimestamp := b.ethBlock.Time() + if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) + } + + // Make sure ExtDataGasUsed is not nil and correct + if ethHeader.ExtDataGasUsed == nil { + return errNilExtDataGasUsedApricotPhase4 + } + if ethHeader.ExtDataGasUsed.Cmp(params.AtomicGasLimit) == 1 { + return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) + } + + var totalGasUsed uint64 + for _, atomicTx := range b.atomicTxs { + // We perform this check manually here to avoid the overhead of having to + // reparse the atomicTx in `CalcExtDataGasUsed`. 
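+ // Illustrative arithmetic, assuming coreth's params.TxBytesGas = 1, secp256k1fx.CostPerSignature = 1_000 and params.AtomicTxBaseCost = 10_000: GasUsed(true) also charges the ApricotPhase5 fixed atomic tx fee, + // so a 300-byte export tx with one EVM input signature would report 300 + 1_000 + 10_000 = 11_300 gas here, and the overflow-checked sum of these values across all atomic txs must equal ethHeader.ExtDataGasUsed below.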
+ gasUsed, err := atomicTx.GasUsed(true) + if err != nil { + return err + } + totalGasUsed, err = safemath.Add64(totalGasUsed, gasUsed) + if err != nil { + return err } } + + switch { + case ethHeader.ExtDataGasUsed.Cmp(new(big.Int).SetUint64(totalGasUsed)) != 0: + return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", ethHeader.ExtDataGasUsed, totalGasUsed) + + // Make sure BlockGasCost is not nil + // NOTE: ethHeader.BlockGasCost correctness is checked in header verification + case ethHeader.BlockGasCost == nil: + return errNilBlockGasCostApricotPhase4 + case !ethHeader.BlockGasCost.IsUint64(): + return fmt.Errorf("too large blockGasCost: %d", ethHeader.BlockGasCost) + } return nil } diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 2b35d32b1a..9d1da725e5 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -8,49 +8,32 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/core/txpool/legacypool" - "github.com/ava-labs/coreth/eth" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/tenderly/coreth/eth" "github.com/spf13/cast" ) const ( - defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay - defaultPruningEnabled = true - defaultCommitInterval = 4096 - defaultTrieCleanCache = 512 - defaultTrieDirtyCache = 512 - defaultTrieDirtyCommitTarget = 20 - defaultTriePrefetcherParallelism = 16 - defaultSnapshotCache = 256 - defaultSyncableCommitInterval = defaultCommitInterval * 4 - defaultSnapshotWait = false - defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit - defaultRpcTxFeeCap = 100 // 100 AVAX - defaultMetricsExpensiveEnabled = true - defaultApiMaxDuration = 0 // Default to no maximum API call duration - defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage - defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage - defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request - defaultContinuousProfilerFrequency = 15 * time.Minute - defaultContinuousProfilerMaxFiles = 5 - defaultPushGossipPercentStake = .9 - defaultPushGossipNumValidators = 100 - defaultPushGossipNumPeers = 0 - defaultPushRegossipNumValidators = 10 - defaultPushRegossipNumPeers = 0 - defaultPushGossipFrequency = 100 * time.Millisecond - defaultPullGossipFrequency = 1 * time.Second - defaultTxRegossipFrequency = 30 * time.Second - defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use - defaultLogLevel = "info" - defaultLogJSONFormat = false - defaultMaxOutboundActiveRequests = 16 - defaultMaxOutboundActiveCrossChainRequests = 64 - defaultPopulateMissingTriesParallelism = 1024 - defaultStateSyncServerTrieCache = 64 // MB - defaultAcceptedCacheSize = 32 // blocks + defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay + defaultPruningEnabled = true + defaultCommitInterval = 4096 + defaultSyncableCommitInterval = defaultCommitInterval * 4 + defaultSnapshotAsync = true + defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit + defaultRpcTxFeeCap = 100 // 100 AVAX + defaultMetricsExpensiveEnabled = false + defaultApiMaxDuration = 0 // Default to no maximum API call duration + defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage + defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage + defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request + defaultContinuousProfilerFrequency 
= 15 * time.Minute + defaultContinuousProfilerMaxFiles = 5 + defaultTxRegossipFrequency = 1 * time.Minute + defaultTxRegossipMaxSize = 15 + defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use + defaultLogLevel = "info" + defaultPopulateMissingTriesParallelism = 1024 + defaultMaxOutboundActiveRequests = 8 + defaultStateSyncServerTrieCache = 64 // MB // defaultStateSyncMinBlocks is the minimum number of blocks the blockchain // should be ahead of local last accepted to perform state sync. @@ -59,24 +42,18 @@ const ( // time assumptions: // - normal bootstrap processing time: ~14 blocks / second // - state sync time: ~6 hrs. - defaultStateSyncMinBlocks = 300_000 - defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request + defaultStateSyncMinBlocks = 300_000 ) -var ( - defaultEnabledAPIs = []string{ - "eth", - "eth-filter", - "net", - "web3", - "internal-eth", - "internal-blockchain", - "internal-transaction", - } - defaultAllowUnprotectedTxHashes = []common.Hash{ - common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820 - } -) +var defaultEnabledAPIs = []string{ + "public-eth", + "public-eth-filter", + "net", + "web3", + "internal-public-eth", + "internal-public-blockchain", + "internal-public-transaction-pool", +} type Duration struct { time.Duration @@ -86,11 +63,8 @@ type Duration struct { type Config struct { // Coreth APIs SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - AdminAPIEnabled bool `json:"admin-api-enabled"` - AdminAPIDir string `json:"admin-api-dir"` - CorethAdminAPIEnabled bool `json:"coreth-admin-api-enabled"` // Deprecated: use AdminAPIEnabled instead - CorethAdminAPIDir string `json:"coreth-admin-api-dir"` // Deprecated: use AdminAPIDir instead - WarpAPIEnabled bool `json:"warp-api-enabled"` + CorethAdminAPIEnabled bool `json:"coreth-admin-api-enabled"` + CorethAdminAPIDir string `json:"coreth-admin-api-dir"` // EnabledEthAPIs is a list of Ethereum services that should be enabled // If none is specified, then we use the default list [defaultEnabledAPIs] @@ -101,20 +75,13 @@ type Config struct { ContinuousProfilerFrequency Duration `json:"continuous-profiler-frequency"` // Frequency to run continuous profiler if enabled ContinuousProfilerMaxFiles int `json:"continuous-profiler-max-files"` // Maximum number of files to maintain - // API Gas/Price Caps + // Coreth API Gas/Price Caps RPCGasCap uint64 `json:"rpc-gas-cap"` RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` - // Cache settings - TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) - TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) - TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) - TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once - SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) - // Eth Settings Preimages bool `json:"preimages-enabled"` - SnapshotWait bool `json:"snapshot-wait"` + SnapshotAsync bool `json:"snapshot-async"` SnapshotVerify bool `json:"snapshot-verification-enabled"` // Pruning Settings @@ -124,29 +91,18 @@ type Config struct { AllowMissingTries bool `json:"allow-missing-tries"` // If enabled, warnings preventing an incomplete trie index are suppressed 
PopulateMissingTries *uint64 `json:"populate-missing-tries,omitempty"` // Sets the starting point for re-populating missing tries. Disables re-generation if nil. PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. - PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup // Metric Settings MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance // API Settings - LocalTxsEnabled bool `json:"local-txs-enabled"` - - TxPoolPriceLimit uint64 `json:"tx-pool-price-limit"` - TxPoolPriceBump uint64 `json:"tx-pool-price-bump"` - TxPoolAccountSlots uint64 `json:"tx-pool-account-slots"` - TxPoolGlobalSlots uint64 `json:"tx-pool-global-slots"` - TxPoolAccountQueue uint64 `json:"tx-pool-account-queue"` - TxPoolGlobalQueue uint64 `json:"tx-pool-global-queue"` - TxPoolLifetime Duration `json:"tx-pool-lifetime"` - - APIMaxDuration Duration `json:"api-max-duration"` - WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` - WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` - MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` - AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` - AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` - AllowUnprotectedTxHashes []common.Hash `json:"allow-unprotected-tx-hashes"` + LocalTxsEnabled bool `json:"local-txs-enabled"` + APIMaxDuration Duration `json:"api-max-duration"` + WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` + WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` + MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` + AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` + AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` // Keystore Settings KeystoreDirectory string `json:"keystore-directory"` // both absolute and relative supported @@ -154,19 +110,12 @@ type Config struct { KeystoreInsecureUnlockAllowed bool `json:"keystore-insecure-unlock-allowed"` // Gossip Settings - PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` - PushGossipNumValidators int `json:"push-gossip-num-validators"` - PushGossipNumPeers int `json:"push-gossip-num-peers"` - PushRegossipNumValidators int `json:"push-regossip-num-validators"` - PushRegossipNumPeers int `json:"push-regossip-num-peers"` - PushGossipFrequency Duration `json:"push-gossip-frequency"` - PullGossipFrequency Duration `json:"pull-gossip-frequency"` - RegossipFrequency Duration `json:"regossip-frequency"` - TxRegossipFrequency Duration `json:"tx-regossip-frequency"` // Deprecated: use RegossipFrequency instead + RemoteTxGossipOnlyEnabled bool `json:"remote-tx-gossip-only-enabled"` + TxRegossipFrequency Duration `json:"tx-regossip-frequency"` + TxRegossipMaxSize int `json:"tx-regossip-max-size"` - // Log - LogLevel string `json:"log-level"` - LogJSONFormat bool `json:"log-json-format"` + // Log level + LogLevel string `json:"log-level"` // Offline Pruning Settings OfflinePruning bool `json:"offline-pruning-enabled"` @@ -174,52 +123,15 @@ type Config struct { OfflinePruningDataDirectory string `json:"offline-pruning-data-directory"` // VM2VM network - MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` - MaxOutboundActiveCrossChainRequests int64 `json:"max-outbound-active-cross-chain-requests"` + MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` // Sync settings - StateSyncEnabled *bool 
`json:"state-sync-enabled"` // Pointer distinguishes false (no state sync) and not set (state sync only at genesis). + StateSyncEnabled bool `json:"state-sync-enabled"` StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` StateSyncIDs string `json:"state-sync-ids"` StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` - StateSyncRequestSize uint16 `json:"state-sync-request-size"` - - // Database Settings - InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. - - // SkipUpgradeCheck disables checking that upgrades must take place before the last - // accepted block. Skipping this check is useful when a node operator does not update - // their node before the network upgrade and their node accepts blocks that have - // identical state with the pre-upgrade ruleset. - SkipUpgradeCheck bool `json:"skip-upgrade-check"` - - // AcceptedCacheSize is the depth to keep in the accepted headers cache and the - // accepted logs cache at the accepted tip. - // - // This is particularly useful for improving the performance of eth_getLogs - // on RPC nodes. - AcceptedCacheSize int `json:"accepted-cache-size"` - - // TransactionHistory is the maximum number of blocks from head whose tx indices - // are reserved: - // * 0: means no limit - // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes - TransactionHistory uint64 `json:"transaction-history"` - // Deprecated, use 'TransactionHistory' instead. - TxLookupLimit uint64 `json:"tx-lookup-limit"` - - // SkipTxIndexing skips indexing transactions. - // This is useful for validators that don't need to index transactions. - // TxLookupLimit can be still used to control unindexing old transactions. - SkipTxIndexing bool `json:"skip-tx-indexing"` - - // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) - // that the node should be willing to sign. 
- // Note: only supports AddressedCall payloads as defined here: - // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall - WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` } // EthAPIs returns an array of strings representing the Eth APIs that should be enabled @@ -236,15 +148,6 @@ func (c *Config) SetDefaults() { c.RPCGasCap = defaultRpcGasCap c.RPCTxFeeCap = defaultRpcTxFeeCap c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled - - c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit - c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump - c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots - c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots - c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue - c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue - c.TxPoolLifetime.Duration = legacypool.DefaultConfig.Lifetime - c.APIMaxDuration.Duration = defaultApiMaxDuration c.WSCPURefillRate.Duration = defaultWsCpuRefillRate c.WSCPUMaxStored.Duration = defaultWsCpuMaxStored @@ -252,34 +155,18 @@ func (c *Config) SetDefaults() { c.ContinuousProfilerFrequency.Duration = defaultContinuousProfilerFrequency c.ContinuousProfilerMaxFiles = defaultContinuousProfilerMaxFiles c.Pruning = defaultPruningEnabled - c.TrieCleanCache = defaultTrieCleanCache - c.TrieDirtyCache = defaultTrieDirtyCache - c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget - c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism - c.SnapshotCache = defaultSnapshotCache c.AcceptorQueueLimit = defaultAcceptorQueueLimit - c.CommitInterval = defaultCommitInterval - c.SnapshotWait = defaultSnapshotWait - c.PushGossipPercentStake = defaultPushGossipPercentStake - c.PushGossipNumValidators = defaultPushGossipNumValidators - c.PushGossipNumPeers = defaultPushGossipNumPeers - c.PushRegossipNumValidators = defaultPushRegossipNumValidators - c.PushRegossipNumPeers = defaultPushRegossipNumPeers - c.PushGossipFrequency.Duration = defaultPushGossipFrequency - c.PullGossipFrequency.Duration = defaultPullGossipFrequency - c.RegossipFrequency.Duration = defaultTxRegossipFrequency + c.SnapshotAsync = defaultSnapshotAsync + c.TxRegossipFrequency.Duration = defaultTxRegossipFrequency + c.TxRegossipMaxSize = defaultTxRegossipMaxSize c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize c.LogLevel = defaultLogLevel - c.LogJSONFormat = defaultLogJSONFormat - c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests - c.MaxOutboundActiveCrossChainRequests = defaultMaxOutboundActiveCrossChainRequests c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism + c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache + c.CommitInterval = defaultCommitInterval c.StateSyncCommitInterval = defaultSyncableCommitInterval c.StateSyncMinBlocks = defaultStateSyncMinBlocks - c.StateSyncRequestSize = defaultStateSyncRequestSize - c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes - c.AcceptedCacheSize = defaultAcceptedCacheSize } func (d *Duration) UnmarshalJSON(data []byte) (err error) { @@ -291,16 +178,6 @@ func (d *Duration) UnmarshalJSON(data []byte) (err error) { return err } -// String implements the stringer interface. -func (d Duration) String() string { - return d.Duration.String() -} - -// String implements the stringer interface. 
-func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Duration.String()) -} - // Validate returns an error if this is an invalid config. func (c *Config) Validate() error { if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) { @@ -318,31 +195,5 @@ func (c *Config) Validate() error { return fmt.Errorf("cannot use commit interval of 0 with pruning enabled") } - if c.PushGossipPercentStake < 0 || c.PushGossipPercentStake > 1 { - return fmt.Errorf("push-gossip-percent-stake is %f but must be in the range [0, 1]", c.PushGossipPercentStake) - } return nil } - -func (c *Config) Deprecate() string { - msg := "" - // Deprecate the old config options and set the new ones. - if c.CorethAdminAPIEnabled { - msg += "coreth-admin-api-enabled is deprecated, use admin-api-enabled instead. " - c.AdminAPIEnabled = c.CorethAdminAPIEnabled - } - if c.CorethAdminAPIDir != "" { - msg += "coreth-admin-api-dir is deprecated, use admin-api-dir instead. " - c.AdminAPIDir = c.CorethAdminAPIDir - } - if c.TxRegossipFrequency != (Duration{}) { - msg += "tx-regossip-frequency is deprecated, use regossip-frequency instead. " - c.RegossipFrequency = c.TxRegossipFrequency - } - if c.TxLookupLimit != 0 { - msg += "tx-lookup-limit is deprecated, use transaction-history instead. " - c.TransactionHistory = c.TxLookupLimit - } - - return msg -} diff --git a/plugin/evm/export_tx.go b/plugin/evm/export_tx.go index 4f6828ec03..43ebe53057 100644 --- a/plugin/evm/export_tx.go +++ b/plugin/evm/export_tx.go @@ -4,22 +4,18 @@ package evm import ( - "context" - "errors" "fmt" - "github.com/ava-labs/coreth/core/vm" "math/big" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -28,16 +24,9 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var ( - _ UnsignedAtomicTx = &UnsignedExportTx{} - _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} - errExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") - errExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") -) - // UnsignedExportTx is an unsigned ExportTx type UnsignedExportTx struct { - Metadata + avax.Metadata // ID of the network on which this tx was issued NetworkID uint32 `serialize:"true" json:"networkID"` // ID of this blockchain. @@ -51,11 +40,11 @@ type UnsignedExportTx struct { } // InputUTXOs returns a set of all the hash(address:nonce) exporting funds. -func (utx *UnsignedExportTx) InputUTXOs() set.Set[ids.ID] { - set := set.NewSet[ids.ID](len(utx.Ins)) - for _, in := range utx.Ins { - // Total populated bytes is exactly 32 bytes. - // 8 (Nonce) + 4 (Address Length) + 20 (Address) +func (tx *UnsignedExportTx) InputUTXOs() ids.Set { + set := ids.NewSet(len(tx.Ins)) + for _, in := range tx.Ins { + // Total populated bytes is 20 (Address) + 8 (Nonce), however, we allocate + // 32 bytes to make ids.ID casting easier. 
var rawID [32]byte packer := wrappers.Packer{Bytes: rawID[:]} packer.PackLong(in.Nonce) @@ -66,18 +55,18 @@ func (utx *UnsignedExportTx) InputUTXOs() set.Set[ids.ID] { } // Verify this transaction is well-formed -func (utx *UnsignedExportTx) Verify( +func (tx *UnsignedExportTx) Verify( ctx *snow.Context, rules params.Rules, ) error { switch { - case utx == nil: + case tx == nil: return errNilTx - case len(utx.ExportedOutputs) == 0: + case len(tx.ExportedOutputs) == 0: return errNoExportOutputs - case utx.NetworkID != ctx.NetworkID: + case tx.NetworkID != ctx.NetworkID: return errWrongNetworkID - case ctx.ChainID != utx.BlockchainID: + case ctx.ChainID != tx.BlockchainID: return errWrongBlockchainID } @@ -85,49 +74,43 @@ func (utx *UnsignedExportTx) Verify( if rules.IsApricotPhase5 { // Note that SameSubnet verifies that [tx.DestinationChain] isn't this // chain's ID - if err := verify.SameSubnet(context.TODO(), ctx, utx.DestinationChain); err != nil { + if err := verify.SameSubnet(ctx, tx.DestinationChain); err != nil { return errWrongChainID } } else { - if utx.DestinationChain != ctx.XChainID { + if tx.DestinationChain != ctx.XChainID { return errWrongChainID } } - for _, in := range utx.Ins { + for _, in := range tx.Ins { if err := in.Verify(); err != nil { return err } - if rules.IsBanff && in.AssetID != ctx.AVAXAssetID { - return errExportNonAVAXInputBanff - } } - for _, out := range utx.ExportedOutputs { + for _, out := range tx.ExportedOutputs { if err := out.Verify(); err != nil { return err } assetID := out.AssetID() - if assetID != ctx.AVAXAssetID && utx.DestinationChain == constants.PlatformChainID { + if assetID != ctx.AVAXAssetID && tx.DestinationChain == constants.PlatformChainID { return errWrongChainID } - if rules.IsBanff && assetID != ctx.AVAXAssetID { - return errExportNonAVAXOutputBanff - } } - if !avax.IsSortedTransferableOutputs(utx.ExportedOutputs, Codec) { + if !avax.IsSortedTransferableOutputs(tx.ExportedOutputs, Codec) { return errOutputsNotSorted } - if rules.IsApricotPhase1 && !utils.IsSortedAndUnique(utx.Ins) { + if rules.IsApricotPhase1 && !IsSortedAndUniqueEVMInputs(tx.Ins) { return errInputsNotSortedUnique } return nil } -func (utx *UnsignedExportTx) GasUsed(fixedFee bool) (uint64, error) { - byteCost := calcBytesCost(len(utx.Bytes())) - numSigs := uint64(len(utx.Ins)) +func (tx *UnsignedExportTx) GasUsed(fixedFee bool) (uint64, error) { + byteCost := calcBytesCost(len(tx.UnsignedBytes())) + numSigs := uint64(len(tx.Ins)) sigCost, err := math.Mul64(numSigs, secp256k1fx.CostPerSignature) if err != nil { return 0, err @@ -147,13 +130,13 @@ func (utx *UnsignedExportTx) GasUsed(fixedFee bool) (uint64, error) { } // Amount of [assetID] burned by this transaction -func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { +func (tx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { var ( spent uint64 input uint64 err error ) - for _, out := range utx.ExportedOutputs { + for _, out := range tx.ExportedOutputs { if out.AssetID() == assetID { spent, err = math.Add64(spent, out.Output().Amount()) if err != nil { @@ -161,7 +144,7 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { } } } - for _, in := range utx.Ins { + for _, in := range tx.Ins { if in.AssetID == assetID { input, err = math.Add64(input, in.Amount) if err != nil { @@ -170,18 +153,18 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { } } - return math.Sub(input, spent) + return math.Sub64(input, spent) } // SemanticVerify this transaction is 
valid. -func (utx *UnsignedExportTx) SemanticVerify( +func (tx *UnsignedExportTx) SemanticVerify( vm *VM, stx *Tx, _ *Block, baseFee *big.Int, rules params.Rules, ) error { - if err := utx.Verify(vm.ctx, rules); err != nil { + if err := tx.Verify(vm.ctx, rules); err != nil { return err } @@ -194,7 +177,7 @@ func (utx *UnsignedExportTx) SemanticVerify( if err != nil { return err } - txFee, err := CalculateDynamicFee(gasUsed, baseFee) + txFee, err := calculateDynamicFee(gasUsed, baseFee) if err != nil { return err } @@ -203,10 +186,10 @@ func (utx *UnsignedExportTx) SemanticVerify( default: fc.Produce(vm.ctx.AVAXAssetID, params.AvalancheAtomicTxFee) } - for _, out := range utx.ExportedOutputs { + for _, out := range tx.ExportedOutputs { fc.Produce(out.AssetID(), out.Output().Amount()) } - for _, in := range utx.Ins { + for _, in := range tx.Ins { fc.Consume(in.AssetID, in.Amount) } @@ -214,11 +197,11 @@ func (utx *UnsignedExportTx) SemanticVerify( return fmt.Errorf("export tx flow check failed due to: %w", err) } - if len(utx.Ins) != len(stx.Creds) { - return fmt.Errorf("export tx contained mismatched number of inputs/credentials (%d vs. %d)", len(utx.Ins), len(stx.Creds)) + if len(tx.Ins) != len(stx.Creds) { + return fmt.Errorf("export tx contained mismatched number of inputs/credentials (%d vs. %d)", len(tx.Ins), len(stx.Creds)) } - for i, input := range utx.Ins { + for i, input := range tx.Ins { cred, ok := stx.Creds[i].(*secp256k1fx.Credential) if !ok { return fmt.Errorf("expected *secp256k1fx.Credential but got %T", cred) @@ -230,10 +213,15 @@ func (utx *UnsignedExportTx) SemanticVerify( if len(cred.Sigs) != 1 { return fmt.Errorf("expected one signature for EVM Input Credential, but found: %d", len(cred.Sigs)) } - pubKey, err := vm.secpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) + pubKeyIntf, err := vm.secpFactory.RecoverPublicKey(tx.UnsignedBytes(), cred.Sigs[0][:]) if err != nil { return err } + pubKey, ok := pubKeyIntf.(*crypto.PublicKeySECP256K1R) + if !ok { + // This should never happen + return fmt.Errorf("expected *crypto.PublicKeySECP256K1R but got %T", pubKeyIntf) + } if input.Address != PublicKeyToEthAddress(pubKey) { return errPublicKeySignatureMismatch } @@ -243,11 +231,11 @@ func (utx *UnsignedExportTx) SemanticVerify( } // AtomicOps returns the atomic operations for this transaction. 
-func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { - txID := utx.ID() +func (tx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { + txID := tx.ID() - elems := make([]*atomic.Element, len(utx.ExportedOutputs)) - for i, out := range utx.ExportedOutputs { + elems := make([]*atomic.Element, len(tx.ExportedOutputs)) + for i, out := range tx.ExportedOutputs { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: txID, @@ -273,7 +261,7 @@ func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { elems[i] = elem } - return utx.DestinationChain, &atomic.Requests{PutRequests: elems}, nil + return tx.DestinationChain, &atomic.Requests{PutRequests: elems}, nil } // newExportTx returns a new ExportTx @@ -283,9 +271,9 @@ func (vm *VM) newExportTx( chainID ids.ID, // Chain to send the UTXOs to to ids.ShortID, // Address of chain recipient baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens + keys []*crypto.PrivateKeySECP256K1R, // Pay the fee and provide the tokens ) (*Tx, error) { - outs := []*avax.TransferableOutput{{ + outs := []*avax.TransferableOutput{{ // Exported to X-Chain Asset: avax.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, @@ -300,7 +288,7 @@ func (vm *VM) newExportTx( var ( avaxNeeded uint64 = 0 ins, avaxIns []EVMInput - signers, avaxSigners [][]*secp256k1.PrivateKey + signers, avaxSigners [][]*crypto.PrivateKeySECP256K1R err error ) @@ -369,11 +357,11 @@ func (vm *VM) newExportTx( } // EVMStateTransfer executes the state update from the atomic export transaction -func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state vm.StateDB) error { +func (tx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { addrs := map[[20]byte]uint64{} - for _, from := range utx.Ins { + for _, from := range tx.Ins { if from.AssetID == ctx.AVAXAssetID { - log.Debug("crosschain", "dest", utx.DestinationChain, "addr", from.Address, "amount", from.Amount, "assetID", "AVAX") + log.Debug("crosschain", "dest", tx.DestinationChain, "addr", from.Address, "amount", from.Amount, "assetID", "AVAX") // We multiply the input amount by x2cRate to convert AVAX back to the appropriate // denomination before export. 
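// Illustrative arithmetic, assuming coreth's x2cRate = 1_000_000_000 (the scaling between the 9-decimal nAVAX denomination used by atomic UTXOs and the 18-decimal wei denomination on the C-Chain): // exporting 1 AVAX means from.Amount = 1_000_000_000, so the SubBalance below removes 1_000_000_000 * 1_000_000_000 = 10^18 wei from the sender's C-Chain balance.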
amount := new(big.Int).Mul( @@ -383,7 +371,7 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state vm.StateD } state.SubBalance(from.Address, amount) } else { - log.Debug("crosschain", "dest", utx.DestinationChain, "addr", from.Address, "amount", from.Amount, "assetID", from.AssetID) + log.Debug("crosschain", "dest", tx.DestinationChain, "addr", from.Address, "amount", from.Amount, "assetID", from.AssetID) amount := new(big.Int).SetUint64(from.Amount) if state.GetBalanceMultiCoin(from.Address, common.Hash(from.AssetID)).Cmp(amount) < 0 { return errInsufficientFunds diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index f7c0e92cc6..9472b86fe5 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common" ) diff --git a/plugin/evm/gasprice_update.go b/plugin/evm/gasprice_update.go new file mode 100644 index 0000000000..5056fea880 --- /dev/null +++ b/plugin/evm/gasprice_update.go @@ -0,0 +1,90 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "math/big" + "sync" + "time" + + "github.com/tenderly/coreth/params" +) + +type gasPriceUpdater struct { + setter gasPriceSetter + chainConfig *params.ChainConfig + shutdownChan <-chan struct{} + + wg *sync.WaitGroup +} + +type gasPriceSetter interface { + SetGasPrice(price *big.Int) + SetMinFee(price *big.Int) +} + +// handleGasPriceUpdates creates and runs an instance of the gasPriceUpdater +func (vm *VM) handleGasPriceUpdates() { + gpu := &gasPriceUpdater{ + setter: vm.chain.GetTxPool(), + chainConfig: vm.chainConfig, + shutdownChan: vm.shutdownChan, + wg: &vm.shutdownWg, + } + + gpu.start() +} + +// start handles the appropriate gas price and minimum fee updates required by [gpu.chainConfig] +func (gpu *gasPriceUpdater) start() { + // Sets the initial gas price to the launch minimum gas price + gpu.setter.SetGasPrice(big.NewInt(params.LaunchMinGasPrice)) + + // Updates to the minimum gas price as of ApricotPhase1 if it's already in effect or starts a goroutine to enable it at the correct time + if disabled := gpu.handleUpdate(gpu.setter.SetGasPrice, gpu.chainConfig.ApricotPhase1BlockTimestamp, big.NewInt(params.ApricotPhase1MinGasPrice)); disabled { + return + } + // Updates to the minimum gas price as of ApricotPhase3 if it's already in effect or starts a goroutine to enable it at the correct time + if disabled := gpu.handleUpdate(gpu.setter.SetGasPrice, gpu.chainConfig.ApricotPhase3BlockTimestamp, big.NewInt(0)); disabled { + return + } + if disabled := gpu.handleUpdate(gpu.setter.SetMinFee, gpu.chainConfig.ApricotPhase3BlockTimestamp, big.NewInt(params.ApricotPhase3MinBaseFee)); disabled { + return + } + // Updates to the minimum gas price as of ApricotPhase4 if it's already in effect or starts a goroutine to enable it at the correct time + gpu.handleUpdate(gpu.setter.SetMinFee, gpu.chainConfig.ApricotPhase4BlockTimestamp, big.NewInt(params.ApricotPhase4MinBaseFee)) +} + +// handleUpdate handles calling update(price) at the appropriate time based on +// the value of [timestamp].
+// 1) If [timestamp] is nil, update is never called +// 2) If [timestamp] has already passed, update is called immediately +// 3) If [timestamp] is some time in the future, a goroutine is started that will call update(price) at the time +// given by [timestamp]. +func (gpu *gasPriceUpdater) handleUpdate(update func(price *big.Int), timestamp *big.Int, price *big.Int) bool { + if timestamp == nil { + return true + } + + currentTime := time.Now() + upgradeTime := time.Unix(timestamp.Int64(), 0) + if currentTime.After(upgradeTime) { + update(price) + } else { + gpu.wg.Add(1) + go gpu.updatePrice(update, time.Until(upgradeTime), price) + } + return false +} + +// updatePrice calls update(updatedPrice) after waiting for [duration] or shuts down early +// if the [shutdownChan] is closed. +func (gpu *gasPriceUpdater) updatePrice(update func(price *big.Int), duration time.Duration, updatedPrice *big.Int) { + defer gpu.wg.Done() + select { + case <-time.After(duration): + update(updatedPrice) + case <-gpu.shutdownChan: + } +} diff --git a/plugin/evm/gasprice_update_test.go b/plugin/evm/gasprice_update_test.go new file mode 100644 index 0000000000..07dfa04d7b --- /dev/null +++ b/plugin/evm/gasprice_update_test.go @@ -0,0 +1,141 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "math/big" + "sync" + "testing" + "time" + + "github.com/tenderly/coreth/params" +) + +type mockGasPriceSetter struct { + lock sync.Mutex + price, minFee *big.Int +} + +func (m *mockGasPriceSetter) SetGasPrice(price *big.Int) { + m.lock.Lock() + defer m.lock.Unlock() + + m.price = price +} + +func (m *mockGasPriceSetter) SetMinFee(minFee *big.Int) { + m.lock.Lock() + defer m.lock.Unlock() + + m.minFee = minFee +} + +func (m *mockGasPriceSetter) GetStatus() (*big.Int, *big.Int) { + m.lock.Lock() + defer m.lock.Unlock() + + return m.price, m.minFee +} + +func attemptAwait(t *testing.T, wg *sync.WaitGroup, delay time.Duration) { + ticker := make(chan struct{}) + + // Wait for [wg] and then close [ticker] to indicate that + // the wait group has finished. + go func() { + wg.Wait() + close(ticker) + }() + + select { + case <-time.After(delay): + t.Fatal("Timed out waiting for wait group to complete") + case <-ticker: + // The wait group completed without issue + } +} + +func TestUpdateGasPriceShutsDown(t *testing.T) { + shutdownChan := make(chan struct{}) + wg := &sync.WaitGroup{} + config := *params.TestChainConfig + // Set ApricotPhase3BlockTimestamp one hour in the future so that it will + // create a goroutine waiting for an hour before updating the gas price + config.ApricotPhase3BlockTimestamp = big.NewInt(time.Now().Add(time.Hour).Unix()) + gpu := &gasPriceUpdater{ + setter: &mockGasPriceSetter{price: big.NewInt(1)}, + chainConfig: &config, + shutdownChan: shutdownChan, + wg: wg, + } + + gpu.start() + // Close [shutdownChan] and ensure that the wait group finishes in a reasonable + // amount of time.
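+ // Closing [shutdownChan] makes the select in updatePrice take its shutdown branch, so the goroutine calls wg.Done() without ever applying the ApricotPhase3 update.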
+ close(shutdownChan) + attemptAwait(t, wg, 5*time.Second) +} + +func TestUpdateGasPriceInitializesPrice(t *testing.T) { + shutdownChan := make(chan struct{}) + wg := &sync.WaitGroup{} + gpu := &gasPriceUpdater{ + setter: &mockGasPriceSetter{price: big.NewInt(1)}, + chainConfig: params.TestChainConfig, + shutdownChan: shutdownChan, + wg: wg, + } + + gpu.start() + // The wait group should finish immediately since no goroutine + // should be created when all prices should be set from the start + attemptAwait(t, wg, time.Millisecond) + + if gpu.setter.(*mockGasPriceSetter).price.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("Expected price to match minimum base fee for apricot phase3") + } + if minFee := gpu.setter.(*mockGasPriceSetter).minFee; minFee == nil || minFee.Cmp(big.NewInt(params.ApricotPhase4MinBaseFee)) != 0 { + t.Fatalf("Expected min fee to match minimum fee for apricotPhase4, but found: %d", minFee) + } +} + +func TestUpdateGasPriceUpdatesPrice(t *testing.T) { + shutdownChan := make(chan struct{}) + wg := &sync.WaitGroup{} + config := *params.TestChainConfig + // Set ApricotPhase3BlockTimestamp 250ms in the future so that it will + // create a goroutine waiting for the time to update the gas price + config.ApricotPhase3BlockTimestamp = big.NewInt(time.Now().Add(250 * time.Millisecond).Unix()) + config.ApricotPhase4BlockTimestamp = big.NewInt(time.Now().Add(3 * time.Second).Unix()) + gpu := &gasPriceUpdater{ + setter: &mockGasPriceSetter{price: big.NewInt(1)}, + chainConfig: &config, + shutdownChan: shutdownChan, + wg: wg, + } + + gpu.start() + + // With ApricotPhase3 set slightly in the future, the gas price updater should create a + // goroutine to sleep until it's time to update and mark the wait group as done when it has + // completed the update. + time.Sleep(1 * time.Second) + price, minFee := gpu.setter.(*mockGasPriceSetter).GetStatus() + if price.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("Expected price to match minimum base fee for apricot phase3") + } + if minFee == nil || minFee.Cmp(big.NewInt(params.ApricotPhase3MinBaseFee)) != 0 { + t.Fatalf("Expected min fee to match minimum fee for apricotPhase3, but found: %d", minFee) + } + + // Confirm ApricotPhase4 settings are applied at the very end. + attemptAwait(t, wg, 5*time.Second) + price, minFee = gpu.setter.(*mockGasPriceSetter).GetStatus() + if price.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("Expected price to match minimum base fee for apricot phase4") + } + if minFee == nil || minFee.Cmp(big.NewInt(params.ApricotPhase4MinBaseFee)) != 0 { + t.Fatalf("Expected min fee to match minimum fee for apricotPhase4, but found: %d", minFee) + } +} diff --git a/plugin/evm/gossiper.go b/plugin/evm/gossiper.go new file mode 100644 index 0000000000..43306eb72c --- /dev/null +++ b/plugin/evm/gossiper.go @@ -0,0 +1,520 @@ +// (c) 2019-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
+ +package evm + +import ( + "container/heap" + "math/big" + "sync" + "time" + + "github.com/ava-labs/avalanchego/codec" + + "github.com/tenderly/coreth/peer" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/wrappers" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/rlp" + + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/plugin/evm/message" +) + +const ( + // We allow [recentCacheSize] to be fairly large because we only store hashes + // in the cache, not entire transactions. + recentCacheSize = 512 + + // [ethTxsGossipInterval] is how often we attempt to gossip newly seen + // transactions to other nodes. + ethTxsGossipInterval = 500 * time.Millisecond +) + +// Gossiper handles outgoing gossip of transactions +type Gossiper interface { + // GossipAtomicTxs sends an AppGossip message containing the given [txs] + GossipAtomicTxs(txs []*Tx) error + // GossipEthTxs sends an AppGossip message containing the given [txs] + GossipEthTxs(txs []*types.Transaction) error +} + +// pushGossiper is used to gossip transactions to the network +type pushGossiper struct { + ctx *snow.Context + gossipActivationTime time.Time + config Config + + client peer.NetworkClient + blockchain *core.BlockChain + txPool *core.TxPool + atomicMempool *Mempool + + // We attempt to batch transactions we need to gossip to avoid runaway + // amplification of mempool chatter. + ethTxsToGossipChan chan []*types.Transaction + ethTxsToGossip map[common.Hash]*types.Transaction + lastGossiped time.Time + shutdownChan chan struct{} + shutdownWg *sync.WaitGroup + + // [recentAtomicTxs] and [recentEthTxs] prevent us from over-gossiping the + // same transaction in a short period of time. + recentAtomicTxs *cache.LRU + recentEthTxs *cache.LRU + + codec codec.Manager +} + +// createGossiper constructs and returns a pushGossiper or noopGossiper +// based on whether vm.chainConfig.ApricotPhase4BlockTimestamp is set +func (vm *VM) createGossiper() Gossiper { + if vm.chainConfig.ApricotPhase4BlockTimestamp == nil { + return &noopGossiper{} + } + + net := &pushGossiper{ + ctx: vm.ctx, + gossipActivationTime: time.Unix(vm.chainConfig.ApricotPhase4BlockTimestamp.Int64(), 0), + config: vm.config, + client: vm.client, + blockchain: vm.chain.BlockChain(), + txPool: vm.chain.GetTxPool(), + atomicMempool: vm.mempool, + ethTxsToGossipChan: make(chan []*types.Transaction), + ethTxsToGossip: make(map[common.Hash]*types.Transaction), + shutdownChan: vm.shutdownChan, + shutdownWg: &vm.shutdownWg, + recentAtomicTxs: &cache.LRU{Size: recentCacheSize}, + recentEthTxs: &cache.LRU{Size: recentCacheSize}, + codec: vm.networkCodec, + } + net.awaitEthTxGossip() + return net +} + +// queueExecutableTxs attempts to select up to [maxTxs] from the tx pool for +// regossiping. +// +// We assume that [txs] contains an array of nonce-ordered transactions for a given +// account. This array of transactions can have gaps and start at a nonce lower +// than the current state of an account.
+func (n *pushGossiper) queueExecutableTxs(state *state.StateDB, baseFee *big.Int, txs map[common.Address]types.Transactions, maxTxs int) types.Transactions { + // Setup heap for transactions + heads := make(types.TxByPriceAndTime, 0, len(txs)) + for addr, accountTxs := range txs { + // Short-circuit here to avoid performing an unnecessary state lookup + if len(accountTxs) == 0 { + continue + } + + // Ensure any transactions regossiped are immediately executable + var ( + currentNonce = state.GetNonce(addr) + tx *types.Transaction + ) + for _, accountTx := range accountTxs { + // The tx pool may be out of sync with current state, so we iterate + // through the account transactions until we get to one that is + // executable. + if accountTx.Nonce() == currentNonce { + tx = accountTx + break + } + // There may be gaps in the tx pool and we could jump past the nonce we'd + // like to execute. + if accountTx.Nonce() > currentNonce { + break + } + } + if tx == nil { + continue + } + + // Don't try to regossip a transaction too frequently + if time.Since(tx.FirstSeen()) < n.config.TxRegossipFrequency.Duration { + continue + } + + // Ensure the fee the transaction pays is valid at tip + wrapped, err := types.NewTxWithMinerFee(tx, baseFee) + if err != nil { + log.Debug( + "not queuing tx for regossip", + "tx", tx.Hash(), + "err", err, + ) + continue + } + + heads = append(heads, wrapped) + } + heap.Init(&heads) + + // Add up to [maxTxs] transactions to be gossiped + queued := make([]*types.Transaction, 0, maxTxs) + for len(heads) > 0 && len(queued) < maxTxs { + tx := heads[0].Tx + queued = append(queued, tx) + heap.Pop(&heads) + } + + return queued +} + +// queueRegossipTxs finds the best transactions in the mempool and adds up to +// [TxRegossipMaxSize] of them to [ethTxsToGossip]. +func (n *pushGossiper) queueRegossipTxs() types.Transactions { + // Fetch all pending transactions + pending := n.txPool.Pending(true) + + // Split the pending transactions into locals and remotes + localTxs := make(map[common.Address]types.Transactions) + remoteTxs := pending + for _, account := range n.txPool.Locals() { + if txs := remoteTxs[account]; len(txs) > 0 { + delete(remoteTxs, account) + localTxs[account] = txs + } + } + + // Add best transactions to be gossiped (preferring local txs) + tip := n.blockchain.CurrentBlock() + state, err := n.blockchain.StateAt(tip.Root()) + if err != nil || state == nil { + log.Debug( + "could not get state at tip", + "tip", tip.Hash(), + "err", err, + ) + return nil + } + localQueued := n.queueExecutableTxs(state, tip.BaseFee(), localTxs, n.config.TxRegossipMaxSize) + localCount := len(localQueued) + if localCount >= n.config.TxRegossipMaxSize { + return localQueued + } + remoteQueued := n.queueExecutableTxs(state, tip.BaseFee(), remoteTxs, n.config.TxRegossipMaxSize-localCount) + return append(localQueued, remoteQueued...) +} + +// awaitEthTxGossip periodically gossips transactions that have been queued for +// gossip at least once every [ethTxsGossipInterval]. 
+func (n *pushGossiper) awaitEthTxGossip() { + n.shutdownWg.Add(1) + go n.ctx.Log.RecoverAndPanic(func() { + defer n.shutdownWg.Done() + + var ( + gossipTicker = time.NewTicker(ethTxsGossipInterval) + regossipTicker = time.NewTicker(n.config.TxRegossipFrequency.Duration) + ) + + for { + select { + case <-gossipTicker.C: + if attempted, err := n.gossipEthTxs(false); err != nil { + log.Warn( + "failed to send eth transactions", + "len(txs)", attempted, + "err", err, + ) + } + case <-regossipTicker.C: + for _, tx := range n.queueRegossipTxs() { + n.ethTxsToGossip[tx.Hash()] = tx + } + if attempted, err := n.gossipEthTxs(true); err != nil { + log.Warn( + "failed to send eth transactions", + "len(txs)", attempted, + "err", err, + ) + } + case txs := <-n.ethTxsToGossipChan: + for _, tx := range txs { + n.ethTxsToGossip[tx.Hash()] = tx + } + if attempted, err := n.gossipEthTxs(false); err != nil { + log.Warn( + "failed to send eth transactions", + "len(txs)", attempted, + "err", err, + ) + } + case <-n.shutdownChan: + return + } + } + }) +} + +func (n *pushGossiper) GossipAtomicTxs(txs []*Tx) error { + if time.Now().Before(n.gossipActivationTime) { + log.Trace( + "not gossiping atomic tx before the gossiping activation time", + "txs", txs, + ) + return nil + } + + errs := wrappers.Errs{} + for _, tx := range txs { + errs.Add(n.gossipAtomicTx(tx)) + } + return errs.Err +} + +func (n *pushGossiper) gossipAtomicTx(tx *Tx) error { + txID := tx.ID() + // Don't gossip transaction if it has been recently gossiped. + if _, has := n.recentAtomicTxs.Get(txID); has { + return nil + } + // If the transaction is not pending according to the mempool + // then there is no need to gossip it further. + if _, pending := n.atomicMempool.GetPendingTx(txID); !pending { + return nil + } + n.recentAtomicTxs.Put(txID, nil) + + msg := message.AtomicTxGossip{ + Tx: tx.Bytes(), + } + msgBytes, err := message.BuildGossipMessage(n.codec, msg) + if err != nil { + return err + } + + log.Trace( + "gossiping atomic tx", + "txID", txID, + ) + return n.client.Gossip(msgBytes) +} + +func (n *pushGossiper) sendEthTxs(txs []*types.Transaction) error { + if len(txs) == 0 { + return nil + } + + txBytes, err := rlp.EncodeToBytes(txs) + if err != nil { + return err + } + msg := message.EthTxsGossip{ + Txs: txBytes, + } + msgBytes, err := message.BuildGossipMessage(n.codec, msg) + if err != nil { + return err + } + + log.Trace( + "gossiping eth txs", + "len(txs)", len(txs), + "size(txs)", len(msg.Txs), + ) + return n.client.Gossip(msgBytes) +} + +func (n *pushGossiper) gossipEthTxs(force bool) (int, error) { + if (!force && time.Since(n.lastGossiped) < ethTxsGossipInterval) || len(n.ethTxsToGossip) == 0 { + return 0, nil + } + n.lastGossiped = time.Now() + txs := make([]*types.Transaction, 0, len(n.ethTxsToGossip)) + for _, tx := range n.ethTxsToGossip { + txs = append(txs, tx) + delete(n.ethTxsToGossip, tx.Hash()) + } + + selectedTxs := make([]*types.Transaction, 0) + for _, tx := range txs { + txHash := tx.Hash() + txStatus := n.txPool.Status([]common.Hash{txHash})[0] + if txStatus != core.TxStatusPending { + continue + } + + if n.config.RemoteTxGossipOnlyEnabled && n.txPool.HasLocal(txHash) { + continue + } + + // We check [force] outside of the if statement to avoid an unnecessary + // cache lookup. 
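+ // When [force] is true (the regossip path), the recent-tx cache check is skipped so a recently gossiped tx may be resent; the Put below refreshes its entry in [recentEthTxs] either way.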
+ if !force { + if _, has := n.recentEthTxs.Get(txHash); has { + continue + } + } + n.recentEthTxs.Put(txHash, nil) + + selectedTxs = append(selectedTxs, tx) + } + + if len(selectedTxs) == 0 { + return 0, nil + } + + // Attempt to gossip [selectedTxs] + msgTxs := make([]*types.Transaction, 0) + msgTxsSize := common.StorageSize(0) + for _, tx := range selectedTxs { + size := tx.Size() + if msgTxsSize+size > message.EthMsgSoftCapSize { + if err := n.sendEthTxs(msgTxs); err != nil { + return len(selectedTxs), err + } + msgTxs = msgTxs[:0] + msgTxsSize = 0 + } + msgTxs = append(msgTxs, tx) + msgTxsSize += size + } + + // Send any remaining [msgTxs] + return len(selectedTxs), n.sendEthTxs(msgTxs) +} + +// GossipEthTxs enqueues the provided [txs] for gossiping. At some point, the +// [pushGossiper] will attempt to gossip the provided txs to other nodes +// (usually right away if not under load). +// +// NOTE: We never return a non-nil error from this function but retain the +// option to do so in case it becomes useful. +func (n *pushGossiper) GossipEthTxs(txs []*types.Transaction) error { + if time.Now().Before(n.gossipActivationTime) { + log.Trace( + "not gossiping eth txs before the gossiping activation time", + "len(txs)", len(txs), + ) + return nil + } + + select { + case n.ethTxsToGossipChan <- txs: + case <-n.shutdownChan: + } + return nil +} + +// GossipHandler handles incoming gossip messages +type GossipHandler struct { + vm *VM + atomicMempool *Mempool + txPool *core.TxPool +} + +func NewGossipHandler(vm *VM) *GossipHandler { + return &GossipHandler{ + vm: vm, + atomicMempool: vm.mempool, + txPool: vm.chain.GetTxPool(), + } +} + +func (h *GossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGossip) error { + log.Trace( + "AppGossip called with AtomicTxGossip", + "peerID", nodeID, + ) + + if len(msg.Tx) == 0 { + log.Trace( + "AppGossip received empty AtomicTxGossip Message", + "peerID", nodeID, + ) + return nil + } + + // In the case that the gossip message contains a transaction, + // attempt to parse it and add it as a remote. + tx := Tx{} + if _, err := Codec.Unmarshal(msg.Tx, &tx); err != nil { + log.Trace( + "AppGossip provided invalid tx", + "err", err, + ) + return nil + } + unsignedBytes, err := Codec.Marshal(codecVersion, &tx.UnsignedAtomicTx) + if err != nil { + log.Trace( + "AppGossip failed to marshal unsigned tx", + "err", err, + ) + return nil + } + tx.Initialize(unsignedBytes, msg.Tx) + + txID := tx.ID() + if _, dropped, found := h.atomicMempool.GetTx(txID); found || dropped { + return nil + } + + if err := h.vm.issueTx(&tx, false /*=local*/); err != nil { + log.Trace( + "AppGossip provided invalid transaction", + "peerID", nodeID, + "err", err, + ) + } + + return nil +} + +func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { + log.Trace( + "AppGossip called with EthTxsGossip", + "peerID", nodeID, + "size(txs)", len(msg.Txs), + ) + + if len(msg.Txs) == 0 { + log.Trace( + "AppGossip received empty EthTxsGossip Message", + "peerID", nodeID, + ) + return nil + } + + // The maximum size of this encoded object is enforced by the codec. 
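+ // Sketch of the wire round trip this handler assumes, mirroring sendEthTxs above (variable names are illustrative only): + // enc, err := rlp.EncodeToBytes(txs) // gossip sender side + // var got []*types.Transaction + // err = rlp.DecodeBytes(enc, &got) // receiver side (this handler)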
+ txs := make([]*types.Transaction, 0) + if err := rlp.DecodeBytes(msg.Txs, &txs); err != nil { + log.Trace( + "AppGossip provided invalid txs", + "peerID", nodeID, + "err", err, + ) + return nil + } + errs := h.txPool.AddRemotes(txs) + for i, err := range errs { + if err != nil { + log.Trace( + "AppGossip failed to add to mempool", + "err", err, + "tx", txs[i].Hash(), + ) + } + } + return nil +} + +// noopGossiper should be used when gossip communication is not supported +type noopGossiper struct{} + +func (n *noopGossiper) GossipAtomicTxs([]*Tx) error { + return nil +} +func (n *noopGossiper) GossipEthTxs([]*types.Transaction) error { + return nil +} diff --git a/plugin/evm/gossiper_atomic_gossiping_test.go b/plugin/evm/gossiper_atomic_gossiping_test.go index c2aadeb575..28441f193c 100644 --- a/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/plugin/evm/gossiper_atomic_gossiping_test.go @@ -4,29 +4,83 @@ package evm import ( - "context" - "os" "sync" "testing" "time" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/stretchr/testify/assert" - commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/stretchr/testify/assert" - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/tenderly/coreth/plugin/evm/message" ) +// locally issued txs should be gossiped +func TestMempoolAtmTxsIssueTxAndGossiping(t *testing.T) { + assert := assert.New(t) + + _, vm, _, sharedMemory, sender := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") + defer func() { + assert.NoError(vm.Shutdown()) + }() + + // Create conflicting transactions + importTxs := createImportTxOptions(t, vm, sharedMemory) + tx, conflictingTx := importTxs[0], importTxs[1] + + var gossiped int + var gossipedLock sync.Mutex // needed to prevent race + sender.CantSendAppGossip = false + sender.SendAppGossipF = func(gossipedBytes []byte) error { + gossipedLock.Lock() + defer gossipedLock.Unlock() + + notifyMsgIntf, err := message.ParseGossipMessage(vm.networkCodec, gossipedBytes) + assert.NoError(err) + + requestMsg, ok := notifyMsgIntf.(message.AtomicTxGossip) + assert.NotEmpty(requestMsg.Tx) + assert.True(ok) + + txg := Tx{} + _, err = Codec.Unmarshal(requestMsg.Tx, &txg) + assert.NoError(err) + unsignedBytes, err := Codec.Marshal(codecVersion, &txg.UnsignedAtomicTx) + assert.NoError(err) + txg.Initialize(unsignedBytes, requestMsg.Tx) + assert.Equal(tx.ID(), txg.ID()) + gossiped++ + return nil + } + + // Optimistically gossip raw tx + assert.NoError(vm.issueTx(tx, true /*=local*/)) + time.Sleep(waitBlockTime * 3) + gossipedLock.Lock() + assert.Equal(1, gossiped) + gossipedLock.Unlock() + + // Test hash on retry + assert.NoError(vm.gossiper.GossipAtomicTxs([]*Tx{tx})) + gossipedLock.Lock() + assert.Equal(1, gossiped) + gossipedLock.Unlock() + + // Attempt to gossip conflicting tx + assert.ErrorIs(vm.issueTx(conflictingTx, true /*=local*/), errConflictingAtomicTx) + gossipedLock.Lock() + assert.Equal(1, gossiped) + gossipedLock.Unlock() +} + // show that a txID discovered from gossip is requested to the same node only if // the txID is unknown func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") defer func() { - assert.NoError(vm.Shutdown(context.Background())) + assert.NoError(vm.Shutdown()) }() nodeID := ids.GenerateTestNodeID() @@ -37,14 +91,14 @@ func 
TestMempoolAtmTxsAppGossipHandling(t *testing.T) { txRequested bool ) sender.CantSendAppGossip = false - sender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { + sender.SendAppGossipF = func(_ []byte) error { txGossipedLock.Lock() defer txGossipedLock.Unlock() txGossiped++ return nil } - sender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { + sender.SendAppRequestF = func(_ ids.NodeIDSet, _ uint32, _ []byte) error { txRequested = true return nil } @@ -55,66 +109,49 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { // gossip tx and check it is accepted and gossiped msg := message.AtomicTxGossip{ - Tx: tx.SignedBytes(), + Tx: tx.Bytes(), } msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - vm.ctx.Lock.Unlock() - // show that no txID is requested - assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - time.Sleep(500 * time.Millisecond) - - vm.ctx.Lock.Lock() + assert.NoError(vm.AppGossip(nodeID, msgBytes)) + time.Sleep(waitBlockTime * 3) assert.False(txRequested, "tx should not have been requested") txGossipedLock.Lock() - assert.Equal(0, txGossiped, "tx should not have been gossiped") + assert.Equal(1, txGossiped, "tx should have been gossiped") txGossipedLock.Unlock() assert.True(vm.mempool.has(tx.ID())) - vm.ctx.Lock.Unlock() - // show that tx is not re-gossiped - assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - - vm.ctx.Lock.Lock() - + assert.NoError(vm.AppGossip(nodeID, msgBytes)) txGossipedLock.Lock() - assert.Equal(0, txGossiped, "tx should not have been gossiped") + assert.Equal(1, txGossiped, "tx should have only been gossiped once") txGossipedLock.Unlock() // show that conflicting tx is not added to mempool msg = message.AtomicTxGossip{ - Tx: conflictingTx.SignedBytes(), + Tx: conflictingTx.Bytes(), } msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - - vm.ctx.Lock.Unlock() - - assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - - vm.ctx.Lock.Lock() - + assert.NoError(vm.AppGossip(nodeID, msgBytes)) assert.False(txRequested, "tx should not have been requested") txGossipedLock.Lock() - assert.Equal(0, txGossiped, "tx should not have been gossiped") + assert.Equal(1, txGossiped, "tx should not have been gossiped") txGossipedLock.Unlock() assert.False(vm.mempool.has(conflictingTx.ID()), "conflicting tx should not be in the atomic mempool") } // show that txs already marked as invalid are not re-requested on gossiping func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } + t.Skip("FLAKY") assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") defer func() { - assert.NoError(vm.Shutdown(context.Background())) + assert.NoError(vm.Shutdown()) }() mempool := vm.mempool @@ -124,14 +161,14 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { txRequested bool ) sender.CantSendAppGossip = false - sender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { + sender.SendAppGossipF = func(_ []byte) error { txGossipedLock.Lock() defer txGossipedLock.Unlock() txGossiped++ return nil } - sender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { + sender.SendAppRequestF = func(ids.NodeIDSet, uint32, []byte) error { 
txRequested = true return nil } @@ -152,17 +189,12 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // and is not re-gossipped. nodeID := ids.GenerateTestNodeID() msg := message.AtomicTxGossip{ - Tx: tx.SignedBytes(), + Tx: tx.Bytes(), } msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - vm.ctx.Lock.Unlock() - - assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - - vm.ctx.Lock.Lock() - + assert.NoError(vm.AppGossip(nodeID, msgBytes)) assert.False(txRequested, "tx shouldn't be requested") txGossipedLock.Lock() assert.Zero(txGossiped, "tx should not have been gossiped") @@ -175,18 +207,13 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // to the network. nodeID = ids.GenerateTestNodeID() msg = message.AtomicTxGossip{ - Tx: conflictingTx.SignedBytes(), + Tx: conflictingTx.Bytes(), } msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - vm.ctx.Lock.Unlock() - - assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - time.Sleep(500 * time.Millisecond) - - vm.ctx.Lock.Lock() - + assert.NoError(vm.AppGossip(nodeID, msgBytes)) + time.Sleep(waitBlockTime * 3) assert.False(txRequested, "tx shouldn't be requested") txGossipedLock.Lock() assert.Equal(1, txGossiped, "conflicting tx should have been gossiped") diff --git a/plugin/evm/gossiper_eth_gossiping_test.go b/plugin/evm/gossiper_eth_gossiping_test.go index 21b9bc6620..abdf47c20b 100644 --- a/plugin/evm/gossiper_eth_gossiping_test.go +++ b/plugin/evm/gossiper_eth_gossiping_test.go @@ -21,14 +21,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" "github.com/stretchr/testify/assert" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/plugin/evm/message" ) func fundAddressByGenesis(addrs []common.Address) (string, error) { diff --git a/plugin/evm/import_tx.go b/plugin/evm/import_tx.go index cd5d493368..fd97d48d06 100644 --- a/plugin/evm/import_tx.go +++ b/plugin/evm/import_tx.go @@ -4,22 +4,17 @@ package evm import ( - "context" - "errors" "fmt" - "github.com/ava-labs/coreth/core/vm" "math/big" - "slices" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -27,16 +22,9 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var ( - _ UnsignedAtomicTx = &UnsignedImportTx{} - _ secp256k1fx.UnsignedTx = &UnsignedImportTx{} - errImportNonAVAXInputBanff = errors.New("import input cannot contain non-AVAX in Banff") - errImportNonAVAXOutputBanff = errors.New("import output cannot contain non-AVAX in Banff") -) - // UnsignedImportTx is an unsigned ImportTx 
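+// that consumes UTXOs from the source chain's shared memory and credits the
+// imported funds to EVM addresses as EVMOutputs.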
type UnsignedImportTx struct { - Metadata + avax.Metadata // ID of the network on which this tx was issued NetworkID uint32 `serialize:"true" json:"networkID"` // ID of this blockchain. @@ -50,29 +38,29 @@ type UnsignedImportTx struct { } // InputUTXOs returns the UTXOIDs of the imported funds -func (utx *UnsignedImportTx) InputUTXOs() set.Set[ids.ID] { - set := set.NewSet[ids.ID](len(utx.ImportedInputs)) - for _, in := range utx.ImportedInputs { +func (tx *UnsignedImportTx) InputUTXOs() ids.Set { + set := ids.NewSet(len(tx.ImportedInputs)) + for _, in := range tx.ImportedInputs { set.Add(in.InputID()) } return set } // Verify this transaction is well-formed -func (utx *UnsignedImportTx) Verify( +func (tx *UnsignedImportTx) Verify( ctx *snow.Context, rules params.Rules, ) error { switch { - case utx == nil: + case tx == nil: return errNilTx - case len(utx.ImportedInputs) == 0: + case len(tx.ImportedInputs) == 0: return errNoImportInputs - case utx.NetworkID != ctx.NetworkID: + case tx.NetworkID != ctx.NetworkID: return errWrongNetworkID - case ctx.ChainID != utx.BlockchainID: + case ctx.ChainID != tx.BlockchainID: return errWrongBlockchainID - case rules.IsApricotPhase3 && len(utx.Outs) == 0: + case rules.IsApricotPhase3 && len(tx.Outs) == 0: return errNoEVMOutputs } @@ -80,42 +68,36 @@ func (utx *UnsignedImportTx) Verify( if rules.IsApricotPhase5 { // Note that SameSubnet verifies that [tx.SourceChain] isn't this // chain's ID - if err := verify.SameSubnet(context.TODO(), ctx, utx.SourceChain); err != nil { + if err := verify.SameSubnet(ctx, tx.SourceChain); err != nil { return errWrongChainID } } else { - if utx.SourceChain != ctx.XChainID { + if tx.SourceChain != ctx.XChainID { return errWrongChainID } } - for _, out := range utx.Outs { + for _, out := range tx.Outs { if err := out.Verify(); err != nil { return fmt.Errorf("EVM Output failed verification: %w", err) } - if rules.IsBanff && out.AssetID != ctx.AVAXAssetID { - return errImportNonAVAXOutputBanff - } } - for _, in := range utx.ImportedInputs { + for _, in := range tx.ImportedInputs { if err := in.Verify(); err != nil { return fmt.Errorf("atomic input failed verification: %w", err) } - if rules.IsBanff && in.AssetID() != ctx.AVAXAssetID { - return errImportNonAVAXInputBanff - } } - if !utils.IsSortedAndUnique(utx.ImportedInputs) { + if !avax.IsSortedAndUniqueTransferableInputs(tx.ImportedInputs) { return errInputsNotSortedUnique } if rules.IsApricotPhase2 { - if !utils.IsSortedAndUnique(utx.Outs) { + if !IsSortedAndUniqueEVMOutputs(tx.Outs) { return errOutputsNotSortedUnique } } else if rules.IsApricotPhase1 { - if !slices.IsSortedFunc(utx.Outs, EVMOutput.Compare) { + if !IsSortedEVMOutputs(tx.Outs) { return errOutputsNotSorted } } @@ -123,12 +105,12 @@ func (utx *UnsignedImportTx) Verify( return nil } -func (utx *UnsignedImportTx) GasUsed(fixedFee bool) (uint64, error) { +func (tx *UnsignedImportTx) GasUsed(fixedFee bool) (uint64, error) { var ( - cost = calcBytesCost(len(utx.Bytes())) + cost = calcBytesCost(len(tx.UnsignedBytes())) err error ) - for _, in := range utx.ImportedInputs { + for _, in := range tx.ImportedInputs { inCost, err := in.In.Cost() if err != nil { return 0, err @@ -148,13 +130,13 @@ func (utx *UnsignedImportTx) GasUsed(fixedFee bool) (uint64, error) { } // Amount of [assetID] burned by this transaction -func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { +func (tx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { var ( spent uint64 input uint64 err error ) - for _, out := range 
utx.Outs { + for _, out := range tx.Outs { if out.AssetID == assetID { spent, err = math.Add64(spent, out.Amount) if err != nil { @@ -162,7 +144,7 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { } } } - for _, in := range utx.ImportedInputs { + for _, in := range tx.ImportedInputs { if in.AssetID() == assetID { input, err = math.Add64(input, in.Input().Amount()) if err != nil { @@ -171,18 +153,18 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { } } - return math.Sub(input, spent) + return math.Sub64(input, spent) } // SemanticVerify this transaction is valid. -func (utx *UnsignedImportTx) SemanticVerify( +func (tx *UnsignedImportTx) SemanticVerify( vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules params.Rules, ) error { - if err := utx.Verify(vm.ctx, rules); err != nil { + if err := tx.Verify(vm.ctx, rules); err != nil { return err } @@ -195,7 +177,7 @@ func (utx *UnsignedImportTx) SemanticVerify( if err != nil { return err } - txFee, err := CalculateDynamicFee(gasUsed, baseFee) + txFee, err := calculateDynamicFee(gasUsed, baseFee) if err != nil { return err } @@ -205,10 +187,10 @@ func (utx *UnsignedImportTx) SemanticVerify( case rules.IsApricotPhase2: fc.Produce(vm.ctx.AVAXAssetID, params.AvalancheAtomicTxFee) } - for _, out := range utx.Outs { + for _, out := range tx.Outs { fc.Produce(out.AssetID, out.Amount) } - for _, in := range utx.ImportedInputs { + for _, in := range tx.ImportedInputs { fc.Consume(in.AssetID(), in.Input().Amount()) } @@ -216,8 +198,8 @@ func (utx *UnsignedImportTx) SemanticVerify( return fmt.Errorf("import tx flow check failed due to: %w", err) } - if len(stx.Creds) != len(utx.ImportedInputs) { - return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. %d)", len(utx.ImportedInputs), len(stx.Creds)) + if len(stx.Creds) != len(tx.ImportedInputs) { + return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. 
%d)", len(tx.ImportedInputs), len(stx.Creds)) } if !vm.bootstrapped { @@ -225,18 +207,18 @@ func (utx *UnsignedImportTx) SemanticVerify( return nil } - utxoIDs := make([][]byte, len(utx.ImportedInputs)) - for i, in := range utx.ImportedInputs { + utxoIDs := make([][]byte, len(tx.ImportedInputs)) + for i, in := range tx.ImportedInputs { inputID := in.UTXOID.InputID() utxoIDs[i] = inputID[:] } // allUTXOBytes is guaranteed to be the same length as utxoIDs - allUTXOBytes, err := vm.ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) + allUTXOBytes, err := vm.ctx.SharedMemory.Get(tx.SourceChain, utxoIDs) if err != nil { - return fmt.Errorf("failed to fetch import UTXOs from %s due to: %w", utx.SourceChain, err) + return fmt.Errorf("failed to fetch import UTXOs from %s due to: %w", tx.SourceChain, err) } - for i, in := range utx.ImportedInputs { + for i, in := range tx.ImportedInputs { utxoBytes := allUTXOBytes[i] utxo := &avax.UTXO{} @@ -252,12 +234,12 @@ func (utx *UnsignedImportTx) SemanticVerify( return errAssetIDMismatch } - if err := vm.fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { + if err := vm.fx.VerifyTransfer(tx, in.In, cred, utxo.Out); err != nil { return fmt.Errorf("import tx transfer failed verification: %w", err) } } - return vm.conflicts(utx.InputUTXOs(), parent) + return vm.conflicts(tx.InputUTXOs(), parent) } // AtomicOps returns imported inputs spent on this transaction @@ -265,13 +247,13 @@ func (utx *UnsignedImportTx) SemanticVerify( // we don't want to remove an imported UTXO in semanticVerify // only to have the transaction not be Accepted. This would be inconsistent. // Recall that imported UTXOs are not kept in a versionDB. -func (utx *UnsignedImportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { - utxoIDs := make([][]byte, len(utx.ImportedInputs)) - for i, in := range utx.ImportedInputs { +func (tx *UnsignedImportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { + utxoIDs := make([][]byte, len(tx.ImportedInputs)) + for i, in := range tx.ImportedInputs { inputID := in.InputID() utxoIDs[i] = inputID[:] } - return utx.SourceChain, &atomic.Requests{RemoveRequests: utxoIDs}, nil + return tx.SourceChain, &atomic.Requests{RemoveRequests: utxoIDs}, nil } // newImportTx returns a new ImportTx @@ -279,7 +261,7 @@ func (vm *VM) newImportTx( chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Keys to import the funds + keys []*crypto.PrivateKeySECP256K1R, // Keys to import the funds ) (*Tx, error) { kc := secp256k1fx.NewKeychain() for _, key := range keys { @@ -303,7 +285,7 @@ func (vm *VM) newImportTxWithUTXOs( atomicUTXOs []*avax.UTXO, // UTXOs to spend ) (*Tx, error) { importedInputs := []*avax.TransferableInput{} - signers := [][]*secp256k1.PrivateKey{} + signers := [][]*crypto.PrivateKeySECP256K1R{} importedAmount := make(map[ids.ID]uint64) now := vm.clock.Unix() @@ -376,11 +358,11 @@ func (vm *VM) newImportTxWithUTXOs( } gasUsedWithChange := gasUsedWithoutChange + EVMOutputGas - txFeeWithoutChange, err = CalculateDynamicFee(gasUsedWithoutChange, baseFee) + txFeeWithoutChange, err = calculateDynamicFee(gasUsedWithoutChange, baseFee) if err != nil { return nil, err } - txFeeWithChange, err = CalculateDynamicFee(gasUsedWithChange, baseFee) + txFeeWithChange, err = calculateDynamicFee(gasUsedWithChange, baseFee) if err != nil { return nil, err } @@ -409,7 +391,7 @@ func (vm *VM) newImportTxWithUTXOs( return nil, errNoEVMOutputs } - utils.Sort(outs) + 
SortEVMOutputs(outs) // Create the transaction utx := &UnsignedImportTx{ @@ -428,17 +410,17 @@ func (vm *VM) newImportTxWithUTXOs( // EVMStateTransfer performs the state transfer to increase the balances of // accounts accordingly with the imported EVMOutputs -func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state vm.StateDB) error { - for _, to := range utx.Outs { +func (tx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { + for _, to := range tx.Outs { if to.AssetID == ctx.AVAXAssetID { - log.Debug("crosschain", "src", utx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", "AVAX") + log.Debug("crosschain", "src", tx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", "AVAX") // If the asset is AVAX, convert the input amount in nAVAX to gWei by // multiplying by the x2c rate. amount := new(big.Int).Mul( new(big.Int).SetUint64(to.Amount), x2cRate) state.AddBalance(to.Address, amount) } else { - log.Debug("crosschain", "src", utx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", to.AssetID) + log.Debug("crosschain", "src", tx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", to.AssetID) amount := new(big.Int).SetUint64(to.Amount) state.AddBalanceMultiCoin(to.Address, common.Hash(to.AssetID), amount) } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index ddcde4a879..a7e1d413c0 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -7,7 +7,7 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/avalanchego/chains/atomic" diff --git a/plugin/evm/static_service.go b/plugin/evm/static_service.go index 7b0fa8bde5..e8d552500e 100644 --- a/plugin/evm/static_service.go +++ b/plugin/evm/static_service.go @@ -8,7 +8,7 @@ import ( "encoding/json" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/coreth/core" + "github.com/tenderly/coreth/core" ) // StaticService defines the static API services exposed by the evm diff --git a/plugin/evm/syncervm_server.go b/plugin/evm/syncervm_server.go index 3bf051bf87..d82edc5728 100644 --- a/plugin/evm/syncervm_server.go +++ b/plugin/evm/syncervm_server.go @@ -10,8 +10,8 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/plugin/evm/message" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 23a461f0c0..1c2958b886 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -13,36 +13,31 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/set" + 
"github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/constants" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/predicate" - statesyncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/sync/statesync" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/accounts/keystore" + coreth "github.com/tenderly/coreth/chain" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rlp" + statesyncclient "github.com/tenderly/coreth/sync/client" + "github.com/tenderly/coreth/sync/statesync" + "github.com/tenderly/coreth/trie" ) func TestSkipStateSync(t *testing.T) { @@ -50,9 +45,10 @@ func TestSkipStateSync(t *testing.T) { test := syncTest{ syncableInterval: 256, stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync - syncMode: block.StateSyncSkipped, + shouldSync: false, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test) + defer vmSetup.Teardown(t) testSyncerVM(t, vmSetup, test) } @@ -62,22 +58,10 @@ func TestStateSyncFromScratch(t *testing.T) { test := syncTest{ syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, + shouldSync: true, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) - - testSyncerVM(t, vmSetup, test) -} - -func TestStateSyncFromScratchExceedParent(t *testing.T) { - rand.Seed(1) - numToGen := parentsToGet + uint64(32) - test := syncTest{ - syncableInterval: numToGen, - stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, - } - vmSetup := createSyncServerAndClientVMs(t, test, int(numToGen)) + vmSetup := createSyncServerAndClientVMs(t, test) + defer vmSetup.Teardown(t) testSyncerVM(t, vmSetup, test) } @@ -95,7 +79,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { test := syncTest{ syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, + shouldSync: true, responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { lock.Lock() defer lock.Unlock() @@ -103,7 +87,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { reqCount++ // Fail all requests after number 50 to interrupt the sync if reqCount > 50 { - if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { + if err := syncerVM.AppRequestFailed(nodeID, requestID); err != nil { panic(err) } cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel @@ -113,40 +97,40 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { t.Fatal("state sync client not populated correctly") } } else { - 
syncerVM.AppResponse(context.Background(), nodeID, requestID, response) + syncerVM.AppResponse(nodeID, requestID, response) } }, expectedErr: context.Canceled, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test) + defer vmSetup.Teardown(t) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) - test.syncMode = block.StateSyncStatic + test.shouldSync = true test.responseIntercept = nil test.expectedErr = nil syncDisabledVM := &VM{} appSender := &commonEng.SenderTest{T: t} - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { + appSender.SendAppGossipF = func([]byte) error { return nil } + appSender.SendAppRequestF = func(nodeSet ids.NodeIDSet, requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() if !hasItem { t.Fatal("expected nodeSet to contain at least 1 nodeID") } - go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + go vmSetup.serverVM.AppRequest(nodeID, requestID, time.Now().Add(1*time.Second), request) return nil } // Disable metrics to prevent duplicate registerer - stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` + configJSON := "{\"metrics-enabled\":false}" if err := syncDisabledVM.Initialize( - context.Background(), vmSetup.syncerVM.ctx, - vmSetup.syncerDB, - []byte(genesisJSONLatest), + vmSetup.syncerDBManager, + []byte(genesisJSONApricotPhase5), nil, - []byte(stateSyncDisabledConfigJSON), + []byte(configJSON), vmSetup.syncerVM.toEngine, []*commonEng.Fx{}, appSender, @@ -155,7 +139,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } defer func() { - if err := syncDisabledVM.Shutdown(context.Background()); err != nil { + if err := syncDisabledVM.Shutdown(); err != nil { t.Fatal(err) } }() @@ -164,13 +148,13 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { t.Fatalf("Unexpected last accepted height: %d", height) } - enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) + enabled, err := syncDisabledVM.StateSyncEnabled() assert.NoError(t, err) assert.False(t, enabled, "sync should be disabled") // Process the first 10 blocks from the serverVM for i := uint64(1); i < 10; i++ { - ethBlock := vmSetup.serverVM.blockChain.GetBlockByNumber(i) + ethBlock := vmSetup.serverVM.chain.GetBlockByNumber(i) if ethBlock == nil { t.Fatalf("VM Server did not have a block available at height %d", i) } @@ -178,36 +162,34 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err != nil { t.Fatal(err) } - blk, err := syncDisabledVM.ParseBlock(context.Background(), b) + blk, err := syncDisabledVM.ParseBlock(b) if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } } // Verify the snapshot disk layer matches the last block root - lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root - if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil { + lastRoot := syncDisabledVM.chain.BlockChain().CurrentBlock().Root() + if err := syncDisabledVM.chain.BlockChain().Snapshots().Verify(lastRoot); err != nil { t.Fatal(err) } - syncDisabledVM.blockChain.DrainAcceptorQueue() // Create a new VM from the same 
database with state sync enabled. syncReEnabledVM := &VM{} - // Enable state sync in configJSON - configJSON := fmt.Sprintf( - `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, + // Disable metrics to prevent duplicate registerer + configJSON = fmt.Sprintf( + "{\"metrics-enabled\":false, \"state-sync-enabled\":true, \"state-sync-min-blocks\":%d}", test.stateSyncMinBlocks, ) if err := syncReEnabledVM.Initialize( - context.Background(), vmSetup.syncerVM.ctx, - vmSetup.syncerDB, - []byte(genesisJSONLatest), + vmSetup.syncerDBManager, + []byte(genesisJSONApricotPhase5), nil, []byte(configJSON), vmSetup.syncerVM.toEngine, @@ -218,9 +200,9 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + vmSetup.serverAppSender.SendAppResponseF = func(nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { - go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response) + go syncReEnabledVM.AppResponse(nodeID, requestID, response) } else { go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response) } @@ -229,13 +211,9 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } // connect peer to [syncerVM] - assert.NoError(t, syncReEnabledVM.Connected( - context.Background(), - vmSetup.serverVM.ctx.NodeID, - statesyncclient.StateSyncVersion, - )) + assert.NoError(t, syncReEnabledVM.Connected(vmSetup.serverVM.ctx.NodeID, statesyncclient.StateSyncVersion)) - enabled, err = syncReEnabledVM.StateSyncEnabled(context.Background()) + enabled, err = syncReEnabledVM.StateSyncEnabled() assert.NoError(t, err) assert.True(t, enabled, "sync should be enabled") @@ -243,68 +221,61 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { testSyncerVM(t, vmSetup, test) } -func TestVMShutdownWhileSyncing(t *testing.T) { +func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { var ( - lock sync.Mutex - vmSetup *syncVMSetup + serverVM, syncerVM *VM ) - reqCount := 0 - test := syncTest{ - syncableInterval: 256, - stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { - lock.Lock() - defer lock.Unlock() + // If there is an error shutdown the VMs if they have been instantiated + defer func() { + // If the test has not already failed, shut down the VMs since the caller + // will not get the chance to shut them down. + if !t.Failed() { + return + } - reqCount++ - // Shutdown the VM after 50 requests to interrupt the sync - if reqCount == 50 { - // Note this verifies the VM shutdown does not time out while syncing. - require.NoError(t, vmSetup.shutdownOnceSyncerVM.Shutdown(context.Background())) - } else if reqCount < 50 { - err := syncerVM.AppResponse(context.Background(), nodeID, requestID, response) - require.NoError(t, err) + // If the test already failed, shut down the VMs if they were instantiated. + if serverVM != nil { + log.Info("Shutting down server VM") + if err := serverVM.Shutdown(); err != nil { + t.Fatal(err) } - }, - expectedErr: context.Canceled, - } - vmSetup = createSyncServerAndClientVMs(t, test, parentsToGet) - // Perform sync resulting in early termination. 
-	testSyncerVM(t, vmSetup, test)
-}
-
-func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *syncVMSetup {
-	var (
-		require      = require.New(t)
-		importAmount = 2000000 * units.Avax // 2M avax
-		alloc        = map[ids.ShortID]uint64{
-			testShortIDAddrs[0]: importAmount,
-		}
-	)
+		}
+		if syncerVM != nil {
+			log.Info("Shutting down syncerVM")
+			if err := syncerVM.Shutdown(); err != nil {
+				t.Fatal(err)
+			}
+		}
+	}()
+
+	// configure [serverVM]
+	importAmount := 2000000 * units.Avax // 2M avax
 	_, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs(
-		t, true, "", "", "", alloc,
+		t,
+		true,
+		genesisJSONApricotPhase5,
+		"",
+		"",
+		map[ids.ShortID]uint64{
+			testShortIDAddrs[0]: importAmount,
+		},
 	)
-	t.Cleanup(func() {
-		log.Info("Shutting down server VM")
-		require.NoError(serverVM.Shutdown(context.Background()))
-	})
+
 	var (
 		importTx, exportTx *Tx
 		err                error
 	)
-	generateAndAcceptBlocks(t, serverVM, numBlocks, func(i int, gen *core.BlockGen) {
-		b, err := predicate.NewResults().Bytes()
-		if err != nil {
-			t.Fatal(err)
-		}
-		gen.AppendExtra(b)
+	generateAndAcceptBlocks(t, serverVM, parentsToGet, func(i int, gen *core.BlockGen) {
 		switch i {
 		case 0:
 			// spend the UTXOs from shared memory
-			importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]})
-			require.NoError(err)
-			require.NoError(serverVM.mempool.AddLocalTx(importTx))
+			importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]})
+			if err != nil {
+				t.Fatal(err)
+			}
+			if err := serverVM.issueTx(importTx, true /*=local*/); err != nil {
+				t.Fatal(err)
+			}
 		case 1:
 			// export some of the imported UTXOs to test exportTx is properly synced
 			exportTx, err = serverVM.newExportTx(
@@ -313,71 +284,82 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s
 				serverVM.ctx.XChainID,
 				testShortIDAddrs[0],
 				initialBaseFee,
-				[]*secp256k1.PrivateKey{testKeys[0]},
+				[]*crypto.PrivateKeySECP256K1R{testKeys[0]},
 			)
-			require.NoError(err)
-			require.NoError(serverVM.mempool.AddLocalTx(exportTx))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if err := serverVM.issueTx(exportTx, true /*=local*/); err != nil {
+				t.Fatal(err)
+			}
 		default:
 			// Generate simple transfer transactions.
 			pk := testKeys[0].ToECDSA()
 			tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, params.TxGas, initialBaseFee, nil)
 			signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), pk)
-			require.NoError(err)
+			if err != nil {
+				t.Fatal(err)
+			}
 			gen.AddTx(signedTx)
 		}
-	}, nil)
+	})

-	// override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index]
+	// override atomicTrie's commitHeightInterval so the call to [atomicTrie.Index]
 	// creates a commit at the height [syncableInterval]. This is necessary to support
 	// fetching a state summary.
- serverAtomicTrie := serverVM.atomicTrie.(*atomicTrie) - serverAtomicTrie.commitInterval = test.syncableInterval - require.NoError(serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) - require.NoError(serverVM.db.Commit()) + serverVM.atomicTrie.(*atomicTrie).commitHeightInterval = test.syncableInterval + assert.NoError(t, serverVM.atomicTrie.Index(test.syncableInterval, nil)) + assert.NoError(t, serverVM.db.Commit()) serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) serverSharedMemories.assertOpsApplied(t, importTx.mustAtomicOps()) serverSharedMemories.assertOpsApplied(t, exportTx.mustAtomicOps()) // make some accounts - trieDB := trie.NewDatabase(serverVM.chaindb, nil) + trieDB := trie.NewDatabase(serverVM.chaindb) root, accounts := statesync.FillAccountsWithOverlappingStorage(t, trieDB, types.EmptyRootHash, 1000, 16) // patch serverVM's lastAcceptedBlock to have the new root // and update the vm's state so the trie with accounts will // be returned by StateSyncGetLastSummary - lastAccepted := serverVM.blockChain.LastAcceptedBlock() + lastAccepted := serverVM.chain.LastAcceptedBlock() patchedBlock := patchBlock(lastAccepted, root, serverVM.chaindb) blockBytes, err := rlp.EncodeToBytes(patchedBlock) - require.NoError(err) - internalBlock, err := serverVM.parseBlock(context.Background(), blockBytes) - require.NoError(err) + if err != nil { + t.Fatal(err) + } + internalBlock, err := serverVM.parseBlock(blockBytes) + if err != nil { + t.Fatal(err) + } internalBlock.(*Block).SetStatus(choices.Accepted) - require.NoError(serverVM.State.SetLastAcceptedBlock(internalBlock)) + assert.NoError(t, serverVM.State.SetLastAcceptedBlock(internalBlock)) // patch syncableInterval for test serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state - stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d}`, test.stateSyncMinBlocks, 4) - syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( - t, false, "", stateSyncEnabledJSON, "", alloc, + stateSyncEnabledJSON := fmt.Sprintf("{\"state-sync-enabled\":true, \"state-sync-min-blocks\": %d}", test.stateSyncMinBlocks) + syncerEngineChan, syncerVM, syncerDBManager, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( + t, + false, + genesisJSONApricotPhase5, + stateSyncEnabledJSON, + "", + map[ids.ShortID]uint64{ + testShortIDAddrs[0]: importAmount, + }, ) - shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM} - t.Cleanup(func() { - require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) - }) - require.NoError(syncerVM.SetState(context.Background(), snow.StateSyncing)) - enabled, err := syncerVM.StateSyncEnabled(context.Background()) - require.NoError(err) - require.True(enabled) - - // override [syncerVM]'s commit interval so the atomic trie works correctly. 
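+	// Note: [syncerVM] is placed in StateSyncing below; it cannot bootstrap
+	// until the summary has been accepted and the sync has completed.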
-	syncerVM.atomicTrie.(*atomicTrie).commitInterval = test.syncableInterval
+	if err := syncerVM.SetState(snow.StateSyncing); err != nil {
+		t.Fatal(err)
+	}
+	enabled, err := syncerVM.StateSyncEnabled()
+	assert.NoError(t, err)
+	assert.True(t, enabled)

 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
-	serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
+	serverAppSender.SendAppResponseF = func(nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncerVM.AppResponse(ctx, nodeID, requestID, response)
+			go syncerVM.AppResponse(nodeID, requestID, response)
 		} else {
 			go test.responseIntercept(syncerVM, nodeID, requestID, response)
 		}
@@ -386,20 +368,15 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s
 	}

 	// connect peer to [syncerVM]
-	require.NoError(
-		syncerVM.Connected(
-			context.Background(),
-			serverVM.ctx.NodeID,
-			statesyncclient.StateSyncVersion,
-		),
-	)
+	assert.NoError(t, syncerVM.Connected(serverVM.ctx.NodeID, statesyncclient.StateSyncVersion))

 	// override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM]
-	syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
+	syncerAppSender.SendAppRequestF = func(nodeSet ids.NodeIDSet, requestID uint32, request []byte) error {
 		nodeID, hasItem := nodeSet.Pop()
-		require.True(hasItem, "expected nodeSet to contain at least 1 nodeID")
-		err := serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
-		require.NoError(err)
+		if !hasItem {
+			t.Fatal("expected nodeSet to contain at least 1 nodeID")
+		}
+		go serverVM.AppRequest(nodeID, requestID, time.Now().Add(1*time.Second), request)
 		return nil
 	}

@@ -410,12 +387,11 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s
 			importTx,
 			exportTx,
 		},
-		fundedAccounts:       accounts,
-		syncerVM:             syncerVM,
-		syncerDB:             syncerDB,
-		syncerEngineChan:     syncerEngineChan,
-		syncerAtomicMemory:   syncerAtomicMemory,
-		shutdownOnceSyncerVM: shutdownOnceSyncerVM,
+		fundedAccounts:     accounts,
+		syncerVM:           syncerVM,
+		syncerDBManager:    syncerDBManager,
+		syncerEngineChan:   syncerEngineChan,
+		syncerAtomicMemory: syncerAtomicMemory,
 	}
 }

@@ -428,22 +404,17 @@ type syncVMSetup struct {
 	includedAtomicTxs []*Tx
 	fundedAccounts    map[*keystore.Key]*types.StateAccount

-	syncerVM             *VM
-	syncerDB             database.Database
-	syncerEngineChan     <-chan commonEng.Message
-	syncerAtomicMemory   *atomic.Memory
-	shutdownOnceSyncerVM *shutdownOnceVM
-}
-
-type shutdownOnceVM struct {
-	*VM
-	shutdownOnce sync.Once
+	syncerVM           *VM
+	syncerDBManager    manager.Manager
+	syncerEngineChan   <-chan commonEng.Message
+	syncerAtomicMemory *atomic.Memory
 }

-func (vm *shutdownOnceVM) Shutdown(ctx context.Context) error {
-	var err error
-	vm.shutdownOnce.Do(func() { err = vm.VM.Shutdown(ctx) })
-	return err
+// Teardown shuts down both VMs and asserts that both exit without error.
+// Note: assumes both serverVM and syncerVM have been initialized.
+func (s *syncVMSetup) Teardown(t *testing.T) {
+	assert.NoError(t, s.serverVM.Shutdown())
+	assert.NoError(t, s.syncerVM.Shutdown())
 }

 // syncTest contains both the actual VMs as well as the parameters with the expected output.
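// A minimal sketch of how the harness above is driven end to end, mirroring
// TestStateSyncFromScratch earlier in this diff (the test name here is
// illustrative; the threshold values are the ones that test uses):
//
//	func TestStateSyncSketch(t *testing.T) {
//		test := syncTest{
//			syncableInterval:   256,
//			stateSyncMinBlocks: 50, // < syncableInterval, so the client performs the sync
//			shouldSync:         true,
//		}
//		vmSetup := createSyncServerAndClientVMs(t, test)
//		defer vmSetup.Teardown(t)
//		testSyncerVM(t, vmSetup, test)
//	}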
@@ -451,14 +422,12 @@ type syncTest struct {
 	responseIntercept  func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte)
 	stateSyncMinBlocks uint64
 	syncableInterval   uint64
-	syncMode           block.StateSyncMode
+	shouldSync         bool
 	expectedErr        error
 }

 func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) {
-	t.Helper()
 	var (
-		require           = require.New(t)
 		serverVM          = vmSetup.serverVM
 		includedAtomicTxs = vmSetup.includedAtomicTxs
 		fundedAccounts    = vmSetup.fundedAccounts
@@ -466,95 +435,76 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) {
 		syncerEngineChan   = vmSetup.syncerEngineChan
 		syncerAtomicMemory = vmSetup.syncerAtomicMemory
 	)
+
 	// get last summary and test related methods
-	summary, err := serverVM.GetLastStateSummary(context.Background())
-	require.NoError(err, "error getting state sync last summary")
-	parsedSummary, err := syncerVM.ParseStateSummary(context.Background(), summary.Bytes())
-	require.NoError(err, "error parsing state summary")
-	retrievedSummary, err := serverVM.GetStateSummary(context.Background(), parsedSummary.Height())
-	require.NoError(err, "error getting state sync summary at height")
-	require.Equal(summary, retrievedSummary)
-
-	syncMode, err := parsedSummary.Accept(context.Background())
-	require.NoError(err, "error accepting state summary")
-	require.Equal(test.syncMode, syncMode)
-	if syncMode == block.StateSyncSkipped {
-		return
+	summary, err := serverVM.GetLastStateSummary()
+	if err != nil {
+		t.Fatal("error getting state sync last summary", "err", err)
+	}
+	parsedSummary, err := syncerVM.ParseStateSummary(summary.Bytes())
+	if err != nil {
+		t.Fatal("error parsing state summary", "err", err)
 	}
+	retrievedSummary, err := serverVM.GetStateSummary(parsedSummary.Height())
+	if err != nil {
+		t.Fatal("error when checking if summary is accepted", "err", err)
+	}
+	assert.Equal(t, summary, retrievedSummary)
+	shouldSync, err := parsedSummary.Accept()
+	if err != nil {
+		t.Fatal("unexpected error accepting state summary", "err", err)
+	}
+	if shouldSync != test.shouldSync {
+		t.Fatal("unexpected value returned from accept", "expected", test.shouldSync, "got", shouldSync)
+	}
+	if !shouldSync {
+		return
+	}
 	msg := <-syncerEngineChan
-	require.Equal(commonEng.StateSyncDone, msg)
+	assert.Equal(t, commonEng.StateSyncDone, msg)

 	// If the test is expected to error, assert the correct error is returned and finish the test.
 	err = syncerVM.StateSyncClient.Error()
 	if test.expectedErr != nil {
-		require.ErrorIs(err, test.expectedErr)
-		// Note we re-open the database here to avoid a closed error when the test is for a shutdown VM.
-		chaindb := Database{prefixdb.NewNested(ethDBPrefix, syncerVM.db)}
-		assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{})
+		assert.ErrorIs(t, err, test.expectedErr)
 		return
 	}
-	require.NoError(err, "state sync failed")
+	if err != nil {
+		t.Fatal("state sync failed", err)
+	}

 	// set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly
 	// and that we can bootstrap and process some blocks.
- require.NoError(syncerVM.SetState(context.Background(), snow.Bootstrapping)) - require.Equal(serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server") - require.Equal(serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server") - require.True(syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block") - assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{retrievedSummary.Height(): {}}) - - lastNumber := syncerVM.blockChain.LastAcceptedBlock().NumberU64() - // check the last block is indexed - lastSyncedBlock := rawdb.ReadBlock(syncerVM.chaindb, rawdb.ReadCanonicalHash(syncerVM.chaindb, lastNumber), lastNumber) - for _, tx := range lastSyncedBlock.Transactions() { - index := rawdb.ReadTxLookupEntry(syncerVM.chaindb, tx.Hash()) - require.NotNilf(index, "Miss transaction indices, number %d hash %s", lastNumber, tx.Hash().Hex()) - } - - // tail should be the last block synced - if syncerVM.ethConfig.TxLookupLimit != 0 { - tail := lastSyncedBlock.NumberU64() - - core.CheckTxIndices(t, &tail, tail, syncerVM.chaindb, true) + if err := syncerVM.SetState(snow.Bootstrapping); err != nil { + t.Fatal(err) } + assert.Equal(t, serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server") + assert.Equal(t, serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server") + assert.True(t, syncerVM.chain.BlockChain().HasState(syncerVM.chain.LastAcceptedBlock().Root()), "unavailable state for last accepted block") blocksToBuild := 10 txsPerBlock := 10 - toAddress := testEthAddrs[1] // arbitrary choice + toAddress := testEthAddrs[2] // arbitrary choice generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { - b, err := predicate.NewResults().Bytes() - if err != nil { - t.Fatal(err) - } - gen.AppendExtra(b) i := 0 for k := range fundedAccounts { tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) - require.NoError(err) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), k.PrivateKey) + if err != nil { + t.Fatal(err) + } gen.AddTx(signedTx) i++ if i >= txsPerBlock { break } } - }, - func(block *types.Block) { - if syncerVM.ethConfig.TxLookupLimit != 0 { - tail := block.NumberU64() - syncerVM.ethConfig.TxLookupLimit + 1 - // tail should be the minimum last synced block, since we skipped it to the last block - if tail < lastSyncedBlock.NumberU64() { - tail = lastSyncedBlock.NumberU64() - } - core.CheckTxIndices(t, &tail, block.NumberU64(), syncerVM.chaindb, true) - } - }, - ) + }) // check we can transition to [NormalOp] state and continue to process blocks. 
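+	// [bootstrapped] gates semantic verification of atomic txs (SemanticVerify
+	// returns early during bootstrap), so it must be set once NormalOp begins.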
- require.NoError(syncerVM.SetState(context.Background(), snow.NormalOp)) - require.True(syncerVM.bootstrapped) + assert.NoError(t, syncerVM.SetState(snow.NormalOp)) + assert.True(t, syncerVM.bootstrapped) // check atomic memory was synced properly syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) @@ -565,34 +515,20 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { // Generate blocks after we have entered normal consensus as well generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { - b, err := predicate.NewResults().Bytes() - if err != nil { - t.Fatal(err) - } - gen.AppendExtra(b) i := 0 for k := range fundedAccounts { tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) - require.NoError(err) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), k.PrivateKey) + if err != nil { + t.Fatal(err) + } gen.AddTx(signedTx) i++ if i >= txsPerBlock { break } } - }, - func(block *types.Block) { - if syncerVM.ethConfig.TxLookupLimit != 0 { - tail := block.NumberU64() - syncerVM.ethConfig.TxLookupLimit + 1 - // tail should be the minimum last synced block, since we skipped it to the last block - if tail < lastSyncedBlock.NumberU64() { - tail = lastSyncedBlock.NumberU64() - } - core.CheckTxIndices(t, &tail, block.NumberU64(), syncerVM.chaindb, true) - } - }, - ) + }) } // patchBlock returns a copy of [blk] with [root] and updates [db] to @@ -604,7 +540,7 @@ func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Bl header := blk.Header() header.Root = root receipts := rawdb.ReadRawReceipts(db, blk.Hash(), blk.NumberU64()) - newBlk := types.NewBlockWithExtData( + newBlk := types.NewBlock( header, blk.Transactions(), blk.Uncles(), receipts, trie.NewStackTrie(nil), blk.ExtData(), true, ) rawdb.WriteBlock(db, newBlk) @@ -615,7 +551,7 @@ func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Bl // generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then // calls Verify and Accept on each generated block // TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests -func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen), accepted func(*types.Block)) { +func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen)) { t.Helper() // acceptExternalBlock defines a function to parse, verify, and accept a block once it has been @@ -625,50 +561,32 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, if err != nil { t.Fatal(err) } - vmBlock, err := vm.ParseBlock(context.Background(), bytes) + vmBlock, err := vm.ParseBlock(bytes) if err != nil { t.Fatal(err) } - if err := vmBlock.Verify(context.Background()); err != nil { + if err := vmBlock.Verify(); err != nil { t.Fatal(err) } - if err := vmBlock.Accept(context.Background()); err != nil { + if err := vmBlock.Accept(); err != nil { t.Fatal(err) } - - if accepted != nil { - accepted(block) - } } _, _, err := core.GenerateChain( vm.chainConfig, - vm.blockChain.LastAcceptedBlock(), - dummy.NewFakerWithCallbacks(vm.createConsensusCallbacks()), + vm.chain.LastAcceptedBlock(), + dummy.NewDummyEngine(vm.createConsensusCallbacks()), vm.chaindb, numBlocks, 10, func(i int, g 
*core.BlockGen) { g.SetOnBlockGenerated(acceptExternalBlock) - g.SetCoinbase(constants.BlackholeAddr) // necessary for syntactic validation of the block + g.SetCoinbase(coreth.BlackholeAddr) // necessary for syntactic validation of the block gen(i, g) }, ) if err != nil { t.Fatal(err) } - vm.blockChain.DrainAcceptorQueue() -} - -// assertSyncPerformedHeights iterates over all heights the VM has synced to and -// verifies it matches [expected]. -func assertSyncPerformedHeights(t *testing.T, db ethdb.Iteratee, expected map[uint64]struct{}) { - it := rawdb.NewSyncPerformedIterator(db) - defer it.Release() - - found := make(map[uint64]struct{}, len(expected)) - for it.Next() { - found[rawdb.UnpackSyncPerformedKey(it.Key())] = struct{}{} - } - require.NoError(t, it.Error()) - require.Equal(t, expected, found) + vm.chain.BlockChain().DrainAcceptorQueue() } diff --git a/plugin/evm/test_tx.go b/plugin/evm/test_tx.go index 96d8cd2b88..8a7399f1bb 100644 --- a/plugin/evm/test_tx.go +++ b/plugin/evm/test_tx.go @@ -4,7 +4,6 @@ package evm import ( - "github.com/ava-labs/coreth/core/vm" "math/big" "math/rand" @@ -15,12 +14,12 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" ) -type TestUnsignedTx struct { +type TestTx struct { GasUsedV uint64 `serialize:"true"` AcceptRequestsBlockchainIDV ids.ID `serialize:"true"` AcceptRequestsV *atomic.Requests `serialize:"true"` @@ -28,50 +27,50 @@ type TestUnsignedTx struct { IDV ids.ID `serialize:"true" json:"id"` BurnedV uint64 `serialize:"true"` UnsignedBytesV []byte - SignedBytesV []byte - InputUTXOsV set.Set[ids.ID] + BytesV []byte + InputUTXOsV ids.Set SemanticVerifyV error EVMStateTransferV error } -var _ UnsignedAtomicTx = &TestUnsignedTx{} +var _ UnsignedAtomicTx = &TestTx{} // GasUsed implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUsedV, nil } +func (t *TestTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUsedV, nil } // Verify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) Verify(ctx *snow.Context, rules params.Rules) error { return t.VerifyV } +func (t *TestTx) Verify(ctx *snow.Context, rules params.Rules) error { return t.VerifyV } // AtomicOps implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) AtomicOps() (ids.ID, *atomic.Requests, error) { +func (t *TestTx) AtomicOps() (ids.ID, *atomic.Requests, error) { return t.AcceptRequestsBlockchainIDV, t.AcceptRequestsV, nil } // Initialize implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) Initialize(unsignedBytes, signedBytes []byte) {} +func (t *TestTx) Initialize(unsignedBytes, signedBytes []byte) {} // ID implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) ID() ids.ID { return t.IDV } +func (t *TestTx) ID() ids.ID { return t.IDV } // Burned implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) Burned(assetID ids.ID) (uint64, error) { return t.BurnedV, nil } +func (t *TestTx) Burned(assetID ids.ID) (uint64, error) { return t.BurnedV, nil } -// Bytes implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) Bytes() []byte { return t.UnsignedBytesV } +// UnsignedBytes implements the UnsignedAtomicTx interface +func (t *TestTx) 
UnsignedBytes() []byte { return t.UnsignedBytesV } -// SignedBytes implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } +// Bytes implements the UnsignedAtomicTx interface +func (t *TestTx) Bytes() []byte { return t.BytesV } // InputUTXOs implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } +func (t *TestTx) InputUTXOs() ids.Set { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules params.Rules) error { +func (t *TestTx) SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules params.Rules) error { return t.SemanticVerifyV } // EVMStateTransfer implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state vm.StateDB) error { +func (t *TestTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { return t.EVMStateTransferV } @@ -81,7 +80,7 @@ func testTxCodec() codec.Manager { errs := wrappers.Errs{} errs.Add( - c.RegisterType(&TestUnsignedTx{}), + c.RegisterType(&TestTx{}), c.RegisterType(&atomic.Element{}), c.RegisterType(&atomic.Requests{}), codec.RegisterCodec(codecVersion, c), @@ -97,7 +96,7 @@ var blockChainID = ids.GenerateTestID() func testDataImportTx() *Tx { return &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ + UnsignedAtomicTx: &TestTx{ IDV: ids.GenerateTestID(), AcceptRequestsBlockchainIDV: blockChainID, AcceptRequestsV: &atomic.Requests{ @@ -112,7 +111,7 @@ func testDataImportTx() *Tx { func testDataExportTx() *Tx { return &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ + UnsignedAtomicTx: &TestTx{ IDV: ids.GenerateTestID(), AcceptRequestsBlockchainIDV: blockChainID, AcceptRequestsV: &atomic.Requests{ diff --git a/plugin/evm/tx.go b/plugin/evm/tx.go index 0dd6ce62cd..0389b3f174 100644 --- a/plugin/evm/tx.go +++ b/plugin/evm/tx.go @@ -7,22 +7,21 @@ import ( "bytes" "errors" "fmt" - "github.com/ava-labs/coreth/core/vm" "math/big" "sort" "github.com/ethereum/go-ethereum/common" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/params" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -55,14 +54,6 @@ type EVMOutput struct { AssetID ids.ID `serialize:"true" json:"assetID"` } -func (o EVMOutput) Compare(other EVMOutput) int { - addrComp := bytes.Compare(o.Address.Bytes(), other.Address.Bytes()) - if addrComp != 0 { - return addrComp - } - return bytes.Compare(o.AssetID[:], other.AssetID[:]) -} - // EVMInput defines an input created from the EVM state to fund export transactions type EVMInput struct { Address common.Address `serialize:"true" json:"address"` @@ -71,14 +62,6 @@ type EVMInput struct { Nonce uint64 `serialize:"true" json:"nonce"` } -func (i EVMInput) Compare(other EVMInput) int { - addrComp := bytes.Compare(i.Address.Bytes(), other.Address.Bytes()) - if addrComp != 0 { - return addrComp - } 
- return bytes.Compare(i.AssetID[:], other.AssetID[:]) -} - // Verify ... func (out *EVMOutput) Verify() error { switch { @@ -111,8 +94,8 @@ type UnsignedTx interface { ID() ids.ID GasUsed(fixedFee bool) (uint64, error) Burned(assetID ids.ID) (uint64, error) + UnsignedBytes() []byte Bytes() []byte - SignedBytes() []byte } // UnsignedAtomicTx is an unsigned operation that can be atomically accepted @@ -120,7 +103,7 @@ type UnsignedAtomicTx interface { UnsignedTx // InputUTXOs returns the UTXOs this tx consumes - InputUTXOs() set.Set[ids.ID] + InputUTXOs() ids.Set // Verify attempts to verify that the transaction is well formed Verify(ctx *snow.Context, rules params.Rules) error // Attempts to verify this transaction with the provided state. @@ -130,7 +113,7 @@ type UnsignedAtomicTx interface { // The set of atomic requests must be returned in a consistent order. AtomicOps() (ids.ID, *atomic.Requests, error) - EVMStateTransfer(ctx *snow.Context, state vm.StateDB) error + EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error } // Tx is a signed transaction @@ -142,21 +125,8 @@ type Tx struct { Creds []verify.Verifiable `serialize:"true" json:"credentials"` } -func (tx *Tx) Compare(other *Tx) int { - txHex := tx.ID().Hex() - otherHex := other.ID().Hex() - switch { - case txHex < otherHex: - return -1 - case txHex > otherHex: - return 1 - default: - return 0 - } -} - // Sign this transaction with the provided signers -func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { +func (tx *Tx) Sign(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) error { unsignedBytes, err := c.Marshal(codecVersion, &tx.UnsignedAtomicTx) if err != nil { return fmt.Errorf("couldn't marshal UnsignedAtomicTx: %w", err) @@ -166,7 +136,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { hash := hashing.ComputeHash256(unsignedBytes) for _, keys := range signers { cred := &secp256k1fx.Credential{ - Sigs: make([][secp256k1.SignatureLen]byte, len(keys)), + Sigs: make([][crypto.SECP256K1RSigLen]byte, len(keys)), } for i, key := range keys { sig, err := key.SignHash(hash) // Sign hash @@ -201,7 +171,7 @@ func (tx *Tx) BlockFeeContribution(fixedFee bool, avaxAssetID ids.ID, baseFee *b if err != nil { return nil, nil, err } - txFee, err := CalculateDynamicFee(gasUsed, baseFee) + txFee, err := calculateDynamicFee(gasUsed, baseFee) if err != nil { return nil, nil, err } @@ -223,7 +193,7 @@ func (tx *Tx) BlockFeeContribution(fixedFee bool, avaxAssetID ids.ID, baseFee *b // innerSortInputsAndSigners implements sort.Interface for EVMInput type innerSortInputsAndSigners struct { inputs []EVMInput - signers [][]*secp256k1.PrivateKey + signers [][]*crypto.PrivateKeySECP256K1R } func (ins *innerSortInputsAndSigners) Less(i, j int) bool { @@ -242,13 +212,56 @@ func (ins *innerSortInputsAndSigners) Swap(i, j int) { } // SortEVMInputsAndSigners sorts the list of EVMInputs based on the addresses and assetIDs -func SortEVMInputsAndSigners(inputs []EVMInput, signers [][]*secp256k1.PrivateKey) { +func SortEVMInputsAndSigners(inputs []EVMInput, signers [][]*crypto.PrivateKeySECP256K1R) { sort.Sort(&innerSortInputsAndSigners{inputs: inputs, signers: signers}) } +// IsSortedAndUniqueEVMInputs returns true if the EVM Inputs are sorted and unique +// based on the account addresses +func IsSortedAndUniqueEVMInputs(inputs []EVMInput) bool { + return utils.IsSortedAndUnique(&innerSortInputsAndSigners{inputs: inputs}) +} + +// innerSortEVMOutputs implements sort.Interface for 
EVMOutput
+type innerSortEVMOutputs struct {
+	outputs []EVMOutput
+}
+
+func (outs *innerSortEVMOutputs) Less(i, j int) bool {
+	addrComp := bytes.Compare(outs.outputs[i].Address.Bytes(), outs.outputs[j].Address.Bytes())
+	if addrComp != 0 {
+		return addrComp < 0
+	}
+	return bytes.Compare(outs.outputs[i].AssetID[:], outs.outputs[j].AssetID[:]) < 0
+}
+
+func (outs *innerSortEVMOutputs) Len() int { return len(outs.outputs) }
+
+func (outs *innerSortEVMOutputs) Swap(i, j int) {
+	outs.outputs[j], outs.outputs[i] = outs.outputs[i], outs.outputs[j]
+}
+
+// SortEVMOutputs sorts the list of EVMOutputs based on the addresses and assetIDs
+// of the outputs
+func SortEVMOutputs(outputs []EVMOutput) {
+	sort.Sort(&innerSortEVMOutputs{outputs: outputs})
+}
+
+// IsSortedEVMOutputs returns true if the EVMOutputs are sorted
+// based on the account addresses and assetIDs
+func IsSortedEVMOutputs(outputs []EVMOutput) bool {
+	return sort.IsSorted(&innerSortEVMOutputs{outputs: outputs})
+}
+
+// IsSortedAndUniqueEVMOutputs returns true if the EVMOutputs are sorted
+// and unique based on the account addresses and assetIDs
+func IsSortedAndUniqueEVMOutputs(outputs []EVMOutput) bool {
+	return utils.IsSortedAndUnique(&innerSortEVMOutputs{outputs: outputs})
+}
+
 // calculates the amount of AVAX that must be burned by an atomic transaction
 // that consumes [cost] at [baseFee].
-func CalculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) {
+func calculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) {
 	if baseFee == nil {
 		return 0, errNilBaseFee
 	}
@@ -275,7 +288,7 @@ func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) {
 		// with txs initialized from the txID index.
 		copyTxs := make([]*Tx, len(txs))
 		copy(copyTxs, txs)
-		utils.Sort(copyTxs)
+		sort.Slice(copyTxs, func(i, j int) bool { return copyTxs[i].ID().Hex() < copyTxs[j].ID().Hex() })
 		txs = copyTxs
 	}
 	output := make(map[ids.ID]*atomic.Requests)
@@ -288,3 +301,14 @@ func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) {
 	}
 	return output, nil
 }
+
+// mergeAtomicOpsToMap merges atomic ops for [chainID] represented by [requests]
+// to the [output] map provided.
+func mergeAtomicOpsToMap(output map[ids.ID]*atomic.Requests, chainID ids.ID, requests *atomic.Requests) {
+	if request, exists := output[chainID]; exists {
+		request.PutRequests = append(request.PutRequests, requests.PutRequests...)
+		request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...)
+ } else { + output[chainID] = requests + } +} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index c5440ced8f..c03dbbe40a 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -12,60 +12,46 @@ import ( "math/big" "os" "path/filepath" + "sort" "strings" "testing" "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - - "github.com/ava-labs/coreth/eth/filters" - "github.com/ava-labs/coreth/internal/ethapi" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/utils" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + engCommon "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/eth" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/eth" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/rpc" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/coreth/accounts/abi" - accountKeystore "github.com/ava-labs/coreth/accounts/keystore" + accountKeystore "github.com/tenderly/coreth/accounts/keystore" ) var ( @@ -73,7 +59,7 @@ var ( testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} nonExistentID = ids.ID{'F'} - testKeys []*secp256k1.PrivateKey + testKeys []*crypto.PrivateKeySECP256K1R testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] testShortIDAddrs []ids.ShortID testAvaxAssetID = ids.ID{1, 2, 3} @@ -81,67 +67,41 @@ var ( password = "CjasdjhiPeirbSenfeI13" // #nosec G101 // Use chainId: 43111, so that it does not overlap with any Avalanche ChainIDs, which may have their // config overridden in vm.Initialize. 
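Editor's note (not part of the patch): each genesisJSONApricotPhaseN string below is the same base genesis with one more fork timestamp pinned to 0, so a small generator can derive the variants instead of hand-editing the escaped JSON. A minimal sketch, assuming only the standard library; buildGenesisJSON and its abbreviated key set are hypothetical (the alloc, hash, and remaining header fields are omitted here):

package main

import (
	"encoding/json"
	"fmt"
)

// buildGenesisJSON marshals a shared base chain config plus whichever
// fork timestamps should activate at genesis (value 0).
func buildGenesisJSON(forks ...string) (string, error) {
	config := map[string]interface{}{
		"chainId":             43111,
		"homesteadBlock":      0,
		"daoForkBlock":        0,
		"daoForkSupport":      true,
		"eip150Block":         0,
		"eip155Block":         0,
		"eip158Block":         0,
		"byzantiumBlock":      0,
		"constantinopleBlock": 0,
		"petersburgBlock":     0,
		"istanbulBlock":       0,
		"muirGlacierBlock":    0,
	}
	for _, fork := range forks {
		config[fork] = 0 // e.g. "apricotPhase1BlockTimestamp"
	}
	genesis := map[string]interface{}{
		"config":     config,
		"nonce":      "0x0",
		"timestamp":  "0x0",
		"gasLimit":   "0x5f5e100",
		"difficulty": "0x0",
	}
	b, err := json.Marshal(genesis)
	return string(b), err
}

func main() {
	// Phase 2 genesis = base config + phase 1 and phase 2 activated.
	s, err := buildGenesisJSON("apricotPhase1BlockTimestamp", "apricotPhase2BlockTimestamp")
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}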
- genesisJSONApricotPhase0 = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhase1 = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhase2 = 
`{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhase3 = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhase4 = 
`{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhase5 = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - - genesisJSONApricotPhasePre6 = 
`{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhase6 = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0,"apricotPhase6BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONApricotPhasePost6 = 
`{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0,"apricotPhase6BlockTimestamp":0,"apricotPhasePost6BlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - - genesisJSONBanff = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0,"apricotPhase6BlockTimestamp":0,"apricotPhasePost6BlockTimestamp":0,"banffBlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONCortina = 
`{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0,"apricotPhase6BlockTimestamp":0,"apricotPhasePost6BlockTimestamp":0,"banffBlockTimestamp":0,"cortinaBlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONDurango = `{"config":{"chainId":43111,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0,"apricotPhase6BlockTimestamp":0,"apricotPhasePost6BlockTimestamp":0,"banffBlockTimestamp":0,"cortinaBlockTimestamp":0,"durangoBlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0x99b9DEA54C48Dfea6aA9A4Ca4623633EE04ddbB5":{"balance":"0x56bc75e2d63100000"},"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` - genesisJSONLatest = genesisJSONDurango - - genesisJSONCancun = 
`{"config":{"chainId":43111,"cancunTime":0,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"apricotPhase1BlockTimestamp":0,"apricotPhase2BlockTimestamp":0,"apricotPhase3BlockTimestamp":0,"apricotPhase4BlockTimestamp":0,"apricotPhase5BlockTimestamp":0,"apricotPhasePre6BlockTimestamp":0,"apricotPhase6BlockTimestamp":0,"apricotPhasePost6BlockTimestamp":0,"banffBlockTimestamp":0,"cortinaBlockTimestamp":0,"durangoBlockTimestamp":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"0x99b9DEA54C48Dfea6aA9A4Ca4623633EE04ddbB5":{"balance":"0x56bc75e2d63100000"},"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}` + genesisJSONApricotPhase0 = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" + genesisJSONApricotPhase1 = 
"{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" + genesisJSONApricotPhase2 = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" + genesisJSONApricotPhase3 = 
"{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" + genesisJSONApricotPhase4 = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" + genesisJSONApricotPhase5 = 
"{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0, \"apricotPhase5BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" apricotRulesPhase0 = params.Rules{} - apricotRulesPhase1 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true}} - apricotRulesPhase2 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true}} - apricotRulesPhase3 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true}} - apricotRulesPhase4 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true}} - apricotRulesPhase5 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true}} - apricotRulesPhase6 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true}} - banffRules = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true, IsBanff: true}} - // cortinaRules = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true, IsBanff: true, IsCortina: true}} + apricotRulesPhase1 = params.Rules{IsApricotPhase1: true} + apricotRulesPhase2 = params.Rules{IsApricotPhase1: true, IsApricotPhase2: true} + apricotRulesPhase3 = params.Rules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true} + apricotRulesPhase4 = params.Rules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, 
IsApricotPhase4: true} + apricotRulesPhase5 = params.Rules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true} ) func init() { var b []byte + factory := crypto.FactorySECP256K1R{} for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", } { - b, _ = cb58.Decode(key) - pk, _ := secp256k1.ToPrivateKey(b) - testKeys = append(testKeys, pk) - testEthAddrs = append(testEthAddrs, GetEthAddress(pk)) + b, _ = formatting.Decode(formatting.CB58, key) + pk, _ := factory.ToPrivateKey(b) + secpKey := pk.(*crypto.PrivateKeySECP256K1R) + testKeys = append(testKeys, secpKey) + testEthAddrs = append(testEthAddrs, GetEthAddress(secpKey)) testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address()) } -} - -func newPrefundedGenesis( - balance int, - addresses ...common.Address, -) *core.Genesis { - alloc := core.GenesisAlloc{} - for _, address := range addresses { - alloc[address] = core.GenesisAccount{ - Balance: big.NewInt(int64(balance)), - } - } - return &core.Genesis{ - Config: params.TestChainConfig, - Difficulty: big.NewInt(0), - Alloc: alloc, - } + minBlockTime = time.Millisecond + maxBlockTime = time.Millisecond + minBlockTimeAP4 = time.Millisecond } // BuildGenesisTest returns the genesis bytes for Coreth VM to be used in testing @@ -164,7 +124,7 @@ func BuildGenesisTest(t *testing.T, genesisJSON string) []byte { } func NewContext() *snow.Context { - ctx := utils.TestSnowContext() + ctx := snow.DefaultContextTest() ctx.NodeID = ids.GenerateTestNodeID() ctx.NetworkID = testNetworkID ctx.ChainID = testCChainID @@ -176,102 +136,91 @@ func NewContext() *snow.Context { _ = aliaser.Alias(testCChainID, testCChainID.String()) _ = aliaser.Alias(testXChainID, "X") _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - testXChainID: constants.PrimaryNetworkID, - testCChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errors.New("unknown chain") - } - return subnetID, nil + ctx.SNLookup = &snLookup{ + chainsToSubnet: map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + testXChainID: constants.PrimaryNetworkID, + testCChainID: constants.PrimaryNetworkID, }, } - blsSecretKey, err := bls.NewSecretKey() - if err != nil { - panic(err) - } - ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID) - ctx.PublicKey = bls.PublicFromSecretKey(blsSecretKey) return ctx } -// setupGenesis sets up the genesis -// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func setupGenesis( - t *testing.T, +type snLookup struct { + chainsToSubnet map[ids.ID]ids.ID +} + +func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { + subnetID, ok := sn.chainsToSubnet[chainID] + if !ok { + return ids.ID{}, errors.New("unknown chain") + } + return subnetID, nil +} + +func setupGenesis(t *testing.T, genesisJSON string, ) (*snow.Context, - database.Database, + manager.Manager, []byte, - chan commonEng.Message, - *atomic.Memory, -) { - if len(genesisJSON) == 0 { - genesisJSON = genesisJSONLatest - } + chan engCommon.Message, + *atomic.Memory) { genesisBytes := BuildGenesisTest(t, genesisJSON) ctx := NewContext() - baseDB := 
memdb.New()
+	baseDBManager := manager.NewMemDB(version.NewDefaultVersion(1, 4, 5))
 
-	// initialize the atomic memory
-	atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB))
-	ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID)
+	m := &atomic.Memory{}
+	m.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, baseDBManager.Current().Database))
+	ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID)
 
 	// NB: this lock is intentionally left locked when this function returns.
 	// The caller of this function is responsible for unlocking.
 	ctx.Lock.Lock()
 
-	userKeystore := keystore.New(logging.NoLog{}, memdb.New())
+	userKeystore := keystore.New(logging.NoLog{}, manager.NewMemDB(version.NewDefaultVersion(1, 4, 5)))
 	if err := userKeystore.CreateUser(username, password); err != nil {
 		t.Fatal(err)
 	}
 	ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID)
 
-	issuer := make(chan commonEng.Message, 1)
-	prefixedDB := prefixdb.New([]byte{1}, baseDB)
-	return ctx, prefixedDB, genesisBytes, issuer, atomicMemory
+	issuer := make(chan engCommon.Message, 1)
+	prefixedDBManager := baseDBManager.NewPrefixDBManager([]byte{1})
+	return ctx, prefixedDBManager, genesisBytes, issuer, m
 }
 
 // GenesisVM creates a VM instance with the genesis test bytes and returns
-// the channel use to send messages to the engine, the VM, database manager,
-// sender, and atomic memory.
-// If [genesisJSON] is empty, defaults to using [genesisJSONLatest]
+// the channel used to send messages to the engine, the vm, and atomic memory
 func GenesisVM(t *testing.T,
 	finishBootstrapping bool,
 	genesisJSON string,
 	configJSON string,
 	upgradeJSON string,
-) (chan commonEng.Message,
-	*VM, database.Database,
+) (chan engCommon.Message,
+	*VM, manager.Manager,
 	*atomic.Memory,
-	*commonEng.SenderTest,
-) {
+	*engCommon.SenderTest) {
 	vm := &VM{}
-	vm.p2pSender = &commonEng.FakeSender{}
 	ctx, dbManager, genesisBytes, issuer, m := setupGenesis(t, genesisJSON)
-	appSender := &commonEng.SenderTest{T: t}
+	appSender := &engCommon.SenderTest{T: t}
 	appSender.CantSendAppGossip = true
-	appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil }
-	err := vm.Initialize(
-		context.Background(),
+	appSender.SendAppGossipF = func([]byte) error { return nil }
+	if err := vm.Initialize(
 		ctx,
 		dbManager,
 		genesisBytes,
 		[]byte(upgradeJSON),
 		[]byte(configJSON),
 		issuer,
-		[]*commonEng.Fx{},
+		[]*engCommon.Fx{},
 		appSender,
-	)
-	require.NoError(t, err, "error initializing GenesisVM")
+	); err != nil {
+		t.Fatal(err)
+	}
 
 	if finishBootstrapping {
-		require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping))
-		require.NoError(t, vm.SetState(context.Background(), snow.NormalOp))
+		assert.NoError(t, vm.SetState(snow.Bootstrapping))
+		assert.NoError(t, vm.SetState(snow.NormalOp))
 	}
 
 	return issuer, vm, dbManager, m, appSender
@@ -314,9 +263,8 @@ func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index
 
 // GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map
 // Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically.
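Editor's note (not part of the patch): the comment above describes why the funding txID is derived from a hash of the destination address: the same address always yields the same UTXO ID, so test state is reproducible across runs. The patch does this with hashing.ComputeHash256 and ids.ToID (visible just below); a minimal standard-library sketch of the same idea, where deterministicTxID is a hypothetical name:

package main

import (
	"crypto/sha256"
	"fmt"
)

// deterministicTxID hashes an address into a stable 32-byte ID, so
// repeated test runs mint UTXOs under identical identifiers.
func deterministicTxID(addr []byte) [32]byte {
	return sha256.Sum256(addr)
}

func main() {
	id := deterministicTxID([]byte{0x01, 0x02, 0x03})
	fmt.Printf("txID: %x\n", id[:])
}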
-// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *atomic.Memory, *commonEng.SenderTest) { - issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) +func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan engCommon.Message, *VM, manager.Manager, *atomic.Memory, *engCommon.SenderTest) { + issuer, vm, dbManager, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) for addr, avaxAmount := range utxos { txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) if err != nil { @@ -327,226 +275,60 @@ func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON stri } } - return issuer, vm, db, sharedMemory, sender + return issuer, vm, dbManager, sharedMemory, sender } func TestVMConfig(t *testing.T) { txFeeCap := float64(11) - enabledEthAPIs := []string{"debug"} - configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) - _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") - require.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") - require.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") - require.NoError(t, vm.Shutdown(context.Background())) -} - -func TestCrossChainMessagestoVM(t *testing.T) { - crossChainCodec := message.CrossChainCodec - require := require.New(t) - - // the following is based on this contract: - // contract T { - // event received(address sender, uint amount, bytes memo); - // event receivedAddr(address sender); - // - // function receive(bytes calldata memo) external payable returns (string memory res) { - // emit received(msg.sender, msg.value, memo); - // emit receivedAddr(msg.sender); - // return "hello world"; - // } - // } - - const abiBin = 
`0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` - const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]` - parsed, err := abi.JSON(strings.NewReader(abiJSON)) - require.NoErrorf(err, "could not parse abi: %v") - - calledSendCrossChainAppResponseFn := false - importAmount := uint64(5000000000) - issuer, vm, _, _, appSender := GenesisVMWithUTXOs(t, true, "", "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - }() - - appSender.SendCrossChainAppResponseF = func(ctx context.Context, respondingChainID ids.ID, requestID uint32, responseBytes []byte) { - calledSendCrossChainAppResponseFn = true - - var response message.EthCallResponse - if _, err = crossChainCodec.Unmarshal(responseBytes, &response); err != nil { - require.NoErrorf(err, "unexpected error during unmarshal: %w") - } - - result := core.ExecutionResult{} - err = json.Unmarshal(response.ExecutionResult, &result) - require.NoError(err) - require.NotNil(result.ReturnData) - - finalResult, err := parsed.Unpack("receive", result.ReturnData) - require.NoError(err) - require.NotNil(finalResult) - require.Equal("hello world", finalResult[0]) - } - - newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - require.NoError(err) - - err = vm.mempool.AddLocalTx(importTx) - require.NoError(err) - - <-issuer - - blk1, err := vm.BuildBlock(context.Background()) - 
require.NoError(err) - - err = blk1.Verify(context.Background()) - require.NoError(err) - - if status := blk1.Status(); status != choices.Processing { - t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) - } - - err = vm.SetPreference(context.Background(), blk1.ID()) - require.NoError(err) - - err = blk1.Accept(context.Background()) - require.NoError(err) - - newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk1.ID()) { - t.Fatalf("Expected new block to match") - } - - if status := blk1.Status(); status != choices.Accepted { - t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) - } - - lastAcceptedID, err := vm.LastAccepted(context.Background()) - require.NoError(err) - - if lastAcceptedID != blk1.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk1.ID(), lastAcceptedID) - } - - contractTx := types.NewContractCreation(0, common.Big0, 200000, new(big.Int).Mul(big.NewInt(3), initialBaseFee), common.FromHex(abiBin)) - contractSignedTx, err := types.SignTx(contractTx, types.NewEIP155Signer(vm.chainID), testKeys[0].ToECDSA()) - require.NoError(err) - - errs := vm.txPool.AddRemotesSync([]*types.Transaction{contractSignedTx}) - for _, err := range errs { - require.NoError(err) - } - testAddr := crypto.PubkeyToAddress(testKeys[0].ToECDSA().PublicKey) - contractAddress := crypto.CreateAddress(testAddr, 0) - - <-issuer - - blk2, err := vm.BuildBlock(context.Background()) - require.NoError(err) - - err = blk2.Verify(context.Background()) - require.NoError(err) - - if status := blk2.Status(); status != choices.Processing { - t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) - } - - err = vm.SetPreference(context.Background(), blk2.ID()) - require.NoError(err) - - err = blk2.Accept(context.Background()) - require.NoError(err) - - newHead = <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk2.ID()) { - t.Fatalf("Expected new block to match") - } - - if status := blk2.Status(); status != choices.Accepted { - t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) - } - - lastAcceptedID, err = vm.LastAccepted(context.Background()) - require.NoError(err) - - if lastAcceptedID != blk2.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) - } - - input, err := parsed.Pack("receive", []byte("X")) - require.NoError(err) - - data := hexutil.Bytes(input) - - requestArgs, err := json.Marshal(ðapi.TransactionArgs{ - To: &contractAddress, - Data: &data, - }) - require.NoError(err) - - var ethCallRequest message.CrossChainRequest = message.EthCallRequest{ - RequestArgs: requestArgs, - } - - crossChainRequest, err := crossChainCodec.Marshal(message.Version, ðCallRequest) - require.NoError(err) - - requestingChainID := ids.ID(common.BytesToHash([]byte{1, 2, 3, 4, 5})) - - // we need all items in the acceptor queue to be processed before we process a cross chain request - vm.blockChain.DrainAcceptorQueue() - err = vm.Network.CrossChainAppRequest(context.Background(), requestingChainID, 1, time.Now().Add(60*time.Second), crossChainRequest) - require.NoError(err) - require.True(calledSendCrossChainAppResponseFn, "sendCrossChainAppResponseFn was not called") + enabledEthAPIs := []string{"internal-private-debug"} + configJSON := fmt.Sprintf("{\"rpc-tx-fee-cap\": %g,\"eth-apis\": %s}", txFeeCap, 
fmt.Sprintf("[%q]", enabledEthAPIs[0])) + _, vm, _, _, _ := GenesisVM(t, false, genesisJSONApricotPhase0, configJSON, "") + assert.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") + assert.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") + assert.NoError(t, vm.Shutdown()) } func TestVMConfigDefaults(t *testing.T) { txFeeCap := float64(11) - enabledEthAPIs := []string{"debug"} - configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) - _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") + enabledEthAPIs := []string{"internal-private-debug"} + configJSON := fmt.Sprintf("{\"rpc-tx-fee-cap\": %g,\"eth-apis\": %s}", txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) + _, vm, _, _, _ := GenesisVM(t, false, genesisJSONApricotPhase0, configJSON, "") var vmConfig Config vmConfig.SetDefaults() vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs - require.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") - require.NoError(t, vm.Shutdown(context.Background())) + assert.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") + assert.NoError(t, vm.Shutdown()) } func TestVMNilConfig(t *testing.T) { - _, vm, _, _, _ := GenesisVM(t, false, "", "", "") + _, vm, _, _, _ := GenesisVM(t, false, genesisJSONApricotPhase0, "", "") // VM Config should match defaults if no config is passed in var vmConfig Config vmConfig.SetDefaults() - require.Equal(t, vmConfig, vm.config, "VM Config should match default config") - require.NoError(t, vm.Shutdown(context.Background())) + assert.Equal(t, vmConfig, vm.config, "VM Config should match default config") + assert.NoError(t, vm.Shutdown()) } -func TestVMContinuousProfiler(t *testing.T) { +func TestVMContinuousProfiler(t *testing.T) { profilerDir := t.TempDir() profilerFrequency := 500 * time.Millisecond - configJSON := fmt.Sprintf(`{"continuous-profiler-dir": %q,"continuous-profiler-frequency": "500ms"}`, profilerDir) - _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") - require.Equal(t, vm.config.ContinuousProfilerDir, profilerDir, "profiler dir should be set") - require.Equal(t, vm.config.ContinuousProfilerFrequency.Duration, profilerFrequency, "profiler frequency should be set") + configJSON := fmt.Sprintf("{\"continuous-profiler-dir\": %q,\"continuous-profiler-frequency\": \"500ms\"}", profilerDir) + _, vm, _, _, _ := GenesisVM(t, false, genesisJSONApricotPhase0, configJSON, "") + assert.Equal(t, vm.config.ContinuousProfilerDir, profilerDir, "profiler dir should be set") + assert.Equal(t, vm.config.ContinuousProfilerFrequency.Duration, profilerFrequency, "profiler frequency should be set") // Sleep for twice the frequency of the profiler to give it time // to generate the first profile.
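// Editor's aside, not part of the diff: the tests above assemble the VM config
// JSON by hand with escaped quotes. A minimal sketch of producing the same
// payload with encoding/json instead, assuming only the "rpc-tx-fee-cap" and
// "eth-apis" keys that appear in these tests:
//
//	package main
//
//	import (
//		"encoding/json"
//		"fmt"
//	)
//
//	func main() {
//		cfg := map[string]interface{}{
//			"rpc-tx-fee-cap": float64(11),
//			"eth-apis":       []string{"internal-private-debug"},
//		}
//		b, err := json.Marshal(cfg)
//		if err != nil {
//			panic(err)
//		}
//		// Prints: {"eth-apis":["internal-private-debug"],"rpc-tx-fee-cap":11}
//		fmt.Println(string(b))
//	}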
time.Sleep(2 * time.Second) - require.NoError(t, vm.Shutdown(context.Background())) + assert.NoError(t, vm.Shutdown()) // Check that the first profile was generated expectedFileName := filepath.Join(profilerDir, "cpu.profile.1") _, err := os.Stat(expectedFileName) - require.NoError(t, err, "Expected continuous profiler to generate the first CPU profile at %s", expectedFileName) + assert.NoError(t, err, "Expected continuous profiler to generate the first CPU profile at %s", expectedFileName) } func TestVMUpgrades(t *testing.T) { @@ -556,48 +338,33 @@ func TestVMUpgrades(t *testing.T) { expectedGasPrice *big.Int }{ { - name: "Apricot Phase 3", - genesis: genesisJSONApricotPhase3, - expectedGasPrice: big.NewInt(0), + name: "Apricot Phase 0", + genesis: genesisJSONApricotPhase0, + expectedGasPrice: big.NewInt(params.LaunchMinGasPrice), }, { - name: "Apricot Phase 4", - genesis: genesisJSONApricotPhase4, - expectedGasPrice: big.NewInt(0), + name: "Apricot Phase 1", + genesis: genesisJSONApricotPhase1, + expectedGasPrice: big.NewInt(params.ApricotPhase1MinGasPrice), }, { - name: "Apricot Phase 5", - genesis: genesisJSONApricotPhase5, - expectedGasPrice: big.NewInt(0), + name: "Apricot Phase 2", + genesis: genesisJSONApricotPhase2, + expectedGasPrice: big.NewInt(params.ApricotPhase1MinGasPrice), }, { - name: "Apricot Phase Pre 6", - genesis: genesisJSONApricotPhasePre6, - expectedGasPrice: big.NewInt(0), - }, - { - name: "Apricot Phase 6", - genesis: genesisJSONApricotPhase6, - expectedGasPrice: big.NewInt(0), - }, - { - name: "Apricot Phase Post 6", - genesis: genesisJSONApricotPhasePost6, - expectedGasPrice: big.NewInt(0), - }, - { - name: "Banff", - genesis: genesisJSONBanff, + name: "Apricot Phase 3", + genesis: genesisJSONApricotPhase3, expectedGasPrice: big.NewInt(0), }, { - name: "Cortina", - genesis: genesisJSONCortina, + name: "Apricot Phase 4", + genesis: genesisJSONApricotPhase4, expectedGasPrice: big.NewInt(0), }, { - name: "Durango", - genesis: genesisJSONDurango, + name: "Apricot Phase 5", + genesis: genesisJSONApricotPhase5, expectedGasPrice: big.NewInt(0), }, } @@ -605,21 +372,19 @@ func TestVMUpgrades(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, true, test.genesis, "", "") - if gasPrice := vm.txPool.GasTip(); gasPrice.Cmp(test.expectedGasPrice) != 0 { + if gasPrice := vm.chain.GetTxPool().GasPrice(); gasPrice.Cmp(test.expectedGasPrice) != 0 { t.Fatalf("Expected pool gas price to be %d but found %d", test.expectedGasPrice, gasPrice) } defer func() { shutdownChan := make(chan error, 1) shutdownFunc := func() { - err := vm.Shutdown(context.Background()) + err := vm.Shutdown() shutdownChan <- err } go shutdownFunc() - shutdownTimeout := 250 * time.Millisecond + shutdownTimeout := 50 * time.Millisecond ticker := time.NewTicker(shutdownTimeout) - defer ticker.Stop() - select { case <-ticker.C: t.Fatalf("VM shutdown took longer than timeout: %v", shutdownTimeout) @@ -630,7 +395,7 @@ func TestVMUpgrades(t *testing.T) { } }() - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } @@ -639,7 +404,7 @@ func TestVMUpgrades(t *testing.T) { t.Fatal("Expected last accepted block to match the genesis block hash") } - genesisBlk, err := vm.GetBlock(context.Background(), lastAcceptedID) + genesisBlk, err := vm.GetBlock(lastAcceptedID) if err != nil { t.Fatalf("Failed to get genesis block due to %s", err) } @@ -648,7 +413,7 @@ func TestVMUpgrades(t *testing.T) { 
t.Fatalf("Expected height of genesis block to be 0, found: %d", height) } - if _, err := vm.ParseBlock(context.Background(), genesisBlk.Bytes()); err != nil { + if _, err := vm.ParseBlock(genesisBlk.Bytes()); err != nil { t.Fatalf("Failed to parse genesis block due to %s", err) } @@ -660,43 +425,6 @@ func TestVMUpgrades(t *testing.T) { } } -func TestImportMissingUTXOs(t *testing.T) { - // make a VM with a shared memory that has an importable UTXO to build a block - importAmount := uint64(50000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(t, err) - }() - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - require.NoError(t, err) - err = vm.mempool.AddLocalTx(importTx) - require.NoError(t, err) - <-issuer - blk, err := vm.BuildBlock(context.Background()) - require.NoError(t, err) - - // make another VM which is missing the UTXO in shared memory - _, vm2, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase2, "", "") - defer func() { - err := vm2.Shutdown(context.Background()) - require.NoError(t, err) - }() - - vm2Blk, err := vm2.ParseBlock(context.Background(), blk.Bytes()) - require.NoError(t, err) - err = vm2Blk.Verify(context.Background()) - require.ErrorIs(t, err, errMissingUTXOs) - - // This should not result in a bad block since the missing UTXO should - // prevent InsertBlockManual from being called. - badBlocks, _ := vm2.blockChain.BadBlocks() - require.Len(t, badBlocks, 0) -} - // Simple test to ensure we can issue an import transaction followed by an export transaction // and they will be indexed correctly when accepted.
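// Editor's aside, not part of the diff: in the test below, the export amount is
// importAmount-(2*params.AvalancheAtomicTxFee), because one atomic tx fee is
// paid on the import and a second on the export. A self-contained sketch of
// that accounting, with made-up numbers standing in for the real importAmount
// and params.AvalancheAtomicTxFee:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		importAmount := uint64(50000000) // assumed figure for illustration
//		atomicTxFee := uint64(1000000)   // stand-in for params.AvalancheAtomicTxFee
//		// The import consumes one fee and the export a second, so the
//		// exportable balance is the import amount minus two fees.
//		exportAmount := importAmount - 2*atomicTxFee
//		fmt.Println(exportAmount) // 48000000
//	}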
func TestIssueAtomicTxs(t *testing.T) { @@ -706,28 +434,28 @@ func TestIssueAtomicTxs(t *testing.T) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -735,11 +463,11 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -747,46 +475,29 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { + if lastAcceptedID, err := vm.LastAccepted(); err != nil { t.Fatal(err) } else if lastAcceptedID != blk.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) } - vm.blockChain.DrainAcceptorQueue() - filterAPI := filters.NewFilterAPI(filters.NewFilterSystem(vm.eth.APIBackend, filters.Config{ - Timeout: 5 * time.Minute, - })) - blockHash := common.Hash(blk.ID()) - logs, err := filterAPI.GetLogs(context.Background(), filters.FilterCriteria{ - BlockHash: &blockHash, - }) - if err != nil { - t.Fatal(err) - } - if len(logs) != 0 { - t.Fatalf("Expected log length to be 0, but found %d", len(logs)) - } - if logs == nil { - t.Fatal("Expected logs to be non-nil") - } - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(exportTx); err != nil { + if err := vm.issueTx(exportTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk2, err := vm.BuildBlock(context.Background()) + blk2, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk2.Verify(context.Background()); err != nil { + if err := blk2.Verify(); err != nil { t.Fatal(err) } @@ -794,7 +505,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk2.Accept(context.Background()); err != nil { + if err := blk2.Accept(); err != nil { t.Fatal(err) } @@ -802,7 +513,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - if lastAcceptedID, err := 
vm.LastAccepted(context.Background()); err != nil { + if lastAcceptedID, err := vm.LastAccepted(); err != nil { t.Fatal(err) } else if lastAcceptedID != blk2.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) @@ -824,36 +535,36 @@ func TestIssueAtomicTxs(t *testing.T) { func TestBuildEthTxBlock(t *testing.T) { importAmount := uint64(20000000) - issuer, vm, dbManager, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, `{"pruning-enabled":true}`, "", map[ids.ShortID]uint64{ + issuer, vm, dbManager, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, "{\"pruning-enabled\":true}", "", map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + vm.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk1, err := vm.BuildBlock(context.Background()) + blk1, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk1.Verify(context.Background()); err != nil { + if err := blk1.Verify(); err != nil { t.Fatal(err) } @@ -861,11 +572,11 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk1.ID()); err != nil { + if err := vm.SetPreference(blk1.ID()); err != nil { t.Fatal(err) } - if err := blk1.Accept(context.Background()); err != nil { + if err := blk1.Accept(); err != nil { t.Fatal(err) } @@ -883,7 +594,7 @@ func TestBuildEthTxBlock(t *testing.T) { } txs[i] = signedTx } - errs := vm.txPool.AddRemotesSync(txs) + errs := vm.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -892,12 +603,12 @@ func TestBuildEthTxBlock(t *testing.T) { <-issuer - blk2, err := vm.BuildBlock(context.Background()) + blk2, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk2.Verify(context.Background()); err != nil { + if err := blk2.Verify(); err != nil { t.Fatal(err) } @@ -905,7 +616,7 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk2.Accept(context.Background()); err != nil { + if err := blk2.Accept(); err != nil { t.Fatal(err) } @@ -918,7 +629,7 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } @@ -927,13 +638,13 @@ func TestBuildEthTxBlock(t *testing.T) { } ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*Block).ethBlock - if ethBlk1Root := ethBlk1.Root(); !vm.blockChain.HasState(ethBlk1Root) { + if ethBlk1Root := ethBlk1.Root(); !vm.chain.BlockChain().HasState(ethBlk1Root) { 
t.Fatalf("Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") } // Clear the cache and ensure that GetBlock returns internal blocks with the correct status vm.State.Flush() - blk2Refreshed, err := vm.GetBlockInternal(context.Background(), blk2.ID()) + blk2Refreshed, err := vm.GetBlockInternal(blk2.ID()) if err != nil { t.Fatal(err) } @@ -942,7 +653,7 @@ func TestBuildEthTxBlock(t *testing.T) { } blk1RefreshedID := blk2Refreshed.Parent() - blk1Refreshed, err := vm.GetBlockInternal(context.Background(), blk1RefreshedID) + blk1Refreshed, err := vm.GetBlockInternal(blk1RefreshedID) if err != nil { t.Fatal(err) } @@ -956,27 +667,26 @@ func TestBuildEthTxBlock(t *testing.T) { restartedVM := &VM{} if err := restartedVM.Initialize( - context.Background(), NewContext(), dbManager, []byte(genesisJSONApricotPhase2), []byte(""), - []byte(`{"pruning-enabled":true}`), + []byte("{\"pruning-enabled\":true}"), issuer, - []*commonEng.Fx{}, + []*engCommon.Fx{}, nil, ); err != nil { t.Fatal(err) } // State root should not have been committed and discarded on restart - if ethBlk1Root := ethBlk1.Root(); restartedVM.blockChain.HasState(ethBlk1Root) { + if ethBlk1Root := ethBlk1.Root(); restartedVM.chain.BlockChain().HasState(ethBlk1Root) { t.Fatalf("Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") } // State root should be committed when accepted tip on shutdown ethBlk2 := blk2.(*chain.BlockWrapper).Block.(*Block).ethBlock - if ethBlk2Root := ethBlk2.Root(); !restartedVM.blockChain.HasState(ethBlk2Root) { + if ethBlk2Root := ethBlk2.Root(); !restartedVM.chain.BlockChain().HasState(ethBlk2Root) { t.Fatalf("Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") } } @@ -990,7 +700,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() @@ -998,38 +708,38 @@ func testConflictingImportTxs(t *testing.T, genesis string) { importTxs := make([]*Tx, 0, 3) conflictTxs := make([]*Tx, 0, 3) for i, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*secp256k1.PrivateKey{key}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) if err != nil { t.Fatal(err) } importTxs = append(importTxs, importTx) conflictAddr := testEthAddrs[(i+1)%len(testEthAddrs)] - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictAddr, initialBaseFee, []*secp256k1.PrivateKey{key}) + conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictAddr, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) if err != nil { t.Fatal(err) } conflictTxs = append(conflictTxs, conflictTx) } - expectedParentBlkID, err := vm.LastAccepted(context.Background()) + expectedParentBlkID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } for i, tx := range importTxs[:2] { - if err := vm.mempool.AddLocalTx(tx); err != nil { + if err := vm.issueTx(tx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -1042,7 +752,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { } 
expectedParentBlkID = blk.ID() - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } } @@ -1051,7 +761,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { // the VM returns an error when it attempts to issue the conflict into the mempool // and when it attempts to build a block with the conflict force added to the mempool. for i, tx := range conflictTxs[:2] { - if err := vm.mempool.AddLocalTx(tx); err == nil { + if err := vm.issueTx(tx, true /*=local*/); err == nil { t.Fatal("Expected issueTx to fail due to conflicting transaction") } // Force issue transaction directly to the mempool @@ -1061,7 +771,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { <-issuer vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - _, err = vm.BuildBlock(context.Background()) + _, err = vm.BuildBlock() // The new block is verified in BuildBlock, so // BuildBlock should fail due to an attempt to // double spend an atomic UTXO. @@ -1073,18 +783,18 @@ func testConflictingImportTxs(t *testing.T, genesis string) { // Generate one more valid block so that we can copy the header to create an invalid block // with modified extra data. This new block will be invalid for more than one reason (invalid merkle root) // so we check to make sure that the expected error is returned from block verification. - if err := vm.mempool.AddLocalTx(importTxs[2]); err != nil { + if err := vm.issueTx(importTxs[2], true); err != nil { t.Fatal(err) } <-issuer vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - validBlock, err := vm.BuildBlock(context.Background()) + validBlock, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := validBlock.Verify(context.Background()); err != nil { + if err := validBlock.Verify(); err != nil { t.Fatal(err) } @@ -1102,7 +812,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - conflictingAtomicTxBlock := types.NewBlockWithExtData( + conflictingAtomicTxBlock := types.NewBlock( types.CopyHeader(validEthBlock.Header()), nil, nil, @@ -1117,12 +827,12 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - parsedBlock, err := vm.ParseBlock(context.Background(), blockBytes) + parsedBlock, err := vm.ParseBlock(blockBytes) if err != nil { t.Fatal(err) } - if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { + if err := parsedBlock.Verify(); !errors.Is(err, errConflictingAtomicInputs) { t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) } @@ -1138,7 +848,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { header := types.CopyHeader(validEthBlock.Header()) header.ExtDataGasUsed.Mul(common.Big2, header.ExtDataGasUsed) - internalConflictBlock := types.NewBlockWithExtData( + internalConflictBlock := types.NewBlock( header, nil, nil, @@ -1153,12 +863,12 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - parsedBlock, err = vm.ParseBlock(context.Background(), blockBytes) + parsedBlock, err = vm.ParseBlock(blockBytes) if err != nil { t.Fatal(err) } - if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { + if err := parsedBlock.Verify(); !errors.Is(err, errConflictingAtomicInputs) { t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) } } @@ -1181,10 +891,10 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { 
t.Fatal(err) } - if err := vm.mempool.AddLocalTx(tx1); err != nil { + if err := vm.issueTx(tx1, true); err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(tx2); err != nil { + if err := vm.issueTx(tx2, true); err != nil { t.Fatal(err) } @@ -1208,10 +918,10 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(tx1); err != nil { + if err := vm.issueTx(tx1, true); err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(tx2); err != nil { + if err := vm.issueTx(tx2, true); err != nil { t.Fatal(err) } @@ -1241,15 +951,15 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx1); err != nil { + if err := vm.issueTx(importTx1, true /*=local*/); err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx2); err != nil { + if err := vm.issueTx(importTx2, true /*=local*/); err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, errConflictingAtomicTx) { + if err := vm.issueTx(reissuanceTx1, true /*=local*/); !errors.Is(err, errConflictingAtomicTx) { t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicTx, err) } @@ -1261,7 +971,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(reissuanceTx2); err != nil { + if err := vm.issueTx(reissuanceTx2, true /*=local*/); err != nil { t.Fatal(err) } @@ -1293,7 +1003,6 @@ func TestConflictingImportTxsAcrossBlocks(t *testing.T) { "apricotPhase4": genesisJSONApricotPhase4, "apricotPhase5": genesisJSONApricotPhase5, } { - genesis := genesis t.Run(name, func(t *testing.T) { testConflictingImportTxs(t, genesis) }) @@ -1304,55 +1013,54 @@ func TestConflictingImportTxsAcrossBlocks(t *testing.T) { // then calling SetPreference on block B (when it becomes preferred) // and the head of a longer chain (block D) does not corrupt the // canonical chain. 
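// Editor's aside, not part of the diff: the fork shape in the diagram below can
// be modeled as parent pointers, with the canonical chain being the walk from
// the preferred tip back to the accepted root. A minimal sketch under that
// assumption:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		parent := map[string]string{"B": "A", "C": "A", "D": "C"}
//		// Walk from the preferred tip D back toward the root A.
//		chain := []string{}
//		for blk := "D"; blk != ""; blk = parent[blk] {
//			chain = append(chain, blk)
//		}
//		fmt.Println(chain) // [D C A]: once D is preferred, B is orphaned
//	}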
-// -// A -// / \ -// B C -// | -// D +// A +// / \ +// B C +// | +// D func TestSetPreferenceRace(t *testing.T) { // Create two VMs which will agree on block A and then // build the two distinct preferred chains above importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":true}`, "", map[ids.ShortID]uint64{ + issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "{\"pruning-enabled\":true}", "", map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":true}`, "", map[ids.ShortID]uint64{ + issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "{\"pruning-enabled\":true}", "", map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, }) defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + vm1.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan1) newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) + vm2.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan2) - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.AddLocalTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer1 - vm1BlkA, err := vm1.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(context.Background()); err != nil { + if err := vm1BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -1360,28 +1068,28 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(context.Background()); err != nil { + if err := vm2BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(context.Background()); err != nil { + if err := vm1BlkA.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(context.Background()); err != nil { + if err := vm2BlkA.Accept(); err != nil { t.Fatalf("VM2 failed to 
accept block: %s", err) } @@ -1409,7 +1117,7 @@ func TestSetPreferenceRace(t *testing.T) { var errs []error // Add the remote transactions, build the block, and set VM1's preference for block A - errs = vm1.txPool.AddRemotesSync(txs) + errs = vm1.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -1418,12 +1126,12 @@ func TestSetPreferenceRace(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock(context.Background()) + vm1BlkB, err := vm1.BuildBlock() if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(context.Background()); err != nil { + if err := vm1BlkB.Verify(); err != nil { t.Fatal(err) } @@ -1431,14 +1139,14 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { t.Fatal(err) } // Split the transactions over two blocks, and set VM2's preference to them in sequence // after building each block // Block C - errs = vm2.txPool.AddRemotesSync(txs[0:5]) + errs = vm2.chain.AddRemoteTxsSync(txs[0:5]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -1446,12 +1154,12 @@ func TestSetPreferenceRace(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock(context.Background()) + vm2BlkC, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(context.Background()); err != nil { + if err := vm2BlkC.Verify(); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } @@ -1459,7 +1167,7 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block C to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -1469,7 +1177,7 @@ func TestSetPreferenceRace(t *testing.T) { } // Block D - errs = vm2.txPool.AddRemotesSync(txs[5:10]) + errs = vm2.chain.AddRemoteTxsSync(txs[5:10]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -1477,12 +1185,12 @@ func TestSetPreferenceRace(t *testing.T) { } <-issuer2 - vm2BlkD, err := vm2.BuildBlock(context.Background()) + vm2BlkD, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } - if err := vm2BlkD.Verify(context.Background()); err != nil { + if err := vm2BlkD.Verify(); err != nil { t.Fatalf("BlkD failed verification on VM2: %s", err) } @@ -1490,7 +1198,7 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block D to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkD.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkD.ID()); err != nil { t.Fatal(err) } @@ -1500,53 +1208,53 @@ func TestSetPreferenceRace(t *testing.T) { // Here we parse them in reverse order to simulate receiving a chain from the tip // back to the last accepted block as would typically be the case in the consensus // engine - vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) + vm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes()) if err != nil { t.Fatalf("VM1 errored parsing blkD: %s", err) } - vm1BlkC, err := vm1.ParseBlock(context.Background(), 
vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) if err != nil { t.Fatalf("VM1 errored parsing blkC: %s", err) } // The blocks must be verified in order. This invariant is maintained // in the consensus engine. - if err := vm1BlkC.Verify(context.Background()); err != nil { + if err := vm1BlkC.Verify(); err != nil { t.Fatalf("VM1 BlkC failed verification: %s", err) } - if err := vm1BlkD.Verify(context.Background()); err != nil { + if err := vm1BlkD.Verify(); err != nil { t.Fatalf("VM1 BlkD failed verification: %s", err) } // Set VM1's preference to blockD, skipping blockC - if err := vm1.SetPreference(context.Background(), vm1BlkD.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkD.ID()); err != nil { t.Fatal(err) } // Accept the longer chain on both VMs and ensure there are no errors // VM1 Accepts the blocks in order - if err := vm1BlkC.Accept(context.Background()); err != nil { + if err := vm1BlkC.Accept(); err != nil { t.Fatalf("VM1 BlkC failed on accept: %s", err) } - if err := vm1BlkD.Accept(context.Background()); err != nil { + if err := vm1BlkD.Accept(); err != nil { t.Fatalf("VM1 BlkD failed on accept: %s", err) } // VM2 Accepts the blocks in order - if err := vm2BlkC.Accept(context.Background()); err != nil { + if err := vm2BlkC.Accept(); err != nil { t.Fatalf("VM2 BlkC failed on accept: %s", err) } - if err := vm2BlkD.Accept(context.Background()); err != nil { + if err := vm2BlkD.Accept(); err != nil { t.Fatalf("VM2 BlkD failed on accept: %s", err) } log.Info("Validating canonical chain") // Verify the Canonical Chain for Both VMs - if err := vm2.blockChain.ValidateCanonicalChain(); err != nil { + if err := vm2.chain.ValidateCanonicalChain(); err != nil { t.Fatalf("VM2 failed canonical chain verification due to: %s", err) } - if err := vm1.blockChain.ValidateCanonicalChain(); err != nil { + if err := vm1.chain.ValidateCanonicalChain(); err != nil { t.Fatalf("VM1 failed canonical chain verification due to: %s", err) } } @@ -1572,40 +1280,40 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + vm.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key0}) + importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key0}) if err != nil { t.Fatal(err) } // Create a conflicting transaction - importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*secp256k1.PrivateKey{key0}) + importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*crypto.PrivateKeySECP256K1R{key0}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx0A); err != nil { + if err := vm.issueTx(importTx0A, true /*=local*/); err != nil { t.Fatalf("Failed to issue importTx0A: %s", err) } <-issuer - blk0, err := vm.BuildBlock(context.Background()) + blk0, err := vm.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk0.Verify(context.Background()); err != nil { + if err := blk0.Verify(); err != nil { t.Fatalf("Block failed verification: %s", err) } - if err := 
vm.SetPreference(blk0.ID()); err != nil { t.Fatal(err) } @@ -1621,7 +1329,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } // Add the remote transactions, build the block, and set VM1's preference for block A - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + errs := vm.chain.AddRemoteTxsSync([]*types.Transaction{signedTx}) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -1630,44 +1338,44 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { <-issuer - blk1, err := vm.BuildBlock(context.Background()) + blk1, err := vm.BuildBlock() if err != nil { t.Fatalf("Failed to build blk1: %s", err) } - if err := blk1.Verify(context.Background()); err != nil { + if err := blk1.Verify(); err != nil { t.Fatalf("blk1 failed verification due to %s", err) } - if err := vm.SetPreference(context.Background(), blk1.ID()); err != nil { + if err := vm.SetPreference(blk1.ID()); err != nil { t.Fatal(err) } - importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key1}) + importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key1}) if err != nil { t.Fatalf("Failed to issue importTx1 due to: %s", err) } - if err := vm.mempool.AddLocalTx(importTx1); err != nil { + if err := vm.issueTx(importTx1, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk2, err := vm.BuildBlock(context.Background()) + blk2, err := vm.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk2.Verify(context.Background()); err != nil { + if err := blk2.Verify(); err != nil { t.Fatalf("Block failed verification: %s", err) } - if err := vm.SetPreference(context.Background(), blk2.ID()); err != nil { + if err := vm.SetPreference(blk2.ID()); err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx0B); err == nil { + if err := vm.issueTx(importTx0B, true /*=local*/); err == nil { t.Fatalf("Should not have been able to issue import tx with conflict") } // Force issue transaction directly into the mempool @@ -1676,7 +1384,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } <-issuer - _, err = vm.BuildBlock(context.Background()) + _, err = vm.BuildBlock() if err == nil { t.Fatal("Shouldn't have been able to build an invalid block") } @@ -1686,7 +1394,7 @@ func TestBonusBlocksTxs(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() @@ -1722,31 +1430,30 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - // Make [blk] a bonus block. 
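// Editor's aside, not part of the diff: as the surrounding test shows, a block
// registered as a "bonus block" still passes verification after its UTXOs are
// removed from shared memory, while non-bonus blocks fail. The removed line
// keys bonus blocks by height in a map; the added line keeps a set of block
// IDs. A minimal sketch of the set-based form, with a hypothetical Set type
// standing in for the real ID set:
//
//	package main
//
//	import "fmt"
//
//	type Set map[string]struct{}
//
//	func (s Set) Add(id string)           { s[id] = struct{}{} }
//	func (s Set) Contains(id string) bool { _, ok := s[id]; return ok }
//
//	func main() {
//		bonusBlocks := Set{}
//		bonusBlocks.Add("blkID")
//		fmt.Println(bonusBlocks.Contains("blkID")) // true
//	}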
- vm.atomicBackend.(*atomicBackend).bonusBlocks = map[uint64]ids.ID{blk.Height(): blk.ID()} + bonusBlocks.Add(blk.ID()) // Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -1754,11 +1461,11 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -1766,7 +1473,7 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } @@ -1779,10 +1486,9 @@ func TestBonusBlocksTxs(t *testing.T) { // will not attempt to orphan either when verifying blocks C and D // from another VM (which have a common ancestor under the finalized // frontier). -// -// A -// / \ -// B C +// A +// / \ +// B C // // verifies block B and C, then Accepts block B. Then we test to ensure // that the VM defends against any attempt to set the preference or to @@ -1790,48 +1496,48 @@ func TestBonusBlocksTxs(t *testing.T) { // get rejected. func TestReorgProtection(t *testing.T) { importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":false}`, "", map[ids.ShortID]uint64{ + issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "{\"pruning-enabled\":false}", "", map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":false}`, "", map[ids.ShortID]uint64{ + issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "{\"pruning-enabled\":false}", "", map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, }) defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + vm1.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan1) newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) + vm2.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan2) key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.AddLocalTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer1 - vm1BlkA, err := vm1.BuildBlock(context.Background()) + vm1BlkA, err := 
vm1.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(context.Background()); err != nil { + if err := vm1BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -1839,28 +1545,28 @@ func TestReorgProtection(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(context.Background()); err != nil { + if err := vm2BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(context.Background()); err != nil { + if err := vm1BlkA.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(context.Background()); err != nil { + if err := vm2BlkA.Accept(); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -1888,7 +1594,7 @@ func TestReorgProtection(t *testing.T) { var errs []error // Add the remote transactions, build the block, and set VM1's preference for block A - errs = vm1.txPool.AddRemotesSync(txs) + errs = vm1.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -1897,12 +1603,12 @@ func TestReorgProtection(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock(context.Background()) + vm1BlkB, err := vm1.BuildBlock() if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(context.Background()); err != nil { + if err := vm1BlkB.Verify(); err != nil { t.Fatal(err) } @@ -1910,14 +1616,14 @@ func TestReorgProtection(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { t.Fatal(err) } // Split the transactions over two blocks, and set VM2's preference to them in sequence // after building each block // Block C - errs = vm2.txPool.AddRemotesSync(txs[0:5]) + errs = vm2.chain.AddRemoteTxsSync(txs[0:5]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -1925,29 +1631,29 @@ func TestReorgProtection(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock(context.Background()) + vm2BlkC, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(context.Background()); err != nil { + if err := vm2BlkC.Verify(); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkC.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) + vm1BlkC, err := 
vm1.ParseBlock(vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm1BlkC.Verify(context.Background()); err != nil { + if err := vm1BlkC.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } // Accept B, such that block C should get Rejected. - if err := vm1BlkB.Accept(context.Background()); err != nil { + if err := vm1BlkB.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } @@ -1955,21 +1661,20 @@ func TestReorgProtection(t *testing.T) { // with the preferred chain lower than the last finalized block) // should NEVER happen. However, the VM defends against this // just in case. - if err := vm1.SetPreference(context.Background(), vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { + if err := vm1.SetPreference(vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { t.Fatalf("Unexpected error when setting preference that would trigger reorg: %s", err) } - if err := vm1BlkC.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { + if err := vm1BlkC.Accept(); !strings.Contains(err.Error(), "expected accepted block to have parent") { t.Fatalf("Unexpected error when setting block at finalized height: %s", err) } } // Regression test to ensure that a VM that accepts block C while preferring // block B will trigger a reorg. -// -// A -// / \ -// B C +// A +// / \ +// B C func TestNonCanonicalAccept(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -1980,40 +1685,40 @@ func TestNonCanonicalAccept(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + vm1.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan1) newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) + vm2.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan2) key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.AddLocalTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer1 - vm1BlkA, err := vm1.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(context.Background()); err != nil { + if err := vm1BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2021,28 +1726,28 @@ func TestNonCanonicalAccept(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := 
vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(context.Background()); err != nil { + if err := vm2BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(context.Background()); err != nil { + if err := vm1BlkA.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(context.Background()); err != nil { + if err := vm2BlkA.Accept(); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2070,7 +1775,7 @@ func TestNonCanonicalAccept(t *testing.T) { var errs []error // Add the remote transactions, build the block, and set VM1's preference for block A - errs = vm1.txPool.AddRemotesSync(txs) + errs = vm1.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -2079,12 +1784,12 @@ func TestNonCanonicalAccept(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock(context.Background()) + vm1BlkB, err := vm1.BuildBlock() if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(context.Background()); err != nil { + if err := vm1BlkB.Verify(); err != nil { t.Fatal(err) } @@ -2092,19 +1797,19 @@ func TestNonCanonicalAccept(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { t.Fatal(err) } - vm1.eth.APIBackend.SetAllowUnfinalizedQueries(true) + vm1.chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = true blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) } - errs = vm2.txPool.AddRemotesSync(txs[0:5]) + errs = vm2.chain.AddRemoteTxsSync(txs[0:5]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2112,26 +1817,26 @@ func TestNonCanonicalAccept(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock(context.Background()) + vm2BlkC, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm1BlkC.Verify(context.Background()); err != nil { + if err := vm1BlkC.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkC.Accept(context.Background()); err != nil { + if err := vm1BlkC.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != 
blkCHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) } } @@ -2139,12 +1844,11 @@ func TestNonCanonicalAccept(t *testing.T) { // Regression test to ensure that a VM that verifies block B, C, then // D (preferring block B) does not trigger a reorg through the re-verification // of block C or D. -// -// A -// / \ -// B C -// | -// D +// A +// / \ +// B C +// | +// D func TestStickyPreference(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -2155,40 +1859,40 @@ func TestStickyPreference(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + vm1.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan1) newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) + vm2.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan2) key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.AddLocalTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer1 - vm1BlkA, err := vm1.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(context.Background()); err != nil { + if err := vm1BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2196,28 +1900,28 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(context.Background()); err != nil { + if err := vm2BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(context.Background()); err != nil { + if err := vm1BlkA.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(context.Background()); err != nil { + if err := vm2BlkA.Accept(); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2245,7 +1949,7 @@ func TestStickyPreference(t *testing.T) 
{ var errs []error // Add the remote transactions, build the block, and set VM1's preference for block A - errs = vm1.txPool.AddRemotesSync(txs) + errs = vm1.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -2254,12 +1958,12 @@ func TestStickyPreference(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock(context.Background()) + vm1BlkB, err := vm1.BuildBlock() if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(context.Background()); err != nil { + if err := vm1BlkB.Verify(); err != nil { t.Fatal(err) } @@ -2267,19 +1971,19 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { t.Fatal(err) } - vm1.eth.APIBackend.SetAllowUnfinalizedQueries(true) + vm1.chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = true blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) } - errs = vm2.txPool.AddRemotesSync(txs[0:5]) + errs = vm2.chain.AddRemoteTxsSync(txs[0:5]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2287,12 +1991,12 @@ func TestStickyPreference(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock(context.Background()) + vm2BlkC, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(context.Background()); err != nil { + if err := vm2BlkC.Verify(); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } @@ -2300,7 +2004,7 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected status of built block C to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -2309,7 +2013,7 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected new block to match") } - errs = vm2.txPool.AddRemotesSync(txs[5:]) + errs = vm2.chain.AddRemoteTxsSync(txs[5:]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2317,19 +2021,19 @@ func TestStickyPreference(t *testing.T) { } <-issuer2 - vm2BlkD, err := vm2.BuildBlock(context.Background()) + vm2BlkD, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } // Parse blocks produced in vm2 - vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) + vm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } @@ -2337,75 +2041,75 @@ func TestStickyPreference(t *testing.T) { blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() // Should be no-ops - if err := 
vm1BlkC.Verify(context.Background()); err != nil { + if err := vm1BlkC.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Verify(context.Background()); err != nil { + if err := vm1BlkD.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { + if b := vm1.chain.GetBlockByNumber(blkDHeight); b != nil { t.Fatalf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex()) } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkBHash { t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) } // Should still be no-ops on re-verify - if err := vm1BlkC.Verify(context.Background()); err != nil { + if err := vm1BlkC.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Verify(context.Background()); err != nil { + if err := vm1BlkD.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { + if b := vm1.chain.GetBlockByNumber(blkDHeight); b != nil { t.Fatalf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex()) } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkBHash { t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) } // Should be queryable after setting preference to side chain - if err := vm1.SetPreference(context.Background(), vm1BlkD.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkD.ID()); err != nil { t.Fatal(err) } - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { + if b := vm1.chain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex()) } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkDHash { t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) } // Attempt to accept out of order - if err := vm1BlkD.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { + if err := vm1BlkD.Accept(); !strings.Contains(err.Error(), "expected accepted block to have parent") { t.Fatalf("unexpected error when accepting out of order block: %s", err) } // Accept in order - if err := vm1BlkC.Accept(context.Background()); err != nil { + if err := vm1BlkC.Accept(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } 
- if err := vm1BlkD.Accept(context.Background()); err != nil { + if err := vm1BlkD.Accept(); err != nil { t.Fatalf("Block failed acceptance on VM1: %s", err) } // Ensure queryable after accepting - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { + if b := vm1.chain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { + if b := vm1.chain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex()) } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkDHash { t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) } } @@ -2413,12 +2117,11 @@ func TestStickyPreference(t *testing.T) { // Regression test to ensure that a VM that prefers block B is able to parse // block C but unable to parse block D because it names B as an uncle, which // are not supported. -// -// A -// / \ -// B C -// | -// D +// A +// / \ +// B C +// | +// D func TestUncleBlock(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -2429,39 +2132,39 @@ func TestUncleBlock(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + vm1.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan1) newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) + vm2.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan2) key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.AddLocalTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer1 - vm1BlkA, err := vm1.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(context.Background()); err != nil { + if err := vm1BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2469,28 +2172,28 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(context.Background()); err != nil { + if err := vm2BlkA.Verify(); err != nil { 
t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(context.Background()); err != nil { + if err := vm1BlkA.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(context.Background()); err != nil { + if err := vm2BlkA.Accept(); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2515,7 +2218,7 @@ func TestUncleBlock(t *testing.T) { var errs []error - errs = vm1.txPool.AddRemotesSync(txs) + errs = vm1.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -2524,12 +2227,12 @@ func TestUncleBlock(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock(context.Background()) + vm1BlkB, err := vm1.BuildBlock() if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(context.Background()); err != nil { + if err := vm1BlkB.Verify(); err != nil { t.Fatal(err) } @@ -2537,11 +2240,11 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { t.Fatal(err) } - errs = vm2.txPool.AddRemotesSync(txs[0:5]) + errs = vm2.chain.AddRemoteTxsSync(txs[0:5]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2549,12 +2252,12 @@ func TestUncleBlock(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock(context.Background()) + vm2BlkC, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(context.Background()); err != nil { + if err := vm2BlkC.Verify(); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } @@ -2562,7 +2265,7 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected status of built block C to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -2571,7 +2274,7 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected new block to match") } - errs = vm2.txPool.AddRemotesSync(txs[5:10]) + errs = vm2.chain.AddRemoteTxsSync(txs[5:10]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2579,7 +2282,7 @@ func TestUncleBlock(t *testing.T) { } <-issuer2 - vm2BlkD, err := vm2.BuildBlock(context.Background()) + vm2BlkD, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } @@ -2590,26 +2293,27 @@ func TestUncleBlock(t *testing.T) { uncleBlockHeader := types.CopyHeader(blkDEthBlock.Header()) uncleBlockHeader.UncleHash = types.CalcUncleHash(uncles) - uncleEthBlock := types.NewBlockWithExtData( + uncleEthBlock := types.NewBlock( uncleBlockHeader, blkDEthBlock.Transactions(), uncles, nil, - trie.NewStackTrie(nil), + new(trie.Trie), blkDEthBlock.ExtData(), false, ) - uncleBlock, err := vm2.newBlock(uncleEthBlock) - if err != nil { - t.Fatal(err) + uncleBlock := &Block{ + vm: vm2, + ethBlock: uncleEthBlock, + id: 
ids.ID(uncleEthBlock.Hash()), } - if err := uncleBlock.Verify(context.Background()); !errors.Is(err, errUnclesUnsupported) { + if err := uncleBlock.Verify(); !errors.Is(err, errUnclesUnsupported) { t.Fatalf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) } - if _, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()); err != nil { + if _, err := vm1.ParseBlock(vm2BlkC.Bytes()); err != nil { t.Fatalf("VM1 errored parsing blkC: %s", err) } - if _, err := vm1.ParseBlock(context.Background(), uncleBlock.Bytes()); !errors.Is(err, errUnclesUnsupported) { + if _, err := vm1.ParseBlock(uncleBlock.Bytes()); !errors.Is(err, errUnclesUnsupported) { t.Fatalf("VM1 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) } } @@ -2623,23 +2327,23 @@ func TestEmptyBlock(t *testing.T) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -2647,7 +2351,7 @@ func TestEmptyBlock(t *testing.T) { // Create empty block from blkA ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - emptyEthBlock := types.NewBlockWithExtData( + emptyEthBlock := types.NewBlock( types.CopyHeader(ethBlock.Header()), nil, nil, @@ -2661,27 +2365,27 @@ func TestEmptyBlock(t *testing.T) { t.Fatalf("emptyEthBlock should not have any extra data") } - emptyBlock, err := vm.newBlock(emptyEthBlock) - if err != nil { - t.Fatal(err) + emptyBlock := &Block{ + vm: vm, + ethBlock: emptyEthBlock, + id: ids.ID(emptyEthBlock.Hash()), } - if _, err := vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { + if _, err := vm.ParseBlock(emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) } - if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { + if err := emptyBlock.Verify(); !errors.Is(err, errEmptyBlock) { t.Fatalf("block should have failed verification with errEmptyBlock but got %s", err.Error()) } } // Regression test to ensure that a VM that verifies block B, C, then // D (preferring block B) reorgs when C and then D are accepted. 
-// -// A -// / \ -// B C -// | -// D +// A +// / \ +// B C +// | +// D func TestAcceptReorg(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -2692,40 +2396,40 @@ func TestAcceptReorg(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + vm1.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan1) newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) + vm2.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan2) key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.AddLocalTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer1 - vm1BlkA, err := vm1.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(context.Background()); err != nil { + if err := vm1BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2733,28 +2437,28 @@ func TestAcceptReorg(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(context.Background()); err != nil { + if err := vm2BlkA.Verify(); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(context.Background()); err != nil { + if err := vm1BlkA.Accept(); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(context.Background()); err != nil { + if err := vm2BlkA.Accept(); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2781,7 +2485,7 @@ func TestAcceptReorg(t *testing.T) { // Add the remote transactions, build the block, and set VM1's preference // for block B - errs := vm1.txPool.AddRemotesSync(txs) + errs := vm1.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) @@ -2790,12 +2494,12 @@ func TestAcceptReorg(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock(context.Background()) + vm1BlkB, err := vm1.BuildBlock() 
if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(context.Background()); err != nil { + if err := vm1BlkB.Verify(); err != nil { t.Fatal(err) } @@ -2803,11 +2507,11 @@ func TestAcceptReorg(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { t.Fatal(err) } - errs = vm2.txPool.AddRemotesSync(txs[0:5]) + errs = vm2.chain.AddRemoteTxsSync(txs[0:5]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2816,16 +2520,16 @@ func TestAcceptReorg(t *testing.T) { <-issuer2 - vm2BlkC, err := vm2.BuildBlock(context.Background()) + vm2BlkC, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(context.Background()); err != nil { + if err := vm2BlkC.Verify(); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -2834,7 +2538,7 @@ func TestAcceptReorg(t *testing.T) { t.Fatalf("Expected new block to match") } - errs = vm2.txPool.AddRemotesSync(txs[5:]) + errs = vm2.chain.AddRemoteTxsSync(txs[5:]) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) @@ -2843,51 +2547,51 @@ func TestAcceptReorg(t *testing.T) { <-issuer2 - vm2BlkD, err := vm2.BuildBlock(context.Background()) + vm2BlkD, err := vm2.BuildBlock() if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } // Parse blocks produced in vm2 - vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) + vm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm1BlkC.Verify(context.Background()); err != nil { + if err := vm1BlkC.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Verify(context.Background()); err != nil { + if err := vm1BlkD.Verify(); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkBHash { t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) } - if err := vm1BlkC.Accept(context.Background()); err != nil { + if err := vm1BlkC.Accept(); err != nil { t.Fatal(err) } blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkCHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkCHash { t.Fatalf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex()) } - if err := vm1BlkB.Reject(context.Background()); err != nil { + if err := vm1BlkB.Reject(); err != nil { t.Fatal(err) } - if err := vm1BlkD.Accept(context.Background()); err != nil { + if err := vm1BlkD.Accept(); err != nil { t.Fatal(err) } blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - if b := 
vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { + if b := vm1.chain.BlockChain().CurrentBlock(); b.Hash() != blkDHash { t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) } } @@ -2899,23 +2603,23 @@ func TestFutureBlock(t *testing.T) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blkA, err := vm.BuildBlock(context.Background()) + blkA, err := vm.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -2928,7 +2632,7 @@ func TestFutureBlock(t *testing.T) { // Set the modified time to exceed the allowed future time modifiedTime := modifiedHeader.Time + uint64(maxFutureBlockTime.Seconds()+1) modifiedHeader.Time = modifiedTime - modifiedBlock := types.NewBlockWithExtData( + modifiedBlock := types.NewBlock( modifiedHeader, nil, nil, @@ -2938,12 +2642,14 @@ func TestFutureBlock(t *testing.T) { false, ) - futureBlock, err := vm.newBlock(modifiedBlock) - if err != nil { - t.Fatal(err) + futureBlock := &Block{ + vm: vm, + ethBlock: modifiedBlock, + id: ids.ID(modifiedBlock.Hash()), + atomicTxs: internalBlkA.atomicTxs, } - if err := futureBlock.Verify(context.Background()); err == nil { + if err := futureBlock.Verify(); err == nil { t.Fatal("Future block should have failed verification due to block timestamp too far in the future") } else if !strings.Contains(err.Error(), "block timestamp is too far in the future") { t.Fatalf("Expected error to be block timestamp too far in the future but found %s", err) @@ -2958,34 +2664,34 @@ func TestBuildApricotPhase1Block(t *testing.T) { testShortIDAddrs[0]: importAmount, }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + vm.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -2993,11 +2699,11 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != 
nil { t.Fatal(err) } @@ -3023,7 +2729,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { } txs[i] = signedTx } - errs := vm.txPool.AddRemotesSync(txs) + errs := vm.chain.AddRemoteTxsSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -3032,12 +2738,12 @@ func TestBuildApricotPhase1Block(t *testing.T) { <-issuer - blk, err = vm.BuildBlock(context.Background()) + blk, err = vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -3045,7 +2751,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -3053,7 +2759,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } @@ -3062,7 +2768,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { } // Confirm all txs are present - ethBlkTxs := vm.blockChain.GetBlockByNumber(2).Transactions() + ethBlkTxs := vm.chain.GetBlockByNumber(2).Transactions() for i, tx := range txs { if len(ethBlkTxs) <= i { t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) @@ -3080,28 +2786,28 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatalf("Block failed verification on VM: %s", err) } @@ -3109,17 +2815,17 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } blkHeight := blk.Height() blkHash := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) + vm.chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = true ctx := context.Background() - b, err := vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) + b, err := vm.chain.APIBackend().BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if err != nil { t.Fatal(err) } @@ -3127,18 +2833,18 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } - vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) + vm.chain.BlockChain().GetVMConfig().AllowUnfinalizedQueries = 
false - _, err = vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) + _, err = vm.chain.APIBackend().BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if !errors.Is(err, eth.ErrUnfinalizedData) { t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatalf("VM failed to accept block: %s", err) } - if b := vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { + if b := vm.chain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } } @@ -3153,28 +2859,28 @@ func TestReissueAtomicTx(t *testing.T) { }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() - genesisBlkID, err := vm.LastAccepted(context.Background()) + genesisBlkID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blkA, err := vm.BuildBlock(context.Background()) + blkA, err := vm.BuildBlock() if err != nil { t.Fatal(err) } @@ -3183,23 +2889,19 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blkA.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blkA.ID()); err != nil { + if err := vm.SetPreference(blkA.ID()); err != nil { t.Fatal(err) } // SetPreference to parent before rejecting (will rollback state to genesis // so that atomic transaction can be reissued, otherwise current block will // conflict with UTXO to be reissued) - if err := vm.SetPreference(context.Background(), genesisBlkID); err != nil { + if err := vm.SetPreference(genesisBlkID); err != nil { t.Fatal(err) } // Rejecting [blkA] should cause [importTx] to be re-issued into the mempool. - if err := blkA.Reject(context.Background()); err != nil { + if err := blkA.Reject(); err != nil { t.Fatal(err) } @@ -3208,7 +2910,7 @@ func TestReissueAtomicTx(t *testing.T) { // as Rejected. 
time.Sleep(2 * time.Second) <-issuer - blkB, err := vm.BuildBlock(context.Background()) + blkB, err := vm.BuildBlock() if err != nil { t.Fatal(err) } @@ -3223,7 +2925,7 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of blkB to be %s, but found %s", choices.Processing, status) } - if err := blkB.Verify(context.Background()); err != nil { + if err := blkB.Verify(); err != nil { t.Fatal(err) } @@ -3231,11 +2933,11 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of blkC to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blkB.ID()); err != nil { + if err := vm.SetPreference(blkB.ID()); err != nil { t.Fatal(err) } - if err := blkB.Accept(context.Background()); err != nil { + if err := blkB.Accept(); err != nil { t.Fatal(err) } @@ -3243,7 +2945,7 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { + if lastAcceptedID, err := vm.LastAccepted(); err != nil { t.Fatal(err) } else if lastAcceptedID != blkB.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blkB.ID(), lastAcceptedID) @@ -3262,7 +2964,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase1, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() @@ -3270,23 +2972,23 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { exportTxs := createExportTxOptions(t, vm, issuer, sharedMemory) exportTx1, exportTx2 := exportTxs[0], exportTxs[1] - if err := vm.mempool.AddLocalTx(exportTx1); err != nil { + if err := vm.issueTx(exportTx1, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - exportBlk1, err := vm.BuildBlock(context.Background()) + exportBlk1, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := exportBlk1.Verify(context.Background()); err != nil { + if err := exportBlk1.Verify(); err != nil { t.Fatal(err) } - if err := vm.SetPreference(context.Background(), exportBlk1.ID()); err != nil { + if err := vm.SetPreference(exportBlk1.ID()); err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(exportTx2); err == nil { + if err := vm.issueTx(exportTx2, true /*=local*/); err == nil { t.Fatal("Should have failed to issue due to an invalid export tx") } @@ -3300,7 +3002,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { } <-issuer - _, err = vm.BuildBlock(context.Background()) + _, err = vm.BuildBlock() if err == nil { t.Fatal("BuildBlock should have returned an error due to invalid export transaction") } @@ -3310,7 +3012,7 @@ func TestBuildInvalidBlockHead(t *testing.T) { issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() @@ -3341,29 +3043,29 @@ func TestBuildInvalidBlockHead(t *testing.T) { SourceChain: vm.ctx.XChainID, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil { + if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key0}}); err != nil { t.Fatal(err) } - currentBlock := vm.blockChain.CurrentBlock() + currentBlock := vm.chain.BlockChain().CurrentBlock() // Verify that the 
transaction fails verification when attempting to issue // it into the atomic mempool. - if err := vm.mempool.AddLocalTx(tx); err == nil { + if err := vm.issueTx(tx, true /*=local*/); err == nil { t.Fatal("Should have failed to issue invalid transaction") } // Force issue the transaction directly to the mempool - if err := vm.mempool.ForceAddTx(tx); err != nil { + if err := vm.mempool.AddTx(tx); err != nil { t.Fatal(err) } <-issuer - if _, err := vm.BuildBlock(context.Background()); err == nil { + if _, err := vm.BuildBlock(); err == nil { t.Fatalf("Unexpectedly created a block") } - newCurrentBlock := vm.blockChain.CurrentBlock() + newCurrentBlock := vm.chain.BlockChain().CurrentBlock() if currentBlock.Hash() != newCurrentBlock.Hash() { t.Fatal("current block changed") @@ -3379,14 +3081,14 @@ func TestConfigureLogLevel(t *testing.T) { }{ { name: "Log level info", - logConfig: `{"log-level": "info"}`, + logConfig: "{\"log-level\": \"info\"}", genesisJSON: genesisJSONApricotPhase2, upgradeJSON: "", expectedErr: "", }, { name: "Invalid log level", - logConfig: `{"log-level": "cchain"}`, + logConfig: "{\"log-level\": \"cchain\"}", genesisJSON: genesisJSONApricotPhase3, upgradeJSON: "", expectedErr: "failed to initialize logger due to", @@ -3396,18 +3098,17 @@ func TestConfigureLogLevel(t *testing.T) { t.Run(test.name, func(t *testing.T) { vm := &VM{} ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) - appSender := &commonEng.SenderTest{T: t} + appSender := &engCommon.SenderTest{T: t} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + appSender.SendAppGossipF = func([]byte) error { return nil } err := vm.Initialize( - context.Background(), ctx, dbManager, genesisBytes, []byte(""), []byte(test.logConfig), issuer, - []*commonEng.Fx{}, + []*engCommon.Fx{}, appSender, ) if len(test.expectedErr) == 0 && err != nil { @@ -3424,15 +3125,13 @@ func TestConfigureLogLevel(t *testing.T) { if err == nil { shutdownChan := make(chan error, 1) shutdownFunc := func() { - err := vm.Shutdown(context.Background()) + err := vm.Shutdown() shutdownChan <- err } go shutdownFunc() - shutdownTimeout := 250 * time.Millisecond + shutdownTimeout := 50 * time.Millisecond ticker := time.NewTicker(shutdownTimeout) - defer ticker.Stop() - select { case <-ticker.C: t.Fatalf("VM shutdown took longer than timeout: %v", shutdownTimeout) @@ -3452,13 +3151,13 @@ func TestBuildApricotPhase4Block(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + vm.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) key := testKeys[0].ToECDSA() address := testEthAddrs[0] @@ -3494,23 +3193,23 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := 
vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -3518,11 +3217,11 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -3563,7 +3262,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { } txs[i] = signedTx } - errs := vm.txPool.AddRemotesSync(txs) + errs := vm.chain.AddRemoteTxs(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -3572,12 +3271,12 @@ func TestBuildApricotPhase4Block(t *testing.T) { <-issuer - blk, err = vm.BuildBlock(context.Background()) + blk, err = vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -3585,7 +3284,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -3608,7 +3307,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } @@ -3617,7 +3316,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { } // Confirm all txs are present - ethBlkTxs := vm.blockChain.GetBlockByNumber(2).Transactions() + ethBlkTxs := vm.chain.GetBlockByNumber(2).Transactions() for i, tx := range txs { if len(ethBlkTxs) <= i { t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) @@ -3634,13 +3333,13 @@ func TestBuildApricotPhase5Block(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + vm.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) key := testKeys[0].ToECDSA() address := testEthAddrs[0] @@ -3676,23 +3375,23 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -3700,11 +3399,11 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := 
vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -3737,7 +3436,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { } txs[i] = signedTx } - errs := vm.txPool.Add(txs, false, false) + errs := vm.chain.AddRemoteTxs(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -3746,12 +3445,12 @@ func TestBuildApricotPhase5Block(t *testing.T) { <-issuer - blk, err = vm.BuildBlock(context.Background()) + blk, err = vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -3759,7 +3458,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -3782,7 +3481,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted() if err != nil { t.Fatal(err) } @@ -3791,7 +3490,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { } // Confirm all txs are present - ethBlkTxs := vm.blockChain.GetBlockByNumber(2).Transactions() + ethBlkTxs := vm.chain.GetBlockByNumber(2).Transactions() for i, tx := range txs { if len(ethBlkTxs) <= i { t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) @@ -3809,30 +3508,30 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase1, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + vm.chain.GetTxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) // Create three conflicting import transactions importTxs := createImportTxOptions(t, vm, sharedMemory) // Issue the first import transaction, build, and accept the block. 
- if err := vm.mempool.AddLocalTx(importTxs[0]); err != nil { + if err := vm.issueTx(importTxs[0], true); err != nil { t.Fatal(err) } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } @@ -3840,11 +3539,11 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := vm.SetPreference(blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } @@ -3858,7 +3557,7 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { vm.mempool.AddTx(importTxs[1]) vm.mempool.AddTx(importTxs[2]) - if _, err := vm.BuildBlock(context.Background()); err == nil { + if _, err := vm.BuildBlock(); err == nil { t.Fatal("Expected build block to fail due to empty block") } } @@ -3875,27 +3574,27 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { t.Fatal(err) } defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() // Create a conflict set for each pair of transactions - conflictSets := make([]set.Set[ids.ID], len(testKeys)) + conflictSets := make([]ids.Set, len(testKeys)) for index, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*secp256k1.PrivateKey{key}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true /*=local*/); err != nil { t.Fatal(err) } conflictSets[index].Add(importTx.ID()) - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*secp256k1.PrivateKey{key}) + conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(conflictTx); err == nil { + if err := vm.issueTx(conflictTx, true /*=local*/); err == nil { t.Fatal("should conflict with the utxoSet in the mempool") } // force add the tx @@ -3905,13 +3604,13 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { <-issuer // Note: this only checks the path through OnFinalizeAndAssemble, we should make sure to add a test // that verifies blocks received from the network will also fail verification - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } atomicTxs := blk.(*chain.BlockWrapper).Block.(*Block).atomicTxs assert.True(t, len(atomicTxs) == len(testKeys), "Conflict transactions should be out of the batch") - atomicTxIDs := set.Set[ids.ID]{} + atomicTxIDs := ids.Set{} for _, tx := range atomicTxs { atomicTxIDs.Add(tx.ID()) } @@ -3924,10 +3623,10 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { assert.Equal(t, 1, conflictSet.Len()) } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { + if err := blk.Accept(); err != nil { t.Fatal(err) } } @@ -3937,7 +3636,7 @@ func 
TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm.Shutdown(); err != nil { t.Fatal(err) } }() @@ -3956,13 +3655,13 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(importTx); err != nil { + if err := vm.issueTx(importTx, true); err != nil { t.Fatal(err) } } <-issuer - blk, err := vm.BuildBlock(context.Background()) + blk, err := vm.BuildBlock() if err != nil { t.Fatal(err) } @@ -3986,10 +3685,10 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { _, vm2, _, sharedMemory2, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(context.Background()); err != nil { + if err := vm2.Shutdown(); err != nil { t.Fatal(err) } }() @@ -4011,20 +3710,20 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { // Double the initial base fee used when estimating the cost of this transaction to ensure that when it is // used in ApricotPhase5 it still pays a sufficient fee with the fixed fee per atomic transaction. - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*crypto.PrivateKeySECP256K1R{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm1.mempool.ForceAddTx(importTx); err != nil { + if err := vm1.issueTx(importTx, true); err != nil { t.Fatal(err) } <-issuer - blk1, err := vm1.BuildBlock(context.Background()) + blk1, err := vm1.BuildBlock() if err != nil { t.Fatal(err) } - if err := blk1.Verify(context.Background()); err != nil { + if err := blk1.Verify(); err != nil { t.Fatal(err) } @@ -4036,7 +3735,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } // Construct the new block with the extra data in the new format (slice of atomic transactions). - ethBlk2 := types.NewBlockWithExtData( + ethBlk2 := types.NewBlock( types.CopyHeader(validEthBlock.Header()), nil, nil, @@ -4046,7 +3745,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { true, ) - state, err := vm2.blockChain.State() + state, err := vm2.chain.CurrentState() if err != nil { t.Fatal(err) } @@ -4057,163 +3756,11 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } } -func TestSkipChainConfigCheckCompatible(t *testing.T) { - // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. - metrics.Enabled = false - defer func() { metrics.Enabled = true }() - - importAmount := uint64(50000000) - issuer, vm, dbManager, _, appSender := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - defer func() { require.NoError(t, vm.Shutdown(context.Background())) }() - - // Since rewinding is permitted for last accepted height of 0, we must - // accept one block to test the SkipUpgradeCheck functionality. 
- importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - require.NoError(t, err) - require.NoError(t, vm.mempool.AddLocalTx(importTx)) - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - require.NoError(t, err) - require.NoError(t, blk.Verify(context.Background())) - require.NoError(t, vm.SetPreference(context.Background(), blk.ID())) - require.NoError(t, blk.Accept(context.Background())) - - reinitVM := &VM{} - // use the block's timestamp instead of 0 since rewind to genesis - // is hardcoded to be allowed in core/genesis.go. - genesisWithUpgrade := &core.Genesis{} - require.NoError(t, json.Unmarshal([]byte(genesisJSONApricotPhase1), genesisWithUpgrade)) - genesisWithUpgrade.Config.ApricotPhase2BlockTimestamp = utils.TimeToNewUint64(blk.Timestamp()) - genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) - require.NoError(t, err) - - // this will not be allowed - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, appSender) - require.ErrorContains(t, err, "mismatching ApricotPhase2 fork block timestamp in database") - - // try again with skip-upgrade-check - config := []byte(`{"skip-upgrade-check": true}`) - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, issuer, []*commonEng.Fx{}, appSender) - require.NoError(t, err) - require.NoError(t, reinitVM.Shutdown(context.Background())) -} - -func TestParentBeaconRootBlock(t *testing.T) { - tests := []struct { - name string - genesisJSON string - beaconRoot *common.Hash - expectedError bool - errString string - }{ - { - name: "non-empty parent beacon root in Durango", - genesisJSON: genesisJSONDurango, - beaconRoot: &common.Hash{0x01}, - expectedError: true, - // err string wont work because it will also fail with blob gas is non-empty (zeroed) - }, - { - name: "empty parent beacon root in Durango", - genesisJSON: genesisJSONDurango, - beaconRoot: &common.Hash{}, - expectedError: true, - }, - { - name: "nil parent beacon root in Durango", - genesisJSON: genesisJSONDurango, - beaconRoot: nil, - expectedError: false, - }, - { - name: "non-empty parent beacon root in Cancun", - genesisJSON: genesisJSONCancun, - beaconRoot: &common.Hash{0x01}, - expectedError: true, - errString: "expected empty hash", - }, - { - name: "empty parent beacon root in Cancun", - genesisJSON: genesisJSONCancun, - beaconRoot: &common.Hash{}, - expectedError: false, - }, - { - name: "nil parent beacon root in Cancun", - genesisJSON: genesisJSONCancun, - beaconRoot: nil, - expectedError: true, - errString: "header is missing parentBeaconRoot", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - importAmount := uint64(1000000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, test.genesisJSON, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - // Modify the block to have a parent 
beacon root - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - header := types.CopyHeader(ethBlock.Header()) - header.ParentBeaconRoot = test.beaconRoot - parentBeaconEthBlock := types.NewBlockWithExtData( - header, - nil, - nil, - nil, - new(trie.Trie), - ethBlock.ExtData(), - false, - ) - - parentBeaconBlock, err := vm.newBlock(parentBeaconEthBlock) - if err != nil { - t.Fatal(err) - } - - errCheck := func(err error) { - if test.expectedError { - if test.errString != "" { - require.ErrorContains(t, err, test.errString) - } else { - require.Error(t, err) - } - } else { - require.NoError(t, err) - } - } - - _, err = vm.ParseBlock(context.Background(), parentBeaconBlock.Bytes()) - errCheck(err) - err = parentBeaconBlock.Verify(context.Background()) - errCheck(err) - }) - } +func TestGetAtomicRepositoryRepairHeights(t *testing.T) { + mainnetHeights := getAtomicRepositoryRepairHeights(params.AvalancheMainnetChainID) + assert.Len(t, mainnetHeights, 76) + sorted := sort.SliceIsSorted(mainnetHeights, func(i, j int) bool { return mainnetHeights[i] < mainnetHeights[j] }) + assert.True(t, sorted) + testnetHeights := getAtomicRepositoryRepairHeights(params.AvalancheFujiChainID) + assert.Empty(t, testnetHeights) } diff --git a/plugin/main.go b/plugin/main.go index 4080476d15..0f4204fb45 100644 --- a/plugin/main.go +++ b/plugin/main.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/utils/ulimit" "github.com/ava-labs/avalanchego/vms/rpcchainvm" - "github.com/ava-labs/coreth/plugin/evm" + "github.com/tenderly/coreth/plugin/evm" ) func main() { diff --git a/precompile/contract.go b/precompile/contract.go new file mode 100644 index 0000000000..4782ef89b9 --- /dev/null +++ b/precompile/contract.go @@ -0,0 +1,143 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package precompile + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +const ( + selectorLen = 4 +) + +type RunStatefulPrecompileFunc func(accessibleState PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) + +// PrecompileAccessibleState defines the interface exposed to stateful precompile contracts +type PrecompileAccessibleState interface { + GetStateDB() StateDB + GetBlockContext() BlockContext + NativeAssetCall(caller common.Address, input []byte, suppliedGas uint64, gasCost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) +} + +// BlockContext defines an interface that provides information to a stateful precompile +// about the block that activates the upgrade. The precompile can access this information +// to initialize its state. +type BlockContext interface { + Number() *big.Int + Timestamp() *big.Int +} + +// ChainConfig defines an interface that provides information to a stateful precompile +// about the chain configuration. The precompile can access this information to initialize +// its state. +type ChainConfig interface { + // Note: None of the existing stateful precompiles currently access chain config information + // in Configure so this interface is empty.
+}
+
+// StateDB is the interface for accessing EVM state
+type StateDB interface {
+ GetState(common.Address, common.Hash) common.Hash
+ SetState(common.Address, common.Hash, common.Hash)
+
+ SetCode(common.Address, []byte)
+
+ SetNonce(common.Address, uint64)
+ GetNonce(common.Address) uint64
+
+ GetBalance(common.Address) *big.Int
+ AddBalance(common.Address, *big.Int)
+ SubBalance(common.Address, *big.Int)
+
+ SubBalanceMultiCoin(common.Address, common.Hash, *big.Int)
+ AddBalanceMultiCoin(common.Address, common.Hash, *big.Int)
+ GetBalanceMultiCoin(common.Address, common.Hash) *big.Int
+
+ CreateAccount(common.Address)
+ Exist(common.Address) bool
+}
+
+// StatefulPrecompiledContract is the interface for executing a precompiled contract
+type StatefulPrecompiledContract interface {
+ // Run executes the precompiled contract.
+ Run(accessibleState PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error)
+}
+
+// statefulPrecompileFunction defines a function implemented by a stateful precompile
+type statefulPrecompileFunction struct {
+ // selector is the 4 byte function selector for this function.
+ // It should be calculated from the function signature using CalculateFunctionSelector.
+ selector []byte
+ // execute is performed when this function is selected
+ execute RunStatefulPrecompileFunc
+}
+
+// newStatefulPrecompileFunction creates a stateful precompile function with the given arguments
+//nolint:unused,deadcode
+func newStatefulPrecompileFunction(selector []byte, execute RunStatefulPrecompileFunc) *statefulPrecompileFunction {
+ return &statefulPrecompileFunction{
+ selector: selector,
+ execute: execute,
+ }
+}
+
+// statefulPrecompileWithFunctionSelectors implements StatefulPrecompiledContract by using 4 byte function selectors to pass
+// off responsibilities to internal execution functions.
+// Note: because we only ever read from [functions], no lock is required to make it thread-safe.
+type statefulPrecompileWithFunctionSelectors struct {
+ fallback *statefulPrecompileFunction
+ functions map[string]*statefulPrecompileFunction
+}
+
+// newStatefulPrecompileWithFunctionSelectors generates a new StatefulPrecompiledContract using [functions] as the available functions and [fallback]
+// as an optional fallback if there is no input data. Note: the selector of [fallback] will be ignored, so it is required to be left empty.
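+//
+// For illustration only (a hedged sketch; [mintFn] is a hypothetical
+// RunStatefulPrecompileFunc, not part of this package), a precompile exposing
+// a single "mint(address,uint256)" function could be assembled as:
+//
+//	mintSelector := CalculateFunctionSelector("mint(address,uint256)")
+//	contract := newStatefulPrecompileWithFunctionSelectors(
+//		nil, // no fallback function
+//		[]*statefulPrecompileFunction{newStatefulPrecompileFunction(mintSelector, mintFn)},
+//	)
+//nolint:unused,deadcode
+func newStatefulPrecompileWithFunctionSelectors(fallback *statefulPrecompileFunction, functions []*statefulPrecompileFunction) StatefulPrecompiledContract {
+ // Ensure that if a fallback is present, it does not have a mistakenly populated function selector.
+ if fallback != nil && len(fallback.selector) != 0 {
+ panic(fmt.Errorf("fallback function cannot specify non-zero length function selector"))
+ }
+
+ // Construct the contract and populate [functions].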
+ contract := &statefulPrecompileWithFunctionSelectors{
+ fallback: fallback,
+ functions: make(map[string]*statefulPrecompileFunction),
+ }
+ for _, function := range functions {
+ _, exists := contract.functions[string(function.selector)]
+ if exists {
+ panic(fmt.Errorf("cannot create stateful precompile with duplicated function selector: %q", function.selector))
+ }
+ contract.functions[string(function.selector)] = function
+ }
+
+ return contract
+}
+
+// Run selects the function using the 4 byte function selector at the start of the input and executes the underlying function on the
+// given arguments.
+func (s *statefulPrecompileWithFunctionSelectors) Run(accessibleState PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) {
+ // If there is no input data present, call the fallback function if present.
+ if len(input) == 0 && s.fallback != nil {
+ return s.fallback.execute(accessibleState, caller, addr, nil, suppliedGas, readOnly)
+ }
+
+ // Otherwise, an unexpected input size will result in an error.
+ if len(input) < selectorLen {
+ return nil, suppliedGas, fmt.Errorf("missing function selector to precompile - input length (%d)", len(input))
+ }
+
+ // Use the function selector to grab the correct function
+ selector := input[:selectorLen]
+ functionInput := input[selectorLen:]
+ function, ok := s.functions[string(selector)]
+ if !ok {
+ return nil, suppliedGas, fmt.Errorf("invalid function selector %#x", selector)
+ }
+
+ return function.execute(accessibleState, caller, addr, functionInput, suppliedGas, readOnly)
+}
\ No newline at end of file
diff --git a/precompile/params.go b/precompile/params.go
new file mode 100644
index 0000000000..f332d11c61
--- /dev/null
+++ b/precompile/params.go
@@ -0,0 +1,47 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package precompile
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Gas costs for stateful precompiles
+// can be added here, e.g.
+// const MintGasCost = 30_000
+
+// AddressRange represents a continuous range of addresses
+type AddressRange struct {
+ Start common.Address
+ End common.Address
+}
+
+// Contains returns true iff [addr] lies within the range [a.Start, a.End] (inclusive).
+func (a *AddressRange) Contains(addr common.Address) bool {
+ addrBytes := addr.Bytes()
+ return bytes.Compare(addrBytes, a.Start[:]) >= 0 && bytes.Compare(addrBytes, a.End[:]) <= 0
+}
+
+// Designated addresses of stateful precompiles
+// Note: it is important that none of these addresses conflict with each other or any other precompiles
+// in core/vm/contracts.go.
+// We start at 0x0100000000000000000000000000000000000000 and will increment by 1 from here to reduce
+// the risk of conflicts.
+var (
+ UsedAddresses = []common.Address{
+ // precompile contract addresses can be added here
+ }
+
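+ // For illustration only (a hedged sketch; [addr] and the surrounding error
+ // handling are hypothetical): a caller could reject reserved addresses with
+ //
+ //	for _, r := range ReservedRanges {
+ //		if r.Contains(addr) {
+ //			return fmt.Errorf("address %v is reserved for stateful precompiles", addr)
+ //		}
+ //	}
+
+ // ReservedRanges contains address ranges that are reserved
+ // for precompiles and cannot be used as EOAs or deployed contracts.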
+ ReservedRanges = []AddressRange{
+ {
+ // reserved for coreth precompiles
+ common.HexToAddress("0x0100000000000000000000000000000000000000"),
+ common.HexToAddress("0x01000000000000000000000000000000000000ff"),
+ },
+ }
+)
\ No newline at end of file
diff --git a/precompile/stateful_precompile_config.go b/precompile/stateful_precompile_config.go
new file mode 100644
index 0000000000..710e0e2b3b
--- /dev/null
+++ b/precompile/stateful_precompile_config.go
@@ -0,0 +1,59 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package precompile
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/tenderly/coreth/utils"
+)
+
+// StatefulPrecompileConfig defines the interface for a stateful precompile to be enabled via a network upgrade.
+type StatefulPrecompileConfig interface {
+ // Address returns the address where the stateful precompile is accessible.
+ Address() common.Address
+ // Timestamp returns the timestamp at which this stateful precompile should be enabled.
+ // 1) 0 indicates that the precompile should be enabled from genesis.
+ // 2) n indicates that the precompile should be enabled in the first block with timestamp >= [n].
+ // 3) nil indicates that the precompile is never enabled.
+ Timestamp() *big.Int
+ // Configure is called on the first block where the stateful precompile should be enabled.
+ // This allows the stateful precompile to set its initial state via [StateDB] as necessary,
+ // and it should only modify the state within its own address space.
+ // This function must be deterministic since it will impact the EVM state. If a change to the
+ // config causes a change to the state modifications made in Configure, then it cannot be safely
+ // made to the config after the network upgrade has gone into effect.
+ Configure(ChainConfig, StateDB, BlockContext)
+ // Contract returns a thread-safe singleton that can be used as the StatefulPrecompiledContract when
+ // this config is enabled.
+ Contract() StatefulPrecompiledContract
+}
+
+// CheckConfigure checks whether [precompileConfig] is activated by the transition from the block at [parentTimestamp] to the timestamp
+// set in [blockContext].
+// If so, it calls Configure on [precompileConfig] to make the necessary state update that enables the StatefulPrecompile.
+// Note: this function is called within genesis to configure the starting state if [precompileConfig] specifies that it should be
+// configured at genesis, and during block processing to update the state before processing the given block.
+// TODO: add ability to call Configure at different timestamps, so that developers can easily re-configure by updating the
+// stateful precompile config.
+// Assumes that [precompileConfig] is non-nil.
+func CheckConfigure(chainConfig ChainConfig, parentTimestamp *big.Int, blockContext BlockContext, precompileConfig StatefulPrecompileConfig, state StateDB) {
+ forkTimestamp := precompileConfig.Timestamp()
+ // If the network upgrade goes into effect within this transition, configure the stateful precompile
+ if utils.IsForkTransition(forkTimestamp, parentTimestamp, blockContext.Timestamp()) {
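+ // Illustrative values (an assumption for exposition, not taken from this
+ // repo): with Timestamp() == 100, a transition from parentTimestamp 99 to
+ // blockContext.Timestamp() 100 activates the precompile and runs Configure
+ // exactly once; a later 100 -> 101 transition does not run it again.
+
+ // Set the nonce of the precompile's address (as is done when a contract is created) to ensure
+ // that it is marked as non-empty and will not be cleaned up when the statedb is finalized.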
+ state.SetNonce(precompileConfig.Address(), 1)
+ // Set the code of the precompile's address to a non-zero length byte slice to ensure that the precompile
+ // can be called from within Solidity contracts. Solidity adds a check before invoking a contract to ensure
+ // that it does not attempt to invoke a non-existent contract.
+ state.SetCode(precompileConfig.Address(), []byte{0x1})
+ precompileConfig.Configure(chainConfig, state, blockContext)
+ }
+}
diff --git a/precompile/utils.go b/precompile/utils.go
new file mode 100644
index 0000000000..1a75ffb2ab
--- /dev/null
+++ b/precompile/utils.go
@@ -0,0 +1,34 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package precompile
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/tenderly/coreth/vmerrs"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+var functionSignatureRegex = regexp.MustCompile(`[\w]+\(((([\w]+)?)|((([\w]+),)+([\w]+)))\)`)
+
+// CalculateFunctionSelector returns the 4 byte function selector that results from [functionSignature]
+// Ex. the function setBalance(addr address, balance uint256) should be passed in as the string:
+// "setBalance(address,uint256)"
+func CalculateFunctionSelector(functionSignature string) []byte {
+ if !functionSignatureRegex.MatchString(functionSignature) {
+ panic(fmt.Errorf("invalid function signature: %q", functionSignature))
+ }
+ hash := crypto.Keccak256([]byte(functionSignature))
+ return hash[:4]
+}
+
+// deductGas checks if [suppliedGas] is sufficient against [requiredGas] and deducts [requiredGas] from [suppliedGas].
+//nolint:unused,deadcode
+func deductGas(suppliedGas uint64, requiredGas uint64) (uint64, error) {
+ if suppliedGas < requiredGas {
+ return 0, vmerrs.ErrOutOfGas
+ }
+ return suppliedGas - requiredGas, nil
+}
\ No newline at end of file
diff --git a/pypi b/pypi
new file mode 100644
index 0000000000..ebc61603b1
--- /dev/null
+++ b/pypi
@@ -0,0 +1,29 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+with open("requirements.txt", "r") as f:
+ requirements = f.readlines()
+
+setuptools.setup(
+ name="HT",
+ version="0.0.1",
+ author="Krunoslav Lehman Pavasovic, Umut Simsekli",
+ author_email="krunolp@gmail.com",
+ description="Heavy Tailed Experiments",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url='https://github.com/krunolp/heavy_tails',
+ package_dir={'heavy_tails': 'heavy_tails'},
+ packages=setuptools.find_packages(),
+ install_requires=[
+ 'numpy',
+ 'pandas',
+ 'scipy',
+ 'scikit-learn',
+ 'matplotlib',
+ 'jax',
+ 'jaxlib',
+ ],
+)
diff --git a/rlp/decode.go b/rlp/decode.go
new file mode 100644
index 0000000000..02277ba51e
--- /dev/null
+++ b/rlp/decode.go
@@ -0,0 +1,1120 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/tenderly/coreth/rlp/internal/rlpstruct"
+)
+
+//lint:ignore ST1012 EOL is not an error.
+
+// EOL is returned when the end of the current list
+// has been reached during streaming.
+var EOL = errors.New("rlp: end of list")
+
+var (
+ ErrExpectedString = errors.New("rlp: expected String or Byte")
+ ErrExpectedList = errors.New("rlp: expected List")
+ ErrCanonInt = errors.New("rlp: non-canonical integer format")
+ ErrCanonSize = errors.New("rlp: non-canonical size information")
+ ErrElemTooLarge = errors.New("rlp: element is larger than containing list")
+ ErrValueTooLarge = errors.New("rlp: value size exceeds available input length")
+ ErrMoreThanOneValue = errors.New("rlp: input contains more than one value")
+
+ // internal errors
+ errNotInList = errors.New("rlp: call of ListEnd outside of any list")
+ errNotAtEOL = errors.New("rlp: call of ListEnd not positioned at EOL")
+ errUintOverflow = errors.New("rlp: uint overflow")
+ errNoPointer = errors.New("rlp: interface given to Decode must be a pointer")
+ errDecodeIntoNil = errors.New("rlp: pointer given to Decode must not be nil")
+
+ streamPool = sync.Pool{
+ New: func() interface{} { return new(Stream) },
+ }
+)
+
+// Decoder is implemented by types that require custom RLP decoding rules or need to decode
+// into private fields.
+//
+// The DecodeRLP method should read one value from the given Stream. It is not forbidden to
+// read less or more, but it might be confusing.
+type Decoder interface {
+ DecodeRLP(*Stream) error
+}
+
+// Decode parses RLP-encoded data from r and stores the result in the value pointed to by
+// val. Please see package-level documentation for the decoding rules. Val must be a
+// non-nil pointer.
+//
+// If r does not implement ByteReader, Decode will do its own buffering.
+//
+// Note that Decode does not set an input limit for all readers and may be vulnerable to
+// panics caused by huge value sizes. If you need an input limit, use
+//
+// NewStream(r, limit).Decode(val)
+func Decode(r io.Reader, val interface{}) error {
+ stream := streamPool.Get().(*Stream)
+ defer streamPool.Put(stream)
+
+ stream.Reset(r, 0)
+ return stream.Decode(val)
+}
+
+// DecodeBytes parses RLP data from b into val. Please see package-level documentation for
+// the decoding rules. The input must contain exactly one value and no trailing data.
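+//
+// For illustration (a minimal sketch): decoding the canonical encoding of the
+// list [1, 2] into a slice:
+//
+//	var v []uint
+//	err := DecodeBytes([]byte{0xC2, 0x01, 0x02}, &v) // on success, v == []uint{1, 2}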
+func DecodeBytes(b []byte, val interface{}) error { + r := bytes.NewReader(b) + + stream := streamPool.Get().(*Stream) + defer streamPool.Put(stream) + + stream.Reset(r, uint64(len(b))) + if err := stream.Decode(val); err != nil { + return err + } + if r.Len() > 0 { + return ErrMoreThanOneValue + } + return nil +} + +type decodeError struct { + msg string + typ reflect.Type + ctx []string +} + +func (err *decodeError) Error() string { + ctx := "" + if len(err.ctx) > 0 { + ctx = ", decoding into " + for i := len(err.ctx) - 1; i >= 0; i-- { + ctx += err.ctx[i] + } + } + return fmt.Sprintf("rlp: %s for %v%s", err.msg, err.typ, ctx) +} + +func wrapStreamError(err error, typ reflect.Type) error { + switch err { + case ErrCanonInt: + return &decodeError{msg: "non-canonical integer (leading zero bytes)", typ: typ} + case ErrCanonSize: + return &decodeError{msg: "non-canonical size information", typ: typ} + case ErrExpectedList: + return &decodeError{msg: "expected input list", typ: typ} + case ErrExpectedString: + return &decodeError{msg: "expected input string or byte", typ: typ} + case errUintOverflow: + return &decodeError{msg: "input string too long", typ: typ} + case errNotAtEOL: + return &decodeError{msg: "input list has too many elements", typ: typ} + } + return err +} + +func addErrorContext(err error, ctx string) error { + if decErr, ok := err.(*decodeError); ok { + decErr.ctx = append(decErr.ctx, ctx) + } + return err +} + +var ( + decoderInterface = reflect.TypeOf(new(Decoder)).Elem() + bigInt = reflect.TypeOf(big.Int{}) +) + +func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) { + kind := typ.Kind() + switch { + case typ == rawValueType: + return decodeRawValue, nil + case typ.AssignableTo(reflect.PtrTo(bigInt)): + return decodeBigInt, nil + case typ.AssignableTo(bigInt): + return decodeBigIntNoPtr, nil + case kind == reflect.Ptr: + return makePtrDecoder(typ, tags) + case reflect.PtrTo(typ).Implements(decoderInterface): + return decodeDecoder, nil + case isUint(kind): + return decodeUint, nil + case kind == reflect.Bool: + return decodeBool, nil + case kind == reflect.String: + return decodeString, nil + case kind == reflect.Slice || kind == reflect.Array: + return makeListDecoder(typ, tags) + case kind == reflect.Struct: + return makeStructDecoder(typ) + case kind == reflect.Interface: + return decodeInterface, nil + default: + return nil, fmt.Errorf("rlp: type %v is not RLP-serializable", typ) + } +} + +func decodeRawValue(s *Stream, val reflect.Value) error { + r, err := s.Raw() + if err != nil { + return err + } + val.SetBytes(r) + return nil +} + +func decodeUint(s *Stream, val reflect.Value) error { + typ := val.Type() + num, err := s.uint(typ.Bits()) + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetUint(num) + return nil +} + +func decodeBool(s *Stream, val reflect.Value) error { + b, err := s.Bool() + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetBool(b) + return nil +} + +func decodeString(s *Stream, val reflect.Value) error { + b, err := s.Bytes() + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetString(string(b)) + return nil +} + +func decodeBigIntNoPtr(s *Stream, val reflect.Value) error { + return decodeBigInt(s, val.Addr()) +} + +func decodeBigInt(s *Stream, val reflect.Value) error { + i := val.Interface().(*big.Int) + if i == nil { + i = new(big.Int) + val.Set(reflect.ValueOf(i)) + } + + err := s.decodeBigInt(i) + if err != nil { + return wrapStreamError(err, 
val.Type()) + } + return nil +} + +func makeListDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) { + etype := typ.Elem() + if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) { + if typ.Kind() == reflect.Array { + return decodeByteArray, nil + } + return decodeByteSlice, nil + } + etypeinfo := theTC.infoWhileGenerating(etype, rlpstruct.Tags{}) + if etypeinfo.decoderErr != nil { + return nil, etypeinfo.decoderErr + } + var dec decoder + switch { + case typ.Kind() == reflect.Array: + dec = func(s *Stream, val reflect.Value) error { + return decodeListArray(s, val, etypeinfo.decoder) + } + case tag.Tail: + // A slice with "tail" tag can occur as the last field + // of a struct and is supposed to swallow all remaining + // list elements. The struct decoder already called s.List, + // proceed directly to decoding the elements. + dec = func(s *Stream, val reflect.Value) error { + return decodeSliceElems(s, val, etypeinfo.decoder) + } + default: + dec = func(s *Stream, val reflect.Value) error { + return decodeListSlice(s, val, etypeinfo.decoder) + } + } + return dec, nil +} + +func decodeListSlice(s *Stream, val reflect.Value, elemdec decoder) error { + size, err := s.List() + if err != nil { + return wrapStreamError(err, val.Type()) + } + if size == 0 { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + return s.ListEnd() + } + if err := decodeSliceElems(s, val, elemdec); err != nil { + return err + } + return s.ListEnd() +} + +func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error { + i := 0 + for ; ; i++ { + // grow slice if necessary + if i >= val.Cap() { + newcap := val.Cap() + val.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(val.Type(), val.Len(), newcap) + reflect.Copy(newv, val) + val.Set(newv) + } + if i >= val.Len() { + val.SetLen(i + 1) + } + // decode into element + if err := elemdec(s, val.Index(i)); err == EOL { + break + } else if err != nil { + return addErrorContext(err, fmt.Sprint("[", i, "]")) + } + } + if i < val.Len() { + val.SetLen(i) + } + return nil +} + +func decodeListArray(s *Stream, val reflect.Value, elemdec decoder) error { + if _, err := s.List(); err != nil { + return wrapStreamError(err, val.Type()) + } + vlen := val.Len() + i := 0 + for ; i < vlen; i++ { + if err := elemdec(s, val.Index(i)); err == EOL { + break + } else if err != nil { + return addErrorContext(err, fmt.Sprint("[", i, "]")) + } + } + if i < vlen { + return &decodeError{msg: "input list has too few elements", typ: val.Type()} + } + return wrapStreamError(s.ListEnd(), val.Type()) +} + +func decodeByteSlice(s *Stream, val reflect.Value) error { + b, err := s.Bytes() + if err != nil { + return wrapStreamError(err, val.Type()) + } + val.SetBytes(b) + return nil +} + +func decodeByteArray(s *Stream, val reflect.Value) error { + kind, size, err := s.Kind() + if err != nil { + return err + } + slice := byteArrayBytes(val, val.Len()) + switch kind { + case Byte: + if len(slice) == 0 { + return &decodeError{msg: "input string too long", typ: val.Type()} + } else if len(slice) > 1 { + return &decodeError{msg: "input string too short", typ: val.Type()} + } + slice[0] = s.byteval + s.kind = -1 + case String: + if uint64(len(slice)) < size { + return &decodeError{msg: "input string too long", typ: val.Type()} + } + if uint64(len(slice)) > size { + return &decodeError{msg: "input string too short", typ: val.Type()} + } + if err := s.readFull(slice); err != nil { + return err + } + // Reject cases where single byte 
encoding should have been used. + if size == 1 && slice[0] < 128 { + return wrapStreamError(ErrCanonSize, val.Type()) + } + case List: + return wrapStreamError(ErrExpectedString, val.Type()) + } + return nil +} + +func makeStructDecoder(typ reflect.Type) (decoder, error) { + fields, err := structFields(typ) + if err != nil { + return nil, err + } + for _, f := range fields { + if f.info.decoderErr != nil { + return nil, structFieldError{typ, f.index, f.info.decoderErr} + } + } + dec := func(s *Stream, val reflect.Value) (err error) { + if _, err := s.List(); err != nil { + return wrapStreamError(err, typ) + } + for i, f := range fields { + err := f.info.decoder(s, val.Field(f.index)) + if err == EOL { + if f.optional { + // The field is optional, so reaching the end of the list before + // reaching the last field is acceptable. All remaining undecoded + // fields are zeroed. + zeroFields(val, fields[i:]) + break + } + return &decodeError{msg: "too few elements", typ: typ} + } else if err != nil { + return addErrorContext(err, "."+typ.Field(f.index).Name) + } + } + return wrapStreamError(s.ListEnd(), typ) + } + return dec, nil +} + +func zeroFields(structval reflect.Value, fields []field) { + for _, f := range fields { + fv := structval.Field(f.index) + fv.Set(reflect.Zero(fv.Type())) + } +} + +// makePtrDecoder creates a decoder that decodes into the pointer's element type. +func makePtrDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) { + etype := typ.Elem() + etypeinfo := theTC.infoWhileGenerating(etype, rlpstruct.Tags{}) + switch { + case etypeinfo.decoderErr != nil: + return nil, etypeinfo.decoderErr + case !tag.NilOK: + return makeSimplePtrDecoder(etype, etypeinfo), nil + default: + return makeNilPtrDecoder(etype, etypeinfo, tag), nil + } +} + +func makeSimplePtrDecoder(etype reflect.Type, etypeinfo *typeinfo) decoder { + return func(s *Stream, val reflect.Value) (err error) { + newval := val + if val.IsNil() { + newval = reflect.New(etype) + } + if err = etypeinfo.decoder(s, newval.Elem()); err == nil { + val.Set(newval) + } + return err + } +} + +// makeNilPtrDecoder creates a decoder that decodes empty values as nil. Non-empty +// values are decoded into a value of the element type, just like makePtrDecoder does. +// +// This decoder is used for pointer-typed struct fields with struct tag "nil". +func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, ts rlpstruct.Tags) decoder { + typ := reflect.PtrTo(etype) + nilPtr := reflect.Zero(typ) + + // Determine the value kind that results in nil pointer. + nilKind := typeNilKind(etype, ts) + + return func(s *Stream, val reflect.Value) (err error) { + kind, size, err := s.Kind() + if err != nil { + val.Set(nilPtr) + return wrapStreamError(err, typ) + } + // Handle empty values as a nil pointer. + if kind != Byte && size == 0 { + if kind != nilKind { + return &decodeError{ + msg: fmt.Sprintf("wrong kind of empty value (got %v, want %v)", kind, nilKind), + typ: typ, + } + } + // rearm s.Kind. This is important because the input + // position must advance to the next value even though + // we don't read anything. 
+ s.kind = -1 + val.Set(nilPtr) + return nil + } + newval := val + if val.IsNil() { + newval = reflect.New(etype) + } + if err = etypeinfo.decoder(s, newval.Elem()); err == nil { + val.Set(newval) + } + return err + } +} + +var ifsliceType = reflect.TypeOf([]interface{}{}) + +func decodeInterface(s *Stream, val reflect.Value) error { + if val.Type().NumMethod() != 0 { + return fmt.Errorf("rlp: type %v is not RLP-serializable", val.Type()) + } + kind, _, err := s.Kind() + if err != nil { + return err + } + if kind == List { + slice := reflect.New(ifsliceType).Elem() + if err := decodeListSlice(s, slice, decodeInterface); err != nil { + return err + } + val.Set(slice) + } else { + b, err := s.Bytes() + if err != nil { + return err + } + val.Set(reflect.ValueOf(b)) + } + return nil +} + +func decodeDecoder(s *Stream, val reflect.Value) error { + return val.Addr().Interface().(Decoder).DecodeRLP(s) +} + +// Kind represents the kind of value contained in an RLP stream. +type Kind int8 + +const ( + Byte Kind = iota + String + List +) + +func (k Kind) String() string { + switch k { + case Byte: + return "Byte" + case String: + return "String" + case List: + return "List" + default: + return fmt.Sprintf("Unknown(%d)", k) + } +} + +// ByteReader must be implemented by any input reader for a Stream. It +// is implemented by e.g. bufio.Reader and bytes.Reader. +type ByteReader interface { + io.Reader + io.ByteReader +} + +// Stream can be used for piecemeal decoding of an input stream. This +// is useful if the input is very large or if the decoding rules for a +// type depend on the input structure. Stream does not keep an +// internal buffer. After decoding a value, the input reader will be +// positioned just before the type information for the next value. +// +// When decoding a list and the input position reaches the declared +// length of the list, all operations will return error EOL. +// The end of the list must be acknowledged using ListEnd to continue +// reading the enclosing list. +// +// Stream is not safe for concurrent use. +type Stream struct { + r ByteReader + + remaining uint64 // number of bytes remaining to be read from r + size uint64 // size of value ahead + kinderr error // error from last readKind + stack []uint64 // list sizes + uintbuf [32]byte // auxiliary buffer for integer decoding + kind Kind // kind of value ahead + byteval byte // value of single byte in type tag + limited bool // true if input limit is in effect +} + +// NewStream creates a new decoding stream reading from r. +// +// If r implements the ByteReader interface, Stream will +// not introduce any buffering. +// +// For non-toplevel values, Stream returns ErrElemTooLarge +// for values that do not fit into the enclosing list. +// +// Stream supports an optional input limit. If a limit is set, the +// size of any toplevel value will be checked against the remaining +// input length. Stream operations that encounter a value exceeding +// the remaining input length will return ErrValueTooLarge. The limit +// can be set by passing a non-zero value for inputLimit. +// +// If r is a bytes.Reader or strings.Reader, the input limit is set to +// the length of r's underlying data unless an explicit limit is +// provided. +func NewStream(r io.Reader, inputLimit uint64) *Stream { + s := new(Stream) + s.Reset(r, inputLimit) + return s +} + +// NewListStream creates a new stream that pretends to be positioned +// at an encoded list of the given length. 
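+//
+// For illustration (mirroring its use in TestNewListStream in decode_test.go):
+// reading three uints from a bare list payload of length 3:
+//
+//	ls := NewListStream(bytes.NewReader([]byte{0x01, 0x01, 0x01}), 3)
+//	size, _ := ls.List() // size == 3
+//	v, _ := ls.Uint()    // v == 1; two more Uint calls read the rest
+//	_ = ls.ListEnd()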
+func NewListStream(r io.Reader, len uint64) *Stream { + s := new(Stream) + s.Reset(r, len) + s.kind = List + s.size = len + return s +} + +// Bytes reads an RLP string and returns its contents as a byte slice. +// If the input does not contain an RLP string, the returned +// error will be ErrExpectedString. +func (s *Stream) Bytes() ([]byte, error) { + kind, size, err := s.Kind() + if err != nil { + return nil, err + } + switch kind { + case Byte: + s.kind = -1 // rearm Kind + return []byte{s.byteval}, nil + case String: + b := make([]byte, size) + if err = s.readFull(b); err != nil { + return nil, err + } + if size == 1 && b[0] < 128 { + return nil, ErrCanonSize + } + return b, nil + default: + return nil, ErrExpectedString + } +} + +// ReadBytes decodes the next RLP value and stores the result in b. +// The value size must match len(b) exactly. +func (s *Stream) ReadBytes(b []byte) error { + kind, size, err := s.Kind() + if err != nil { + return err + } + switch kind { + case Byte: + if len(b) != 1 { + return fmt.Errorf("input value has wrong size 1, want %d", len(b)) + } + b[0] = s.byteval + s.kind = -1 // rearm Kind + return nil + case String: + if uint64(len(b)) != size { + return fmt.Errorf("input value has wrong size %d, want %d", size, len(b)) + } + if err = s.readFull(b); err != nil { + return err + } + if size == 1 && b[0] < 128 { + return ErrCanonSize + } + return nil + default: + return ErrExpectedString + } +} + +// Raw reads a raw encoded value including RLP type information. +func (s *Stream) Raw() ([]byte, error) { + kind, size, err := s.Kind() + if err != nil { + return nil, err + } + if kind == Byte { + s.kind = -1 // rearm Kind + return []byte{s.byteval}, nil + } + // The original header has already been read and is no longer + // available. Read content and put a new header in front of it. + start := headsize(size) + buf := make([]byte, uint64(start)+size) + if err := s.readFull(buf[start:]); err != nil { + return nil, err + } + if kind == String { + puthead(buf, 0x80, 0xB7, size) + } else { + puthead(buf, 0xC0, 0xF7, size) + } + return buf, nil +} + +// Uint reads an RLP string of up to 8 bytes and returns its contents +// as an unsigned integer. If the input does not contain an RLP string, the +// returned error will be ErrExpectedString. +// +// Deprecated: use s.Uint64 instead. +func (s *Stream) Uint() (uint64, error) { + return s.uint(64) +} + +func (s *Stream) Uint64() (uint64, error) { + return s.uint(64) +} + +func (s *Stream) Uint32() (uint32, error) { + i, err := s.uint(32) + return uint32(i), err +} + +func (s *Stream) Uint16() (uint16, error) { + i, err := s.uint(16) + return uint16(i), err +} + +func (s *Stream) Uint8() (uint8, error) { + i, err := s.uint(8) + return uint8(i), err +} + +func (s *Stream) uint(maxbits int) (uint64, error) { + kind, size, err := s.Kind() + if err != nil { + return 0, err + } + switch kind { + case Byte: + if s.byteval == 0 { + return 0, ErrCanonInt + } + s.kind = -1 // rearm Kind + return uint64(s.byteval), nil + case String: + if size > uint64(maxbits/8) { + return 0, errUintOverflow + } + v, err := s.readUint(byte(size)) + switch { + case err == ErrCanonSize: + // Adjust error because we're not reading a size right now. + return 0, ErrCanonInt + case err != nil: + return 0, err + case size > 0 && v < 128: + return 0, ErrCanonSize + default: + return v, nil + } + default: + return 0, ErrExpectedString + } +} + +// Bool reads an RLP string of up to 1 byte and returns its contents +// as a boolean. 
If the input does not contain an RLP string, the +// returned error will be ErrExpectedString. +func (s *Stream) Bool() (bool, error) { + num, err := s.uint(8) + if err != nil { + return false, err + } + switch num { + case 0: + return false, nil + case 1: + return true, nil + default: + return false, fmt.Errorf("rlp: invalid boolean value: %d", num) + } +} + +// List starts decoding an RLP list. If the input does not contain a +// list, the returned error will be ErrExpectedList. When the list's +// end has been reached, any Stream operation will return EOL. +func (s *Stream) List() (size uint64, err error) { + kind, size, err := s.Kind() + if err != nil { + return 0, err + } + if kind != List { + return 0, ErrExpectedList + } + + // Remove size of inner list from outer list before pushing the new size + // onto the stack. This ensures that the remaining outer list size will + // be correct after the matching call to ListEnd. + if inList, limit := s.listLimit(); inList { + s.stack[len(s.stack)-1] = limit - size + } + s.stack = append(s.stack, size) + s.kind = -1 + s.size = 0 + return size, nil +} + +// ListEnd returns to the enclosing list. +// The input reader must be positioned at the end of a list. +func (s *Stream) ListEnd() error { + // Ensure that no more data is remaining in the current list. + if inList, listLimit := s.listLimit(); !inList { + return errNotInList + } else if listLimit > 0 { + return errNotAtEOL + } + s.stack = s.stack[:len(s.stack)-1] // pop + s.kind = -1 + s.size = 0 + return nil +} + +// MoreDataInList reports whether the current list context contains +// more data to be read. +func (s *Stream) MoreDataInList() bool { + _, listLimit := s.listLimit() + return listLimit > 0 +} + +// BigInt decodes an arbitrary-size integer value. +func (s *Stream) BigInt() (*big.Int, error) { + i := new(big.Int) + if err := s.decodeBigInt(i); err != nil { + return nil, err + } + return i, nil +} + +func (s *Stream) decodeBigInt(dst *big.Int) error { + var buffer []byte + kind, size, err := s.Kind() + switch { + case err != nil: + return err + case kind == List: + return ErrExpectedString + case kind == Byte: + buffer = s.uintbuf[:1] + buffer[0] = s.byteval + s.kind = -1 // re-arm Kind + case size == 0: + // Avoid zero-length read. + s.kind = -1 + case size <= uint64(len(s.uintbuf)): + // For integers smaller than s.uintbuf, allocating a buffer + // can be avoided. + buffer = s.uintbuf[:size] + if err := s.readFull(buffer); err != nil { + return err + } + // Reject inputs where single byte encoding should have been used. + if size == 1 && buffer[0] < 128 { + return ErrCanonSize + } + default: + // For large integers, a temporary buffer is needed. + buffer = make([]byte, size) + if err := s.readFull(buffer); err != nil { + return err + } + } + + // Reject leading zero bytes. + if len(buffer) > 0 && buffer[0] == 0 { + return ErrCanonInt + } + // Set the integer bytes. + dst.SetBytes(buffer) + return nil +} + +// Decode decodes a value and stores the result in the value pointed +// to by val. Please see the documentation for the Decode function +// to learn about the decoding rules. 
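+//
+// For illustration (a minimal sketch): decoding two consecutive values from
+// the same stream:
+//
+//	s := NewStream(bytes.NewReader([]byte{0x01, 0x02}), 2)
+//	var a, b uint8
+//	_ = s.Decode(&a) // a == 1
+//	_ = s.Decode(&b) // b == 2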
+func (s *Stream) Decode(val interface{}) error {
+ if val == nil {
+ return errDecodeIntoNil
+ }
+ rval := reflect.ValueOf(val)
+ rtyp := rval.Type()
+ if rtyp.Kind() != reflect.Ptr {
+ return errNoPointer
+ }
+ if rval.IsNil() {
+ return errDecodeIntoNil
+ }
+ decoder, err := cachedDecoder(rtyp.Elem())
+ if err != nil {
+ return err
+ }
+
+ err = decoder(s, rval.Elem())
+ if decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 {
+ // Add decode target type to error so context has more meaning.
+ decErr.ctx = append(decErr.ctx, fmt.Sprint("(", rtyp.Elem(), ")"))
+ }
+ return err
+}
+
+// Reset discards any information about the current decoding context
+// and starts reading from r. This method is meant to facilitate reuse
+// of a preallocated Stream across many decoding operations.
+//
+// If r does not also implement ByteReader, Stream will do its own
+// buffering.
+func (s *Stream) Reset(r io.Reader, inputLimit uint64) {
+ if inputLimit > 0 {
+ s.remaining = inputLimit
+ s.limited = true
+ } else {
+ // Attempt to automatically discover
+ // the limit when reading from a byte slice.
+ switch br := r.(type) {
+ case *bytes.Reader:
+ s.remaining = uint64(br.Len())
+ s.limited = true
+ case *bytes.Buffer:
+ s.remaining = uint64(br.Len())
+ s.limited = true
+ case *strings.Reader:
+ s.remaining = uint64(br.Len())
+ s.limited = true
+ default:
+ s.limited = false
+ }
+ }
+ // Wrap r with a buffer if it doesn't have one.
+ bufr, ok := r.(ByteReader)
+ if !ok {
+ bufr = bufio.NewReader(r)
+ }
+ s.r = bufr
+ // Reset the decoding context.
+ s.stack = s.stack[:0]
+ s.size = 0
+ s.kind = -1
+ s.kinderr = nil
+ s.byteval = 0
+ s.uintbuf = [32]byte{}
+}
+
+// Kind returns the kind and size of the next value in the
+// input stream.
+//
+// The returned size is the number of bytes that make up the value.
+// For kind == Byte, the size is zero because the value is
+// contained in the type tag.
+//
+// The first call to Kind will read size information from the input
+// reader and leave it positioned at the start of the actual bytes of
+// the value. Subsequent calls to Kind (until the value is decoded)
+// will not advance the input reader and return cached information.
+func (s *Stream) Kind() (kind Kind, size uint64, err error) {
+ if s.kind >= 0 {
+ return s.kind, s.size, s.kinderr
+ }
+
+ // Check for end of list. This needs to be done here because readKind
+ // checks against the list size, and would return the wrong error.
+ inList, listLimit := s.listLimit()
+ if inList && listLimit == 0 {
+ return 0, 0, EOL
+ }
+ // Read the actual size tag.
+ s.kind, s.size, s.kinderr = s.readKind()
+ if s.kinderr == nil {
+ // Check the data size of the value ahead against input limits. This
+ // is done here because many decoders require allocating an input
+ // buffer matching the value size. Checking it here protects those
+ // decoders from inputs declaring very large value size.
+ if inList && s.size > listLimit {
+ s.kinderr = ErrElemTooLarge
+ } else if s.limited && s.size > s.remaining {
+ s.kinderr = ErrValueTooLarge
+ }
+ }
+ return s.kind, s.size, s.kinderr
+}
+
+func (s *Stream) readKind() (kind Kind, size uint64, err error) {
+ b, err := s.readByte()
+ if err != nil {
+ if len(s.stack) == 0 {
+ // At toplevel, adjust the error to actual EOF. io.EOF is
+ // used by callers to determine when to stop decoding.
+ switch err { + case io.ErrUnexpectedEOF: + err = io.EOF + case ErrValueTooLarge: + err = io.EOF + } + } + return 0, 0, err + } + s.byteval = 0 + switch { + case b < 0x80: + // For a single byte whose value is in the [0x00, 0x7F] range, that byte + // is its own RLP encoding. + s.byteval = b + return Byte, 0, nil + case b < 0xB8: + // Otherwise, if a string is 0-55 bytes long, the RLP encoding consists + // of a single byte with value 0x80 plus the length of the string + // followed by the string. The range of the first byte is thus [0x80, 0xB7]. + return String, uint64(b - 0x80), nil + case b < 0xC0: + // If a string is more than 55 bytes long, the RLP encoding consists of a + // single byte with value 0xB7 plus the length of the length of the + // string in binary form, followed by the length of the string, followed + // by the string. For example, a length-1024 string would be encoded as + // 0xB90400 followed by the string. The range of the first byte is thus + // [0xB8, 0xBF]. + size, err = s.readUint(b - 0xB7) + if err == nil && size < 56 { + err = ErrCanonSize + } + return String, size, err + case b < 0xF8: + // If the total payload of a list (i.e. the combined length of all its + // items) is 0-55 bytes long, the RLP encoding consists of a single byte + // with value 0xC0 plus the length of the list followed by the + // concatenation of the RLP encodings of the items. The range of the + // first byte is thus [0xC0, 0xF7]. + return List, uint64(b - 0xC0), nil + default: + // If the total payload of a list is more than 55 bytes long, the RLP + // encoding consists of a single byte with value 0xF7 plus the length of + // the length of the payload in binary form, followed by the length of + // the payload, followed by the concatenation of the RLP encodings of + // the items. The range of the first byte is thus [0xF8, 0xFF]. + size, err = s.readUint(b - 0xF7) + if err == nil && size < 56 { + err = ErrCanonSize + } + return List, size, err + } +} + +func (s *Stream) readUint(size byte) (uint64, error) { + switch size { + case 0: + s.kind = -1 // rearm Kind + return 0, nil + case 1: + b, err := s.readByte() + return uint64(b), err + default: + buffer := s.uintbuf[:8] + for i := range buffer { + buffer[i] = 0 + } + start := int(8 - size) + if err := s.readFull(buffer[start:]); err != nil { + return 0, err + } + if buffer[start] == 0 { + // Note: readUint is also used to decode integer values. + // The error needs to be adjusted to become ErrCanonInt in this case. + return 0, ErrCanonSize + } + return binary.BigEndian.Uint64(buffer[:]), nil + } +} + +// readFull reads into buf from the underlying stream. +func (s *Stream) readFull(buf []byte) (err error) { + if err := s.willRead(uint64(len(buf))); err != nil { + return err + } + var nn, n int + for n < len(buf) && err == nil { + nn, err = s.r.Read(buf[n:]) + n += nn + } + if err == io.EOF { + if n < len(buf) { + err = io.ErrUnexpectedEOF + } else { + // Readers are allowed to give EOF even though the read succeeded. + // In such cases, we discard the EOF, like io.ReadFull() does. + err = nil + } + } + return err +} + +// readByte reads a single byte from the underlying stream. +func (s *Stream) readByte() (byte, error) { + if err := s.willRead(1); err != nil { + return 0, err + } + b, err := s.r.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return b, err +} + +// willRead is called before any read from the underlying stream. It checks +// n against size limits, and updates the limits if n doesn't overflow them. 
+func (s *Stream) willRead(n uint64) error {
+ s.kind = -1 // rearm Kind
+
+ if inList, limit := s.listLimit(); inList {
+ if n > limit {
+ return ErrElemTooLarge
+ }
+ s.stack[len(s.stack)-1] = limit - n
+ }
+ if s.limited {
+ if n > s.remaining {
+ return ErrValueTooLarge
+ }
+ s.remaining -= n
+ }
+ return nil
+}
+
+// listLimit returns the amount of data remaining in the innermost list.
+func (s *Stream) listLimit() (inList bool, limit uint64) {
+ if len(s.stack) == 0 {
+ return false, 0
+ }
+ return true, s.stack[len(s.stack)-1]
+}
diff --git a/rlp/decode_tail_test.go b/rlp/decode_tail_test.go
new file mode 100644
index 0000000000..884c1148b2
--- /dev/null
+++ b/rlp/decode_tail_test.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type structWithTail struct {
+ A, B uint
+ C []uint `rlp:"tail"`
+}
+
+func ExampleDecode_structTagTail() {
+ // In this example, the "tail" struct tag is used to decode lists of
+ // differing length into a struct.
+ var val structWithTail
+
+ err := Decode(bytes.NewReader([]byte{0xC4, 0x01, 0x02, 0x03, 0x04}), &val)
+ fmt.Printf("with 4 elements: err=%v val=%v\n", err, val)
+
+ err = Decode(bytes.NewReader([]byte{0xC6, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06}), &val)
+ fmt.Printf("with 6 elements: err=%v val=%v\n", err, val)
+
+ // Note that at least two list elements must be present to
+ // fill fields A and B:
+ err = Decode(bytes.NewReader([]byte{0xC1, 0x01}), &val)
+ fmt.Printf("with 1 element: err=%q\n", err)
+
+ // Output:
+ // with 4 elements: err= val={1 2 [3 4]}
+ // with 6 elements: err= val={1 2 [3 4 5 6]}
+ // with 1 element: err="rlp: too few elements for rlp.structWithTail"
+}
diff --git a/rlp/decode_test.go b/rlp/decode_test.go
new file mode 100644
index 0000000000..00722f847b
--- /dev/null
+++ b/rlp/decode_test.go
@@ -0,0 +1,1210 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/math"
+)
+
+func TestStreamKind(t *testing.T) {
+ tests := []struct {
+ input string
+ wantKind Kind
+ wantLen uint64
+ }{
+ {"00", Byte, 0},
+ {"01", Byte, 0},
+ {"7F", Byte, 0},
+ {"80", String, 0},
+ {"B7", String, 55},
+ {"B90400", String, 1024},
+ {"BFFFFFFFFFFFFFFFFF", String, ^uint64(0)},
+ {"C0", List, 0},
+ {"C8", List, 8},
+ {"F7", List, 55},
+ {"F90400", List, 1024},
+ {"FFFFFFFFFFFFFFFFFF", List, ^uint64(0)},
+ }
+
+ for i, test := range tests {
+ // using plainReader to inhibit input limit errors.
+ s := NewStream(newPlainReader(unhex(test.input)), 0)
+ kind, len, err := s.Kind()
+ if err != nil {
+ t.Errorf("test %d: Kind returned error: %v", i, err)
+ continue
+ }
+ if kind != test.wantKind {
+ t.Errorf("test %d: kind mismatch: got %d, want %d", i, kind, test.wantKind)
+ }
+ if len != test.wantLen {
+ t.Errorf("test %d: len mismatch: got %d, want %d", i, len, test.wantLen)
+ }
+ }
+}
+
+func TestNewListStream(t *testing.T) {
+ ls := NewListStream(bytes.NewReader(unhex("0101010101")), 3)
+ if k, size, err := ls.Kind(); k != List || size != 3 || err != nil {
+ t.Errorf("Kind() returned (%v, %d, %v), expected (List, 3, nil)", k, size, err)
+ }
+ if size, err := ls.List(); size != 3 || err != nil {
+ t.Errorf("List() returned (%d, %v), expected (3, nil)", size, err)
+ }
+ for i := 0; i < 3; i++ {
+ if val, err := ls.Uint(); val != 1 || err != nil {
+ t.Errorf("Uint() returned (%d, %v), expected (1, nil)", val, err)
+ }
+ }
+ if err := ls.ListEnd(); err != nil {
+ t.Errorf("ListEnd() returned %v, expected nil", err)
+ }
+}
+
+func TestStreamErrors(t *testing.T) {
+ withoutInputLimit := func(b []byte) *Stream {
+ return NewStream(newPlainReader(b), 0)
+ }
+ withCustomInputLimit := func(limit uint64) func([]byte) *Stream {
+ return func(b []byte) *Stream {
+ return NewStream(bytes.NewReader(b), limit)
+ }
+ }
+
+ type calls []string
+ tests := []struct {
+ string
+ calls
+ newStream func([]byte) *Stream // uses bytes.Reader if nil
+ error error
+ }{
+ {"C0", calls{"Bytes"}, nil, ErrExpectedString},
+ {"C0", calls{"Uint"}, nil, ErrExpectedString},
+ {"89000000000000000001", calls{"Uint"}, nil, errUintOverflow},
+ {"00", calls{"List"}, nil, ErrExpectedList},
+ {"80", calls{"List"}, nil, ErrExpectedList},
+ {"C0", calls{"List", "Uint"}, nil, EOL},
+ {"C8C9010101010101010101", calls{"List", "Kind"}, nil, ErrElemTooLarge},
+ {"C3C2010201", calls{"List", "List", "Uint", "Uint", "ListEnd", "Uint"}, nil, EOL},
+ {"00", calls{"ListEnd"}, nil, errNotInList},
+ {"C401020304", calls{"List", "Uint", "ListEnd"}, nil, errNotAtEOL},
+
+ // Non-canonical integers (e.g. leading zero bytes).
+ {"00", calls{"Uint"}, nil, ErrCanonInt},
+ {"820002", calls{"Uint"}, nil, ErrCanonInt},
+ {"8133", calls{"Uint"}, nil, ErrCanonSize},
+ {"817F", calls{"Uint"}, nil, ErrCanonSize},
+ {"8180", calls{"Uint"}, nil, nil},
+
+ // Invalid boolean
+ {"02", calls{"Bool"}, nil, errors.New("rlp: invalid boolean value: 2")},
+
+ // Size tags must use the smallest possible encoding.
+ // Leading zero bytes in the size tag are also rejected.
+ {"8100", calls{"Uint"}, nil, ErrCanonSize}, + {"8100", calls{"Bytes"}, nil, ErrCanonSize}, + {"8101", calls{"Bytes"}, nil, ErrCanonSize}, + {"817F", calls{"Bytes"}, nil, ErrCanonSize}, + {"8180", calls{"Bytes"}, nil, nil}, + {"B800", calls{"Kind"}, withoutInputLimit, ErrCanonSize}, + {"B90000", calls{"Kind"}, withoutInputLimit, ErrCanonSize}, + {"B90055", calls{"Kind"}, withoutInputLimit, ErrCanonSize}, + {"BA0002FFFF", calls{"Bytes"}, withoutInputLimit, ErrCanonSize}, + {"F800", calls{"Kind"}, withoutInputLimit, ErrCanonSize}, + {"F90000", calls{"Kind"}, withoutInputLimit, ErrCanonSize}, + {"F90055", calls{"Kind"}, withoutInputLimit, ErrCanonSize}, + {"FA0002FFFF", calls{"List"}, withoutInputLimit, ErrCanonSize}, + + // Expected EOF + {"", calls{"Kind"}, nil, io.EOF}, + {"", calls{"Uint"}, nil, io.EOF}, + {"", calls{"List"}, nil, io.EOF}, + {"8180", calls{"Uint", "Uint"}, nil, io.EOF}, + {"C0", calls{"List", "ListEnd", "List"}, nil, io.EOF}, + + {"", calls{"List"}, withoutInputLimit, io.EOF}, + {"8180", calls{"Uint", "Uint"}, withoutInputLimit, io.EOF}, + {"C0", calls{"List", "ListEnd", "List"}, withoutInputLimit, io.EOF}, + + // Input limit errors. + {"81", calls{"Bytes"}, nil, ErrValueTooLarge}, + {"81", calls{"Uint"}, nil, ErrValueTooLarge}, + {"81", calls{"Raw"}, nil, ErrValueTooLarge}, + {"BFFFFFFFFFFFFFFFFFFF", calls{"Bytes"}, nil, ErrValueTooLarge}, + {"C801", calls{"List"}, nil, ErrValueTooLarge}, + + // Test for list element size check overflow. + {"CD04040404FFFFFFFFFFFFFFFFFF0303", calls{"List", "Uint", "Uint", "Uint", "Uint", "List"}, nil, ErrElemTooLarge}, + + // Test for input limit overflow. Since we are counting the limit + // down toward zero in Stream.remaining, reading too far can overflow + // remaining to a large value, effectively disabling the limit. + {"C40102030401", calls{"Raw", "Uint"}, withCustomInputLimit(5), io.EOF}, + {"C4010203048180", calls{"Raw", "Uint"}, withCustomInputLimit(6), ErrValueTooLarge}, + + // Check that the same calls are fine without a limit. + {"C40102030401", calls{"Raw", "Uint"}, withoutInputLimit, nil}, + {"C4010203048180", calls{"Raw", "Uint"}, withoutInputLimit, nil}, + + // Unexpected EOF. This only happens when there is + // no input limit, so the reader needs to be 'dumbed down'. + {"81", calls{"Bytes"}, withoutInputLimit, io.ErrUnexpectedEOF}, + {"81", calls{"Uint"}, withoutInputLimit, io.ErrUnexpectedEOF}, + {"BFFFFFFFFFFFFFFF", calls{"Bytes"}, withoutInputLimit, io.ErrUnexpectedEOF}, + {"C801", calls{"List", "Uint", "Uint"}, withoutInputLimit, io.ErrUnexpectedEOF}, + + // This test verifies that the input position is advanced + // correctly when calling Bytes for empty strings. Kind can be called + // any number of times in between and doesn't advance. 
+ {"C3808080", calls{ + "List", // enter the list + "Bytes", // past first element + + "Kind", "Kind", "Kind", // this shouldn't advance + + "Bytes", // past second element + + "Kind", "Kind", // can't hurt to try + + "Bytes", // past final element + "Bytes", // this one should fail + }, nil, EOL}, + } + +testfor: + for i, test := range tests { + if test.newStream == nil { + test.newStream = func(b []byte) *Stream { return NewStream(bytes.NewReader(b), 0) } + } + s := test.newStream(unhex(test.string)) + rs := reflect.ValueOf(s) + for j, call := range test.calls { + fval := rs.MethodByName(call) + ret := fval.Call(nil) + err := "" + if lastret := ret[len(ret)-1].Interface(); lastret != nil { + err = lastret.(error).Error() + } + if j == len(test.calls)-1 { + want := "" + if test.error != nil { + want = test.error.Error() + } + if err != want { + t.Log(test) + t.Errorf("test %d: last call (%s) error mismatch\ngot: %s\nwant: %s", + i, call, err, test.error) + } + } else if err != "" { + t.Log(test) + t.Errorf("test %d: call %d (%s) unexpected error: %q", i, j, call, err) + continue testfor + } + } + } +} + +func TestStreamList(t *testing.T) { + s := NewStream(bytes.NewReader(unhex("C80102030405060708")), 0) + + len, err := s.List() + if err != nil { + t.Fatalf("List error: %v", err) + } + if len != 8 { + t.Fatalf("List returned invalid length, got %d, want 8", len) + } + + for i := uint64(1); i <= 8; i++ { + v, err := s.Uint() + if err != nil { + t.Fatalf("Uint error: %v", err) + } + if i != v { + t.Errorf("Uint returned wrong value, got %d, want %d", v, i) + } + } + + if _, err := s.Uint(); err != EOL { + t.Errorf("Uint error mismatch, got %v, want %v", err, EOL) + } + if err = s.ListEnd(); err != nil { + t.Fatalf("ListEnd error: %v", err) + } +} + +func TestStreamRaw(t *testing.T) { + tests := []struct { + input string + output string + }{ + { + "C58401010101", + "8401010101", + }, + { + "F842B84001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101", + "B84001010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101", + }, + } + for i, tt := range tests { + s := NewStream(bytes.NewReader(unhex(tt.input)), 0) + s.List() + + want := unhex(tt.output) + raw, err := s.Raw() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(want, raw) { + t.Errorf("test %d: raw mismatch: got %x, want %x", i, raw, want) + } + } +} + +func TestStreamReadBytes(t *testing.T) { + tests := []struct { + input string + size int + err string + }{ + // kind List + {input: "C0", size: 1, err: "rlp: expected String or Byte"}, + // kind Byte + {input: "04", size: 0, err: "input value has wrong size 1, want 0"}, + {input: "04", size: 1}, + {input: "04", size: 2, err: "input value has wrong size 1, want 2"}, + // kind String + {input: "820102", size: 0, err: "input value has wrong size 2, want 0"}, + {input: "820102", size: 1, err: "input value has wrong size 2, want 1"}, + {input: "820102", size: 2}, + {input: "820102", size: 3, err: "input value has wrong size 2, want 3"}, + } + + for _, test := range tests { + test := test + name := fmt.Sprintf("input_%s/size_%d", test.input, test.size) + t.Run(name, func(t *testing.T) { + s := NewStream(bytes.NewReader(unhex(test.input)), 0) + b := make([]byte, test.size) + err := s.ReadBytes(b) + if test.err == "" { + if err != nil { + t.Errorf("unexpected error %q", err) + } + } else { + if err == nil { + t.Errorf("expected error, got nil") 
} else if err.Error() != test.err {
+ t.Errorf("wrong error %q", err)
+ }
+ }
+ })
+ }
+}
+
+func TestDecodeErrors(t *testing.T) {
+ r := bytes.NewReader(nil)
+
+ if err := Decode(r, nil); err != errDecodeIntoNil {
+ t.Errorf("Decode(r, nil) error mismatch, got %q, want %q", err, errDecodeIntoNil)
+ }
+
+ var nilptr *struct{}
+ if err := Decode(r, nilptr); err != errDecodeIntoNil {
+ t.Errorf("Decode(r, nilptr) error mismatch, got %q, want %q", err, errDecodeIntoNil)
+ }
+
+ if err := Decode(r, struct{}{}); err != errNoPointer {
+ t.Errorf("Decode(r, struct{}{}) error mismatch, got %q, want %q", err, errNoPointer)
+ }
+
+ expectErr := "rlp: type chan bool is not RLP-serializable"
+ if err := Decode(r, new(chan bool)); err == nil || err.Error() != expectErr {
+ t.Errorf("Decode(r, new(chan bool)) error mismatch, got %q, want %q", err, expectErr)
+ }
+
+ if err := Decode(r, new(uint)); err != io.EOF {
+ t.Errorf("Decode(r, new(uint)) error mismatch, got %q, want %q", err, io.EOF)
+ }
+}
+
+type decodeTest struct {
+ input string
+ ptr interface{}
+ value interface{}
+ error string
+}
+
+type simplestruct struct {
+ A uint
+ B string
+}
+
+type recstruct struct {
+ I uint
+ Child *recstruct `rlp:"nil"`
+}
+
+type bigIntStruct struct {
+ I *big.Int
+ B string
+}
+
+type invalidNilTag struct {
+ X []byte `rlp:"nil"`
+}
+
+type invalidTail1 struct {
+ A uint `rlp:"tail"`
+ B string
+}
+
+type invalidTail2 struct {
+ A uint
+ B string `rlp:"tail"`
+}
+
+type tailRaw struct {
+ A uint
+ Tail []RawValue `rlp:"tail"`
+}
+
+type tailUint struct {
+ A uint
+ Tail []uint `rlp:"tail"`
+}
+
+type tailPrivateFields struct {
+ A uint
+ Tail []uint `rlp:"tail"`
+ x, y bool //lint:ignore U1000 unused fields required for testing purposes.
+}
+
+type nilListUint struct {
+ X *uint `rlp:"nilList"`
+}
+
+type nilStringSlice struct {
+ X *[]uint `rlp:"nilString"`
+}
+
+type intField struct {
+ X int
+}
+
+type optionalFields struct {
+ A uint
+ B uint `rlp:"optional"`
+ C uint `rlp:"optional"`
+}
+
+type optionalAndTailField struct {
+ A uint
+ B uint `rlp:"optional"`
+ Tail []uint `rlp:"tail"`
+}
+
+type optionalBigIntField struct {
+ A uint
+ B *big.Int `rlp:"optional"`
+}
+
+type optionalPtrField struct {
+ A uint
+ B *[3]byte `rlp:"optional"`
+}
+
+type optionalPtrFieldNil struct {
+ A uint
+ B *[3]byte `rlp:"optional,nil"`
+}
+
+type ignoredField struct {
+ A uint
+ B uint `rlp:"-"`
+ C uint
+}
+
+var (
+ veryBigInt = new(big.Int).Add(
+ new(big.Int).Lsh(big.NewInt(0xFFFFFFFFFFFFFF), 16),
+ big.NewInt(0xFFFF),
+ )
+ veryVeryBigInt = new(big.Int).Exp(veryBigInt, big.NewInt(8), nil)
+)
+
+var decodeTests = []decodeTest{
+ // booleans
+ {input: "01", ptr: new(bool), value: true},
+ {input: "80", ptr: new(bool), value: false},
+ {input: "02", ptr: new(bool), error: "rlp: invalid boolean value: 2"},
+
+ // integers
+ {input: "05", ptr: new(uint32), value: uint32(5)},
+ {input: "80", ptr: new(uint32), value: uint32(0)},
+ {input: "820505", ptr: new(uint32), value: uint32(0x0505)},
+ {input: "83050505", ptr: new(uint32), value: uint32(0x050505)},
+ {input: "8405050505", ptr: new(uint32), value: uint32(0x05050505)},
+ {input: "850505050505", ptr: new(uint32), error: "rlp: input string too long for uint32"},
+ {input: "C0", ptr: new(uint32), error: "rlp: expected input string or byte for uint32"},
+ {input: "00", ptr: new(uint32), error: "rlp: non-canonical integer (leading zero bytes) for uint32"},
+ {input: "8105", ptr: new(uint32), error: "rlp: non-canonical size information for uint32"},
+ {input: "820004", ptr:
new(uint32), error: "rlp: non-canonical integer (leading zero bytes) for uint32"}, + {input: "B8020004", ptr: new(uint32), error: "rlp: non-canonical size information for uint32"}, + + // slices + {input: "C0", ptr: new([]uint), value: []uint{}}, + {input: "C80102030405060708", ptr: new([]uint), value: []uint{1, 2, 3, 4, 5, 6, 7, 8}}, + {input: "F8020004", ptr: new([]uint), error: "rlp: non-canonical size information for []uint"}, + + // arrays + {input: "C50102030405", ptr: new([5]uint), value: [5]uint{1, 2, 3, 4, 5}}, + {input: "C0", ptr: new([5]uint), error: "rlp: input list has too few elements for [5]uint"}, + {input: "C102", ptr: new([5]uint), error: "rlp: input list has too few elements for [5]uint"}, + {input: "C6010203040506", ptr: new([5]uint), error: "rlp: input list has too many elements for [5]uint"}, + {input: "F8020004", ptr: new([5]uint), error: "rlp: non-canonical size information for [5]uint"}, + + // zero sized arrays + {input: "C0", ptr: new([0]uint), value: [0]uint{}}, + {input: "C101", ptr: new([0]uint), error: "rlp: input list has too many elements for [0]uint"}, + + // byte slices + {input: "01", ptr: new([]byte), value: []byte{1}}, + {input: "80", ptr: new([]byte), value: []byte{}}, + {input: "8D6162636465666768696A6B6C6D", ptr: new([]byte), value: []byte("abcdefghijklm")}, + {input: "C0", ptr: new([]byte), error: "rlp: expected input string or byte for []uint8"}, + {input: "8105", ptr: new([]byte), error: "rlp: non-canonical size information for []uint8"}, + + // byte arrays + {input: "02", ptr: new([1]byte), value: [1]byte{2}}, + {input: "8180", ptr: new([1]byte), value: [1]byte{128}}, + {input: "850102030405", ptr: new([5]byte), value: [5]byte{1, 2, 3, 4, 5}}, + + // byte array errors + {input: "02", ptr: new([5]byte), error: "rlp: input string too short for [5]uint8"}, + {input: "80", ptr: new([5]byte), error: "rlp: input string too short for [5]uint8"}, + {input: "820000", ptr: new([5]byte), error: "rlp: input string too short for [5]uint8"}, + {input: "C0", ptr: new([5]byte), error: "rlp: expected input string or byte for [5]uint8"}, + {input: "C3010203", ptr: new([5]byte), error: "rlp: expected input string or byte for [5]uint8"}, + {input: "86010203040506", ptr: new([5]byte), error: "rlp: input string too long for [5]uint8"}, + {input: "8105", ptr: new([1]byte), error: "rlp: non-canonical size information for [1]uint8"}, + {input: "817F", ptr: new([1]byte), error: "rlp: non-canonical size information for [1]uint8"}, + + // zero sized byte arrays + {input: "80", ptr: new([0]byte), value: [0]byte{}}, + {input: "01", ptr: new([0]byte), error: "rlp: input string too long for [0]uint8"}, + {input: "8101", ptr: new([0]byte), error: "rlp: input string too long for [0]uint8"}, + + // strings + {input: "00", ptr: new(string), value: "\000"}, + {input: "8D6162636465666768696A6B6C6D", ptr: new(string), value: "abcdefghijklm"}, + {input: "C0", ptr: new(string), error: "rlp: expected input string or byte for string"}, + + // big ints + {input: "80", ptr: new(*big.Int), value: big.NewInt(0)}, + {input: "01", ptr: new(*big.Int), value: big.NewInt(1)}, + {input: "89FFFFFFFFFFFFFFFFFF", ptr: new(*big.Int), value: veryBigInt}, + {input: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001", ptr: new(*big.Int), value: veryVeryBigInt}, + {input: "10", ptr: new(big.Int), value: *big.NewInt(16)}, // non-pointer also works + {input: "C0", ptr: new(*big.Int), error: "rlp: expected 
input string or byte for *big.Int"}, + {input: "00", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, + {input: "820001", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"}, + {input: "8105", ptr: new(*big.Int), error: "rlp: non-canonical size information for *big.Int"}, + + // structs + { + input: "C50583343434", + ptr: new(simplestruct), + value: simplestruct{5, "444"}, + }, + { + input: "C601C402C203C0", + ptr: new(recstruct), + value: recstruct{1, &recstruct{2, &recstruct{3, nil}}}, + }, + { + // This checks that empty big.Int works correctly in struct context. It's easy to + // miss the update of s.kind for this case, so it needs its own test. + input: "C58083343434", + ptr: new(bigIntStruct), + value: bigIntStruct{new(big.Int), "444"}, + }, + + // struct errors + { + input: "C0", + ptr: new(simplestruct), + error: "rlp: too few elements for rlp.simplestruct", + }, + { + input: "C105", + ptr: new(simplestruct), + error: "rlp: too few elements for rlp.simplestruct", + }, + { + input: "C7C50583343434C0", + ptr: new([]*simplestruct), + error: "rlp: too few elements for rlp.simplestruct, decoding into ([]*rlp.simplestruct)[1]", + }, + { + input: "83222222", + ptr: new(simplestruct), + error: "rlp: expected input list for rlp.simplestruct", + }, + { + input: "C3010101", + ptr: new(simplestruct), + error: "rlp: input list has too many elements for rlp.simplestruct", + }, + { + input: "C501C3C00000", + ptr: new(recstruct), + error: "rlp: expected input string or byte for uint, decoding into (rlp.recstruct).Child.I", + }, + { + input: "C103", + ptr: new(intField), + error: "rlp: type int is not RLP-serializable (struct field rlp.intField.X)", + }, + { + input: "C50102C20102", + ptr: new(tailUint), + error: "rlp: expected input string or byte for uint, decoding into (rlp.tailUint).Tail[1]", + }, + { + input: "C0", + ptr: new(invalidNilTag), + error: `rlp: invalid struct tag "nil" for rlp.invalidNilTag.X (field is not a pointer)`, + }, + + // struct tag "tail" + { + input: "C3010203", + ptr: new(tailRaw), + value: tailRaw{A: 1, Tail: []RawValue{unhex("02"), unhex("03")}}, + }, + { + input: "C20102", + ptr: new(tailRaw), + value: tailRaw{A: 1, Tail: []RawValue{unhex("02")}}, + }, + { + input: "C101", + ptr: new(tailRaw), + value: tailRaw{A: 1, Tail: []RawValue{}}, + }, + { + input: "C3010203", + ptr: new(tailPrivateFields), + value: tailPrivateFields{A: 1, Tail: []uint{2, 3}}, + }, + { + input: "C0", + ptr: new(invalidTail1), + error: `rlp: invalid struct tag "tail" for rlp.invalidTail1.A (must be on last field)`, + }, + { + input: "C0", + ptr: new(invalidTail2), + error: `rlp: invalid struct tag "tail" for rlp.invalidTail2.B (field type is not slice)`, + }, + + // struct tag "-" + { + input: "C20102", + ptr: new(ignoredField), + value: ignoredField{A: 1, C: 2}, + }, + + // struct tag "nilList" + { + input: "C180", + ptr: new(nilListUint), + error: "rlp: wrong kind of empty value (got String, want List) for *uint, decoding into (rlp.nilListUint).X", + }, + { + input: "C1C0", + ptr: new(nilListUint), + value: nilListUint{}, + }, + { + input: "C103", + ptr: new(nilListUint), + value: func() interface{} { + v := uint(3) + return nilListUint{X: &v} + }(), + }, + + // struct tag "nilString" + { + input: "C1C0", + ptr: new(nilStringSlice), + error: "rlp: wrong kind of empty value (got List, want String) for *[]uint, decoding into (rlp.nilStringSlice).X", + }, + { + input: "C180", + ptr: new(nilStringSlice), + value: 
nilStringSlice{}, + }, + { + input: "C2C103", + ptr: new(nilStringSlice), + value: nilStringSlice{X: &[]uint{3}}, + }, + + // struct tag "optional" + { + input: "C101", + ptr: new(optionalFields), + value: optionalFields{1, 0, 0}, + }, + { + input: "C20102", + ptr: new(optionalFields), + value: optionalFields{1, 2, 0}, + }, + { + input: "C3010203", + ptr: new(optionalFields), + value: optionalFields{1, 2, 3}, + }, + { + input: "C401020304", + ptr: new(optionalFields), + error: "rlp: input list has too many elements for rlp.optionalFields", + }, + { + input: "C101", + ptr: new(optionalAndTailField), + value: optionalAndTailField{A: 1}, + }, + { + input: "C20102", + ptr: new(optionalAndTailField), + value: optionalAndTailField{A: 1, B: 2, Tail: []uint{}}, + }, + { + input: "C401020304", + ptr: new(optionalAndTailField), + value: optionalAndTailField{A: 1, B: 2, Tail: []uint{3, 4}}, + }, + { + input: "C101", + ptr: new(optionalBigIntField), + value: optionalBigIntField{A: 1, B: nil}, + }, + { + input: "C20102", + ptr: new(optionalBigIntField), + value: optionalBigIntField{A: 1, B: big.NewInt(2)}, + }, + { + input: "C101", + ptr: new(optionalPtrField), + value: optionalPtrField{A: 1}, + }, + { + input: "C20180", // not accepted because "optional" doesn't enable "nil" + ptr: new(optionalPtrField), + error: "rlp: input string too short for [3]uint8, decoding into (rlp.optionalPtrField).B", + }, + { + input: "C20102", + ptr: new(optionalPtrField), + error: "rlp: input string too short for [3]uint8, decoding into (rlp.optionalPtrField).B", + }, + { + input: "C50183010203", + ptr: new(optionalPtrField), + value: optionalPtrField{A: 1, B: &[3]byte{1, 2, 3}}, + }, + { + input: "C101", + ptr: new(optionalPtrFieldNil), + value: optionalPtrFieldNil{A: 1}, + }, + { + input: "C20180", // accepted because "nil" tag allows empty input + ptr: new(optionalPtrFieldNil), + value: optionalPtrFieldNil{A: 1}, + }, + { + input: "C20102", + ptr: new(optionalPtrFieldNil), + error: "rlp: input string too short for [3]uint8, decoding into (rlp.optionalPtrFieldNil).B", + }, + + // struct tag "optional" field clearing + { + input: "C101", + ptr: &optionalFields{A: 9, B: 8, C: 7}, + value: optionalFields{A: 1, B: 0, C: 0}, + }, + { + input: "C20102", + ptr: &optionalFields{A: 9, B: 8, C: 7}, + value: optionalFields{A: 1, B: 2, C: 0}, + }, + { + input: "C20102", + ptr: &optionalAndTailField{A: 9, B: 8, Tail: []uint{7, 6, 5}}, + value: optionalAndTailField{A: 1, B: 2, Tail: []uint{}}, + }, + { + input: "C101", + ptr: &optionalPtrField{A: 9, B: &[3]byte{8, 7, 6}}, + value: optionalPtrField{A: 1}, + }, + + // RawValue + {input: "01", ptr: new(RawValue), value: RawValue(unhex("01"))}, + {input: "82FFFF", ptr: new(RawValue), value: RawValue(unhex("82FFFF"))}, + {input: "C20102", ptr: new([]RawValue), value: []RawValue{unhex("01"), unhex("02")}}, + + // pointers + {input: "00", ptr: new(*[]byte), value: &[]byte{0}}, + {input: "80", ptr: new(*uint), value: uintp(0)}, + {input: "C0", ptr: new(*uint), error: "rlp: expected input string or byte for uint"}, + {input: "07", ptr: new(*uint), value: uintp(7)}, + {input: "817F", ptr: new(*uint), error: "rlp: non-canonical size information for uint"}, + {input: "8180", ptr: new(*uint), value: uintp(0x80)}, + {input: "C109", ptr: new(*[]uint), value: &[]uint{9}}, + {input: "C58403030303", ptr: new(*[][]byte), value: &[][]byte{{3, 3, 3, 3}}}, + + // check that input position is advanced also for empty values. 
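+ // Each 0x80 here is the empty string, which decodes to a zero value; the stream must still step past both of them so the trailing 0x05 lands in the final element.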
+ {input: "C3808005", ptr: new([]*uint), value: []*uint{uintp(0), uintp(0), uintp(5)}}, + + // interface{} + {input: "00", ptr: new(interface{}), value: []byte{0}}, + {input: "01", ptr: new(interface{}), value: []byte{1}}, + {input: "80", ptr: new(interface{}), value: []byte{}}, + {input: "850505050505", ptr: new(interface{}), value: []byte{5, 5, 5, 5, 5}}, + {input: "C0", ptr: new(interface{}), value: []interface{}{}}, + {input: "C50183040404", ptr: new(interface{}), value: []interface{}{[]byte{1}, []byte{4, 4, 4}}}, + { + input: "C3010203", + ptr: new([]io.Reader), + error: "rlp: type io.Reader is not RLP-serializable", + }, + + // fuzzer crashes + { + input: "c330f9c030f93030ce3030303030303030bd303030303030", + ptr: new(interface{}), + error: "rlp: element is larger than containing list", + }, +} + +func uintp(i uint) *uint { return &i } + +func runTests(t *testing.T, decode func([]byte, interface{}) error) { + for i, test := range decodeTests { + input, err := hex.DecodeString(test.input) + if err != nil { + t.Errorf("test %d: invalid hex input %q", i, test.input) + continue + } + err = decode(input, test.ptr) + if err != nil && test.error == "" { + t.Errorf("test %d: unexpected Decode error: %v\ndecoding into %T\ninput %q", + i, err, test.ptr, test.input) + continue + } + if test.error != "" && fmt.Sprint(err) != test.error { + t.Errorf("test %d: Decode error mismatch\ngot %v\nwant %v\ndecoding into %T\ninput %q", + i, err, test.error, test.ptr, test.input) + continue + } + deref := reflect.ValueOf(test.ptr).Elem().Interface() + if err == nil && !reflect.DeepEqual(deref, test.value) { + t.Errorf("test %d: value mismatch\ngot %#v\nwant %#v\ndecoding into %T\ninput %q", + i, deref, test.value, test.ptr, test.input) + } + } +} + +func TestDecodeWithByteReader(t *testing.T) { + runTests(t, func(input []byte, into interface{}) error { + return Decode(bytes.NewReader(input), into) + }) +} + +func testDecodeWithEncReader(t *testing.T, n int) { + s := strings.Repeat("0", n) + _, r, _ := EncodeToReader(s) + var decoded string + err := Decode(r, &decoded) + if err != nil { + t.Errorf("Unexpected decode error with n=%v: %v", n, err) + } + if decoded != s { + t.Errorf("Decode mismatch with n=%v", n) + } +} + +// This is a regression test checking that decoding from encReader +// works for RLP values of size 8192 bytes or more. +func TestDecodeWithEncReader(t *testing.T) { + testDecodeWithEncReader(t, 8188) // length with header is 8191 + testDecodeWithEncReader(t, 8189) // length with header is 8192 +} + +// plainReader reads from a byte slice but does not +// implement ReadByte. It is also not recognized by the +// size validation. This is useful to test how the decoder +// behaves on a non-buffered input stream. 
+type plainReader []byte + +func newPlainReader(b []byte) io.Reader { + return (*plainReader)(&b) +} + +func (r *plainReader) Read(buf []byte) (n int, err error) { + if len(*r) == 0 { + return 0, io.EOF + } + n = copy(buf, *r) + *r = (*r)[n:] + return n, nil +} + +func TestDecodeWithNonByteReader(t *testing.T) { + runTests(t, func(input []byte, into interface{}) error { + return Decode(newPlainReader(input), into) + }) +} + +func TestDecodeStreamReset(t *testing.T) { + s := NewStream(nil, 0) + runTests(t, func(input []byte, into interface{}) error { + s.Reset(bytes.NewReader(input), 0) + return s.Decode(into) + }) +} + +type testDecoder struct{ called bool } + +func (t *testDecoder) DecodeRLP(s *Stream) error { + if _, err := s.Uint(); err != nil { + return err + } + t.called = true + return nil +} + +func TestDecodeDecoder(t *testing.T) { + var s struct { + T1 testDecoder + T2 *testDecoder + T3 **testDecoder + } + if err := Decode(bytes.NewReader(unhex("C3010203")), &s); err != nil { + t.Fatalf("Decode error: %v", err) + } + + if !s.T1.called { + t.Errorf("DecodeRLP was not called for (non-pointer) testDecoder") + } + + if s.T2 == nil { + t.Errorf("*testDecoder has not been allocated") + } else if !s.T2.called { + t.Errorf("DecodeRLP was not called for *testDecoder") + } + + if s.T3 == nil || *s.T3 == nil { + t.Errorf("**testDecoder has not been allocated") + } else if !(*s.T3).called { + t.Errorf("DecodeRLP was not called for **testDecoder") + } +} + +func TestDecodeDecoderNilPointer(t *testing.T) { + var s struct { + T1 *testDecoder `rlp:"nil"` + T2 *testDecoder + } + if err := Decode(bytes.NewReader(unhex("C2C002")), &s); err != nil { + t.Fatalf("Decode error: %v", err) + } + if s.T1 != nil { + t.Errorf("decoder T1 allocated for empty input (called: %v)", s.T1.called) + } + if s.T2 == nil || !s.T2.called { + t.Errorf("decoder T2 not allocated/called") + } +} + +type byteDecoder byte + +func (bd *byteDecoder) DecodeRLP(s *Stream) error { + _, err := s.Uint() + *bd = 255 + return err +} + +func (bd byteDecoder) called() bool { + return bd == 255 +} + +// This test verifies that the byte slice/byte array logic +// does not kick in for element types implementing Decoder. +func TestDecoderInByteSlice(t *testing.T) { + var slice []byteDecoder + if err := Decode(bytes.NewReader(unhex("C101")), &slice); err != nil { + t.Errorf("unexpected Decode error %v", err) + } else if !slice[0].called() { + t.Errorf("DecodeRLP not called for slice element") + } + + var array [1]byteDecoder + if err := Decode(bytes.NewReader(unhex("C101")), &array); err != nil { + t.Errorf("unexpected Decode error %v", err) + } else if !array[0].called() { + t.Errorf("DecodeRLP not called for array element") + } +} + +type unencodableDecoder func() + +func (f *unencodableDecoder) DecodeRLP(s *Stream) error { + if _, err := s.List(); err != nil { + return err + } + if err := s.ListEnd(); err != nil { + return err + } + *f = func() {} + return nil +} + +func TestDecoderFunc(t *testing.T) { + var x func() + if err := DecodeBytes([]byte{0xC0}, (*unencodableDecoder)(&x)); err != nil { + t.Fatal(err) + } + x() +} + +// This tests the validity checks for fields with struct tag "optional". 
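+// Two rules are exercised below: a field tagged "optional" cannot also carry "tail", and once one field is optional every later public field must be optional as well.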
+func TestInvalidOptionalField(t *testing.T) { + type ( + invalid1 struct { + A uint `rlp:"optional"` + B uint + } + invalid2 struct { + T []uint `rlp:"tail,optional"` + } + invalid3 struct { + T []uint `rlp:"optional,tail"` + } + ) + + tests := []struct { + v interface{} + err string + }{ + {v: new(invalid1), err: `rlp: invalid struct tag "" for rlp.invalid1.B (must be optional because preceding field "A" is optional)`}, + {v: new(invalid2), err: `rlp: invalid struct tag "optional" for rlp.invalid2.T (also has "tail" tag)`}, + {v: new(invalid3), err: `rlp: invalid struct tag "tail" for rlp.invalid3.T (also has "optional" tag)`}, + } + for _, test := range tests { + err := DecodeBytes(unhex("C20102"), test.v) + if err == nil { + t.Errorf("no error for %T", test.v) + } else if err.Error() != test.err { + t.Errorf("wrong error for %T: %v", test.v, err.Error()) + } + } +} + +func ExampleDecode() { + input, _ := hex.DecodeString("C90A1486666F6F626172") + + type example struct { + A, B uint + String string + } + + var s example + err := Decode(bytes.NewReader(input), &s) + if err != nil { + fmt.Printf("Error: %v\n", err) + } else { + fmt.Printf("Decoded value: %#v\n", s) + } + // Output: + // Decoded value: rlp.example{A:0xa, B:0x14, String:"foobar"} +} + +func ExampleDecode_structTagNil() { + // In this example, we'll use the "nil" struct tag to change + // how a pointer-typed field is decoded. The input contains an RLP + // list of one element, an empty string. + input := []byte{0xC1, 0x80} + + // This type uses the normal rules. + // The empty input string is decoded as a pointer to an empty Go string. + var normalRules struct { + String *string + } + Decode(bytes.NewReader(input), &normalRules) + fmt.Printf("normal: String = %q\n", *normalRules.String) + + // This type uses the struct tag. + // The empty input string is decoded as a nil pointer. 
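+// (Encoding is symmetric under this tag: writing the struct back with a nil String field would produce the empty string 0x80 in that position.)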
+ var withEmptyOK struct { + String *string `rlp:"nil"` + } + Decode(bytes.NewReader(input), &withEmptyOK) + fmt.Printf("with nil tag: String = %v\n", withEmptyOK.String) + + // Output: + // normal: String = "" + // with nil tag: String = +} + +func ExampleStream() { + input, _ := hex.DecodeString("C90A1486666F6F626172") + s := NewStream(bytes.NewReader(input), 0) + + // Check what kind of value lies ahead + kind, size, _ := s.Kind() + fmt.Printf("Kind: %v size:%d\n", kind, size) + + // Enter the list + if _, err := s.List(); err != nil { + fmt.Printf("List error: %v\n", err) + return + } + + // Decode elements + fmt.Println(s.Uint()) + fmt.Println(s.Uint()) + fmt.Println(s.Bytes()) + + // Acknowledge end of list + if err := s.ListEnd(); err != nil { + fmt.Printf("ListEnd error: %v\n", err) + } + // Output: + // Kind: List size:9 + // 10 + // 20 + // [102 111 111 98 97 114] +} + +func BenchmarkDecodeUints(b *testing.B) { + enc := encodeTestSlice(90000) + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + var s []uint + r := bytes.NewReader(enc) + if err := Decode(r, &s); err != nil { + b.Fatalf("Decode error: %v", err) + } + } +} + +func BenchmarkDecodeUintsReused(b *testing.B) { + enc := encodeTestSlice(100000) + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + var s []uint + for i := 0; i < b.N; i++ { + r := bytes.NewReader(enc) + if err := Decode(r, &s); err != nil { + b.Fatalf("Decode error: %v", err) + } + } +} + +func BenchmarkDecodeByteArrayStruct(b *testing.B) { + enc, err := EncodeToBytes(&byteArrayStruct{}) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + var out byteArrayStruct + for i := 0; i < b.N; i++ { + if err := DecodeBytes(enc, &out); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeBigInts(b *testing.B) { + ints := make([]*big.Int, 200) + for i := range ints { + ints[i] = math.BigPow(2, int64(i)) + } + enc, err := EncodeToBytes(ints) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(enc))) + b.ReportAllocs() + b.ResetTimer() + + var out []*big.Int + for i := 0; i < b.N; i++ { + if err := DecodeBytes(enc, &out); err != nil { + b.Fatal(err) + } + } +} + +func encodeTestSlice(n uint) []byte { + s := make([]uint, n) + for i := uint(0); i < n; i++ { + s[i] = i + } + b, err := EncodeToBytes(s) + if err != nil { + panic(fmt.Sprintf("encode error: %v", err)) + } + return b +} + +func unhex(str string) []byte { + b, err := hex.DecodeString(strings.ReplaceAll(str, " ", "")) + if err != nil { + panic(fmt.Sprintf("invalid hex string: %q", str)) + } + return b +} diff --git a/rlp/doc.go b/rlp/doc.go new file mode 100644 index 0000000000..e4404c978d --- /dev/null +++ b/rlp/doc.go @@ -0,0 +1,161 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +/* +Package rlp implements the RLP serialization format. + +The purpose of RLP (Recursive Length Prefix) is to encode arbitrarily nested arrays of +binary data, and RLP is the main encoding method used to serialize objects in Ethereum. +The only purpose of RLP is to encode structure; encoding specific atomic data types (e.g. +strings, ints, floats) is left up to higher-order protocols. In Ethereum integers must be +represented in big endian binary form with no leading zeroes (thus making the integer +value zero equivalent to the empty string). + +RLP values are distinguished by a type tag. The type tag precedes the value in the input +stream and defines the size and kind of the bytes that follow. + + +Encoding Rules + +Package rlp uses reflection and encodes RLP based on the Go type of the value. + +If the type implements the Encoder interface, Encode calls EncodeRLP. It does not +call EncodeRLP on nil pointer values. + +To encode a pointer, the value being pointed to is encoded. A nil pointer to a struct +type, slice or array always encodes as an empty RLP list unless the slice or array has +element type byte. A nil pointer to any other value encodes as the empty string. + +Struct values are encoded as an RLP list of all their encoded public fields. Recursive +struct types are supported. + +To encode slices and arrays, the elements are encoded as an RLP list of the value's +elements. Note that arrays and slices with element type uint8 or byte are always encoded +as an RLP string. + +A Go string is encoded as an RLP string. + +An unsigned integer value is encoded as an RLP string. Zero always encodes as an empty RLP +string. big.Int values are treated as integers. Signed integers (int, int8, int16, ...) +are not supported and will return an error when encoding. + +Boolean values are encoded as the unsigned integers zero (false) and one (true). + +An interface value encodes as the value contained in the interface. + +Floating point numbers, maps, channels and functions are not supported. + + +Decoding Rules + +Decoding uses the following type-dependent rules: + +If the type implements the Decoder interface, DecodeRLP is called. + +To decode into a pointer, the value will be decoded as the element type of the pointer. If +the pointer is nil, a new value of the pointer's element type is allocated. If the pointer +is non-nil, the existing value will be reused. Note that package rlp never leaves a +pointer-type struct field as nil unless one of the "nil" struct tags is present. + +To decode into a struct, decoding expects the input to be an RLP list. The decoded +elements of the list are assigned to each public field in the order given by the struct's +definition. The input list must contain an element for each decoded field. Decoding +returns an error if there are too few or too many elements for the struct. + +To decode into a slice, the input must be a list and the resulting slice will contain the +input elements in order. For byte slices, the input must be an RLP string. Array types +decode similarly, with the additional restriction that the number of input elements (or +bytes) must match the array's defined length. + +To decode into a Go string, the input must be an RLP string. The input bytes are taken +as-is and will not necessarily be valid UTF-8. + +To decode into an unsigned integer type, the input must also be an RLP string.
The bytes +are interpreted as a big endian representation of the integer. If the RLP string is larger +than the bit size of the type, decoding will return an error. Decode also supports +*big.Int. There is no size limit for big integers. + +To decode into a boolean, the input must contain an unsigned integer of value zero (false) +or one (true). + +To decode into an interface value, one of these types is stored in the value: + + []interface{}, for RLP lists + []byte, for RLP strings + +Non-empty interface types are not supported when decoding. +Signed integers, floating point numbers, maps, channels and functions cannot be decoded into. + + +Struct Tags + +As with other encoding packages, the "-" tag ignores fields. + + type StructWithIgnoredField struct{ + Ignored uint `rlp:"-"` + Field uint + } + +Go struct values encode/decode as RLP lists. There are two ways of influencing the mapping +of fields to list elements. The "tail" tag, which may only be used on the last exported +struct field, allows slurping up any excess list elements into a slice. + + type StructWithTail struct{ + Field uint + Tail []string `rlp:"tail"` + } + +The "optional" tag says that the field may be omitted if it is zero-valued. If this tag is +used on a struct field, all subsequent public fields must also be declared optional. + +When encoding a struct with optional fields, the output RLP list contains all values up to +the last non-zero optional field. + +When decoding into a struct, optional fields may be omitted from the end of the input +list. For the example below, this means input lists of one, two, or three elements are +accepted. + + type StructWithOptionalFields struct{ + Required uint + Optional1 uint `rlp:"optional"` + Optional2 uint `rlp:"optional"` + } + +The "nil", "nilList" and "nilString" tags apply to pointer-typed fields only, and change +the decoding rules for the field type. For regular pointer fields without the "nil" tag, +input values must always match the required input length exactly and the decoder does not +produce nil values. When the "nil" tag is set, input values of size zero decode as a nil +pointer. This is especially useful for recursive types. + + type StructWithNilField struct { + Field *[3]byte `rlp:"nil"` + } + +In the example above, Field allows two possible input sizes. For input 0xC180 (a list +containing an empty string) Field is set to nil after decoding. For input 0xC483000000 (a +list containing a 3-byte string), Field is set to a non-nil array pointer. + +RLP supports two kinds of empty values: empty lists and empty strings. When using the +"nil" tag, the kind of empty value allowed for a type is chosen automatically. A field +whose Go type is a pointer to an unsigned integer, string, boolean or byte array/slice +expects an empty RLP string. Any other pointer field type encodes/decodes as an empty RLP +list. + +The choice of null value can be made explicit with the "nilList" and "nilString" struct +tags. Using these tags encodes/decodes a Go nil pointer value as the empty RLP value kind +defined by the tag. +*/ +package rlp diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go new file mode 100644 index 0000000000..687949c044 --- /dev/null +++ b/rlp/encbuffer.go @@ -0,0 +1,398 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp + +import ( + "io" + "math/big" + "reflect" + "sync" +) + +type encBuffer struct { + str []byte // string data, contains everything except list headers + lheads []listhead // all list headers + lhsize int // sum of sizes of all encoded list headers + sizebuf [9]byte // auxiliary buffer for uint encoding +} + +// The global encBuffer pool. +var encBufferPool = sync.Pool{ + New: func() interface{} { return new(encBuffer) }, +} + +func getEncBuffer() *encBuffer { + buf := encBufferPool.Get().(*encBuffer) + buf.reset() + return buf +} + +func (buf *encBuffer) reset() { + buf.lhsize = 0 + buf.str = buf.str[:0] + buf.lheads = buf.lheads[:0] +} + +// size returns the length of the encoded data. +func (buf *encBuffer) size() int { + return len(buf.str) + buf.lhsize +} + +// makeBytes creates the encoder output. +func (w *encBuffer) makeBytes() []byte { + out := make([]byte, w.size()) + w.copyTo(out) + return out +} + +func (w *encBuffer) copyTo(dst []byte) { + strpos := 0 + pos := 0 + for _, head := range w.lheads { + // write string data before header + n := copy(dst[pos:], w.str[strpos:head.offset]) + pos += n + strpos += n + // write the header + enc := head.encode(dst[pos:]) + pos += len(enc) + } + // copy string data after the last list header + copy(dst[pos:], w.str[strpos:]) +} + +// writeTo writes the encoder output to w. +func (buf *encBuffer) writeTo(w io.Writer) (err error) { + strpos := 0 + for _, head := range buf.lheads { + // write string data before header + if head.offset-strpos > 0 { + n, err := w.Write(buf.str[strpos:head.offset]) + strpos += n + if err != nil { + return err + } + } + // write the header + enc := head.encode(buf.sizebuf[:]) + if _, err = w.Write(enc); err != nil { + return err + } + } + if strpos < len(buf.str) { + // write string data after the last list header + _, err = w.Write(buf.str[strpos:]) + } + return err +} + +// Write implements io.Writer and appends b directly to the output. +func (buf *encBuffer) Write(b []byte) (int, error) { + buf.str = append(buf.str, b...) + return len(b), nil +} + +// writeBool writes b as the integer 0 (false) or 1 (true). +func (buf *encBuffer) writeBool(b bool) { + if b { + buf.str = append(buf.str, 0x01) + } else { + buf.str = append(buf.str, 0x80) + } +} + +func (buf *encBuffer) writeUint64(i uint64) { + if i == 0 { + buf.str = append(buf.str, 0x80) + } else if i < 128 { + // fits single byte + buf.str = append(buf.str, byte(i)) + } else { + s := putint(buf.sizebuf[1:], i) + buf.sizebuf[0] = 0x80 + byte(s) + buf.str = append(buf.str, buf.sizebuf[:s+1]...) + } +} + +func (buf *encBuffer) writeBytes(b []byte) { + if len(b) == 1 && b[0] <= 0x7F { + // fits single byte, no string header + buf.str = append(buf.str, b[0]) + } else { + buf.encodeStringHeader(len(b)) + buf.str = append(buf.str, b...) 
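+ // (For len(b) >= 56, encodeStringHeader emits 0xB7+lenlen followed by the big-endian length; shorter strings get the single 0x80+len byte.)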
+ } +} + +func (buf *encBuffer) writeString(s string) { + buf.writeBytes([]byte(s)) +} + +// wordBytes is the number of bytes in a big.Word +const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8 + +// writeBigInt writes i as an integer. +func (w *encBuffer) writeBigInt(i *big.Int) { + bitlen := i.BitLen() + if bitlen <= 64 { + w.writeUint64(i.Uint64()) + return + } + // Integer is larger than 64 bits, encode from i.Bits(). + // The minimal byte length is bitlen rounded up to the next + // multiple of 8, divided by 8. + length := ((bitlen + 7) & -8) >> 3 + w.encodeStringHeader(length) + w.str = append(w.str, make([]byte, length)...) + index := length + buf := w.str[len(w.str)-length:] + for _, d := range i.Bits() { + for j := 0; j < wordBytes && index > 0; j++ { + index-- + buf[index] = byte(d) + d >>= 8 + } + } +} + +// list adds a new list header to the header stack. It returns the index of the header. +// Call listEnd with this index after encoding the content of the list. +func (buf *encBuffer) list() int { + buf.lheads = append(buf.lheads, listhead{offset: len(buf.str), size: buf.lhsize}) + return len(buf.lheads) - 1 +} + +func (buf *encBuffer) listEnd(index int) { + lh := &buf.lheads[index] + lh.size = buf.size() - lh.offset - lh.size + if lh.size < 56 { + buf.lhsize++ // length encoded into kind tag + } else { + buf.lhsize += 1 + intsize(uint64(lh.size)) + } +} + +func (buf *encBuffer) encode(val interface{}) error { + rval := reflect.ValueOf(val) + writer, err := cachedWriter(rval.Type()) + if err != nil { + return err + } + return writer(rval, buf) +} + +func (buf *encBuffer) encodeStringHeader(size int) { + if size < 56 { + buf.str = append(buf.str, 0x80+byte(size)) + } else { + sizesize := putint(buf.sizebuf[1:], uint64(size)) + buf.sizebuf[0] = 0xB7 + byte(sizesize) + buf.str = append(buf.str, buf.sizebuf[:sizesize+1]...) + } +} + +// encReader is the io.Reader returned by EncodeToReader. +// It releases its encbuf at EOF. +type encReader struct { + buf *encBuffer // the buffer we're reading from. this is nil when we're at EOF. + lhpos int // index of list header that we're reading + strpos int // current position in string buffer + piece []byte // next piece to be read +} + +func (r *encReader) Read(b []byte) (n int, err error) { + for { + if r.piece = r.next(); r.piece == nil { + // Put the encode buffer back into the pool at EOF when it + // is first encountered. Subsequent calls still return EOF + // as the error but the buffer is no longer valid. + if r.buf != nil { + encBufferPool.Put(r.buf) + r.buf = nil + } + return n, io.EOF + } + nn := copy(b[n:], r.piece) + n += nn + if nn < len(r.piece) { + // piece didn't fit, see you next time. + r.piece = r.piece[nn:] + return n, nil + } + r.piece = nil + } +} + +// next returns the next piece of data to be read. +// it returns nil at EOF. +func (r *encReader) next() []byte { + switch { + case r.buf == nil: + return nil + + case r.piece != nil: + // There is still data available for reading. + return r.piece + + case r.lhpos < len(r.buf.lheads): + // We're before the last list header. + head := r.buf.lheads[r.lhpos] + sizebefore := head.offset - r.strpos + if sizebefore > 0 { + // String data before header. + p := r.buf.str[r.strpos:head.offset] + r.strpos += sizebefore + return p + } + r.lhpos++ + return head.encode(r.buf.sizebuf[:]) + + case r.strpos < len(r.buf.str): + // String data at the end, after all list headers. 
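+ // Everything left is handed back in one piece; the next call falls through to the default branch and Read reports io.EOF.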
+ p := r.buf.str[r.strpos:] + r.strpos = len(r.buf.str) + return p + + default: + return nil + } +} + +func encBufferFromWriter(w io.Writer) *encBuffer { + switch w := w.(type) { + case EncoderBuffer: + return w.buf + case *EncoderBuffer: + return w.buf + case *encBuffer: + return w + default: + return nil + } +} + +// EncoderBuffer is a buffer for incremental encoding. +// +// The zero value is NOT ready for use. To get a usable buffer, +// create it using NewEncoderBuffer or call Reset. +type EncoderBuffer struct { + buf *encBuffer + dst io.Writer + + ownBuffer bool +} + +// NewEncoderBuffer creates an encoder buffer. +func NewEncoderBuffer(dst io.Writer) EncoderBuffer { + var w EncoderBuffer + w.Reset(dst) + return w +} + +// Reset truncates the buffer and sets the output destination. +func (w *EncoderBuffer) Reset(dst io.Writer) { + if w.buf != nil && !w.ownBuffer { + panic("can't Reset derived EncoderBuffer") + } + + // If the destination writer has an *encBuffer, use it. + // Note that w.ownBuffer is left false here. + if dst != nil { + if outer := encBufferFromWriter(dst); outer != nil { + *w = EncoderBuffer{outer, nil, false} + return + } + } + + // Get a fresh buffer. + if w.buf == nil { + w.buf = encBufferPool.Get().(*encBuffer) + w.ownBuffer = true + } + w.buf.reset() + w.dst = dst +} + +// Flush writes encoded RLP data to the output writer. This can only be called once. +// If you want to re-use the buffer after Flush, you must call Reset. +func (w *EncoderBuffer) Flush() error { + var err error + if w.dst != nil { + err = w.buf.writeTo(w.dst) + } + // Release the internal buffer. + if w.ownBuffer { + encBufferPool.Put(w.buf) + } + *w = EncoderBuffer{} + return err +} + +// ToBytes returns the encoded bytes. +func (w *EncoderBuffer) ToBytes() []byte { + return w.buf.makeBytes() +} + +// AppendToBytes appends the encoded bytes to dst. +func (w *EncoderBuffer) AppendToBytes(dst []byte) []byte { + size := w.buf.size() + out := append(dst, make([]byte, size)...) + w.buf.copyTo(out[len(dst):]) + return out +} + +// Write appends b directly to the encoder output. +func (w EncoderBuffer) Write(b []byte) (int, error) { + return w.buf.Write(b) +} + +// WriteBool writes b as the integer 0 (false) or 1 (true). +func (w EncoderBuffer) WriteBool(b bool) { + w.buf.writeBool(b) +} + +// WriteUint64 encodes an unsigned integer. +func (w EncoderBuffer) WriteUint64(i uint64) { + w.buf.writeUint64(i) +} + +// WriteBigInt encodes a big.Int as an RLP string. +// Note: Unlike with Encode, the sign of i is ignored. +func (w EncoderBuffer) WriteBigInt(i *big.Int) { + w.buf.writeBigInt(i) +} + +// WriteBytes encodes b as an RLP string. +func (w EncoderBuffer) WriteBytes(b []byte) { + w.buf.writeBytes(b) +} + +// WriteString encodes s as an RLP string. +func (w EncoderBuffer) WriteString(s string) { + w.buf.writeString(s) +} + +// List starts a list. It returns an internal index. Call ListEnd with +// this index after encoding the content to finish the list. +func (w EncoderBuffer) List() int { + return w.buf.list() +} + +// ListEnd finishes the given list. +func (w EncoderBuffer) ListEnd(index int) { + w.buf.listEnd(index) +} diff --git a/rlp/encbuffer_example_test.go b/rlp/encbuffer_example_test.go new file mode 100644 index 0000000000..11b4b578aa --- /dev/null +++ b/rlp/encbuffer_example_test.go @@ -0,0 +1,45 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library.
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp_test + +import ( + "bytes" + "fmt" + + "github.com/tenderly/coreth/rlp" +) + +func ExampleEncoderBuffer() { + var w bytes.Buffer + + // Encode [4, [5, 6]] to w. + buf := rlp.NewEncoderBuffer(&w) + l1 := buf.List() + buf.WriteUint64(4) + l2 := buf.List() + buf.WriteUint64(5) + buf.WriteUint64(6) + buf.ListEnd(l2) + buf.ListEnd(l1) + + if err := buf.Flush(); err != nil { + panic(err) + } + fmt.Printf("%X\n", w.Bytes()) + // Output: + // C404C20506 +} diff --git a/rlp/encode.go b/rlp/encode.go new file mode 100644 index 0000000000..75c4e276cc --- /dev/null +++ b/rlp/encode.go @@ -0,0 +1,471 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp + +import ( + "errors" + "fmt" + "io" + "math/big" + "reflect" + + "github.com/tenderly/coreth/rlp/internal/rlpstruct" +) + +var ( + // Common encoded values. + // These are useful when implementing EncodeRLP. + EmptyString = []byte{0x80} + EmptyList = []byte{0xC0} +) + +var ErrNegativeBigInt = errors.New("rlp: cannot encode negative big.Int") + +// Encoder is implemented by types that require custom +// encoding rules or want to encode private fields. +type Encoder interface { + // EncodeRLP should write the RLP encoding of its receiver to w. + // If the implementation is a pointer method, it may also be + // called for nil pointers. + // + // Implementations should generate valid RLP. The data written is + // not verified at the moment, but a future version might. It is + // recommended to write only a single value but writing multiple + // values or no value at all is also permitted. + EncodeRLP(io.Writer) error +} + +// Encode writes the RLP encoding of val to w. Note that Encode may +// perform many small writes in some cases. Consider making w +// buffered. +// +// Please see package-level documentation of encoding rules. +func Encode(w io.Writer, val interface{}) error { + // Optimization: reuse *encBuffer when called by EncodeRLP. 
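+ // encBufferFromWriter recognizes EncoderBuffer, *EncoderBuffer and *encBuffer, so a nested Encode call appends to the caller's buffer instead of allocating and flushing a second one.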
+ if buf := encBufferFromWriter(w); buf != nil { + return buf.encode(val) + } + + buf := getEncBuffer() + defer encBufferPool.Put(buf) + if err := buf.encode(val); err != nil { + return err + } + return buf.writeTo(w) +} + +// EncodeToBytes returns the RLP encoding of val. +// Please see package-level documentation for the encoding rules. +func EncodeToBytes(val interface{}) ([]byte, error) { + buf := getEncBuffer() + defer encBufferPool.Put(buf) + + if err := buf.encode(val); err != nil { + return nil, err + } + return buf.makeBytes(), nil +} + +// EncodeToReader returns a reader from which the RLP encoding of val +// can be read. The returned size is the total size of the encoded +// data. +// +// Please see the documentation of Encode for the encoding rules. +func EncodeToReader(val interface{}) (size int, r io.Reader, err error) { + buf := getEncBuffer() + if err := buf.encode(val); err != nil { + encBufferPool.Put(buf) + return 0, nil, err + } + // Note: can't put the reader back into the pool here + // because it is held by encReader. The reader puts it + // back when it has been fully consumed. + return buf.size(), &encReader{buf: buf}, nil +} + +type listhead struct { + offset int // index of this header in string data + size int // total size of encoded data (including list headers) +} + +// encode writes head to the given buffer, which must be at least +// 9 bytes long. It returns the encoded bytes. +func (head *listhead) encode(buf []byte) []byte { + return buf[:puthead(buf, 0xC0, 0xF7, uint64(head.size))] +} + +// headsize returns the size of a list or string header +// for a value of the given size. +func headsize(size uint64) int { + if size < 56 { + return 1 + } + return 1 + intsize(size) +} + +// puthead writes a list or string header to buf. +// buf must be at least 9 bytes long. +func puthead(buf []byte, smalltag, largetag byte, size uint64) int { + if size < 56 { + buf[0] = smalltag + byte(size) + return 1 + } + sizesize := putint(buf[1:], size) + buf[0] = largetag + byte(sizesize) + return sizesize + 1 +} + +var encoderInterface = reflect.TypeOf(new(Encoder)).Elem() + +// makeWriter creates a writer function for the given type. +func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) { + kind := typ.Kind() + switch { + case typ == rawValueType: + return writeRawValue, nil + case typ.AssignableTo(reflect.PtrTo(bigInt)): + return writeBigIntPtr, nil + case typ.AssignableTo(bigInt): + return writeBigIntNoPtr, nil + case kind == reflect.Ptr: + return makePtrWriter(typ, ts) + case reflect.PtrTo(typ).Implements(encoderInterface): + return makeEncoderWriter(typ), nil + case isUint(kind): + return writeUint, nil + case kind == reflect.Bool: + return writeBool, nil + case kind == reflect.String: + return writeString, nil + case kind == reflect.Slice && isByte(typ.Elem()): + return writeBytes, nil + case kind == reflect.Array && isByte(typ.Elem()): + return makeByteArrayWriter(typ), nil + case kind == reflect.Slice || kind == reflect.Array: + return makeSliceWriter(typ, ts) + case kind == reflect.Struct: + return makeStructWriter(typ) + case kind == reflect.Interface: + return writeInterface, nil + default: + return nil, fmt.Errorf("rlp: type %v is not RLP-serializable", typ) + } +} + +func writeRawValue(val reflect.Value, w *encBuffer) error { + w.str = append(w.str, val.Bytes()...) 
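+ // The bytes are copied verbatim: a RawValue is trusted to already be well-formed RLP, so no header is written for it.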
+ return nil +} + +func writeUint(val reflect.Value, w *encBuffer) error { + w.writeUint64(val.Uint()) + return nil +} + +func writeBool(val reflect.Value, w *encBuffer) error { + w.writeBool(val.Bool()) + return nil +} + +func writeBigIntPtr(val reflect.Value, w *encBuffer) error { + ptr := val.Interface().(*big.Int) + if ptr == nil { + w.str = append(w.str, 0x80) + return nil + } + if ptr.Sign() == -1 { + return ErrNegativeBigInt + } + w.writeBigInt(ptr) + return nil +} + +func writeBigIntNoPtr(val reflect.Value, w *encBuffer) error { + i := val.Interface().(big.Int) + if i.Sign() == -1 { + return ErrNegativeBigInt + } + w.writeBigInt(&i) + return nil +} + +func writeBytes(val reflect.Value, w *encBuffer) error { + w.writeBytes(val.Bytes()) + return nil +} + +func makeByteArrayWriter(typ reflect.Type) writer { + switch typ.Len() { + case 0: + return writeLengthZeroByteArray + case 1: + return writeLengthOneByteArray + default: + length := typ.Len() + return func(val reflect.Value, w *encBuffer) error { + if !val.CanAddr() { + // Getting the byte slice of val requires it to be addressable. Make it + // addressable by copying. + copy := reflect.New(val.Type()).Elem() + copy.Set(val) + val = copy + } + slice := byteArrayBytes(val, length) + w.encodeStringHeader(len(slice)) + w.str = append(w.str, slice...) + return nil + } + } +} + +func writeLengthZeroByteArray(val reflect.Value, w *encBuffer) error { + w.str = append(w.str, 0x80) + return nil +} + +func writeLengthOneByteArray(val reflect.Value, w *encBuffer) error { + b := byte(val.Index(0).Uint()) + if b <= 0x7f { + w.str = append(w.str, b) + } else { + w.str = append(w.str, 0x81, b) + } + return nil +} + +func writeString(val reflect.Value, w *encBuffer) error { + s := val.String() + if len(s) == 1 && s[0] <= 0x7f { + // fits single byte, no string header + w.str = append(w.str, s[0]) + } else { + w.encodeStringHeader(len(s)) + w.str = append(w.str, s...) + } + return nil +} + +func writeInterface(val reflect.Value, w *encBuffer) error { + if val.IsNil() { + // Write empty list. This is consistent with the previous RLP + // encoder that we had and should therefore avoid any + // problems. + w.str = append(w.str, 0xC0) + return nil + } + eval := val.Elem() + writer, err := cachedWriter(eval.Type()) + if err != nil { + return err + } + return writer(eval, w) +} + +func makeSliceWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) { + etypeinfo := theTC.infoWhileGenerating(typ.Elem(), rlpstruct.Tags{}) + if etypeinfo.writerErr != nil { + return nil, etypeinfo.writerErr + } + + var wfn writer + if ts.Tail { + // This is for struct tail slices. + // w.list is not called for them. + wfn = func(val reflect.Value, w *encBuffer) error { + vlen := val.Len() + for i := 0; i < vlen; i++ { + if err := etypeinfo.writer(val.Index(i), w); err != nil { + return err + } + } + return nil + } + } else { + // This is for regular slices and arrays. 
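+ // An empty value short-circuits to the canonical empty list 0xC0; anything longer reserves a header slot via w.list and patches its size in w.listEnd.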
+ wfn = func(val reflect.Value, w *encBuffer) error { + vlen := val.Len() + if vlen == 0 { + w.str = append(w.str, 0xC0) + return nil + } + listOffset := w.list() + for i := 0; i < vlen; i++ { + if err := etypeinfo.writer(val.Index(i), w); err != nil { + return err + } + } + w.listEnd(listOffset) + return nil + } + } + return wfn, nil +} + +func makeStructWriter(typ reflect.Type) (writer, error) { + fields, err := structFields(typ) + if err != nil { + return nil, err + } + for _, f := range fields { + if f.info.writerErr != nil { + return nil, structFieldError{typ, f.index, f.info.writerErr} + } + } + + var writer writer + firstOptionalField := firstOptionalField(fields) + if firstOptionalField == len(fields) { + // This is the writer function for structs without any optional fields. + writer = func(val reflect.Value, w *encBuffer) error { + lh := w.list() + for _, f := range fields { + if err := f.info.writer(val.Field(f.index), w); err != nil { + return err + } + } + w.listEnd(lh) + return nil + } + } else { + // If there are any "optional" fields, the writer needs to perform additional + // checks to determine the output list length. + writer = func(val reflect.Value, w *encBuffer) error { + lastField := len(fields) - 1 + for ; lastField >= firstOptionalField; lastField-- { + if !val.Field(fields[lastField].index).IsZero() { + break + } + } + lh := w.list() + for i := 0; i <= lastField; i++ { + if err := fields[i].info.writer(val.Field(fields[i].index), w); err != nil { + return err + } + } + w.listEnd(lh) + return nil + } + } + return writer, nil +} + +func makePtrWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) { + nilEncoding := byte(0xC0) + if typeNilKind(typ.Elem(), ts) == String { + nilEncoding = 0x80 + } + + etypeinfo := theTC.infoWhileGenerating(typ.Elem(), rlpstruct.Tags{}) + if etypeinfo.writerErr != nil { + return nil, etypeinfo.writerErr + } + + writer := func(val reflect.Value, w *encBuffer) error { + if ev := val.Elem(); ev.IsValid() { + return etypeinfo.writer(ev, w) + } + w.str = append(w.str, nilEncoding) + return nil + } + return writer, nil +} + +func makeEncoderWriter(typ reflect.Type) writer { + if typ.Implements(encoderInterface) { + return func(val reflect.Value, w *encBuffer) error { + return val.Interface().(Encoder).EncodeRLP(w) + } + } + w := func(val reflect.Value, w *encBuffer) error { + if !val.CanAddr() { + // package json simply doesn't call MarshalJSON for this case, but encodes the + // value as if it didn't implement the interface. We don't want to handle it that + // way. + return fmt.Errorf("rlp: unaddressable value of type %v, EncodeRLP is pointer method", val.Type()) + } + return val.Addr().Interface().(Encoder).EncodeRLP(w) + } + return w +} + +// putint writes i to the beginning of b in big endian byte +// order, using the least number of bytes needed to represent i.
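+// For example, putint(b, 0x0400) stores {0x04, 0x00} and returns 2.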
+func putint(b []byte, i uint64) (size int) { + switch { + case i < (1 << 8): + b[0] = byte(i) + return 1 + case i < (1 << 16): + b[0] = byte(i >> 8) + b[1] = byte(i) + return 2 + case i < (1 << 24): + b[0] = byte(i >> 16) + b[1] = byte(i >> 8) + b[2] = byte(i) + return 3 + case i < (1 << 32): + b[0] = byte(i >> 24) + b[1] = byte(i >> 16) + b[2] = byte(i >> 8) + b[3] = byte(i) + return 4 + case i < (1 << 40): + b[0] = byte(i >> 32) + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) + return 5 + case i < (1 << 48): + b[0] = byte(i >> 40) + b[1] = byte(i >> 32) + b[2] = byte(i >> 24) + b[3] = byte(i >> 16) + b[4] = byte(i >> 8) + b[5] = byte(i) + return 6 + case i < (1 << 56): + b[0] = byte(i >> 48) + b[1] = byte(i >> 40) + b[2] = byte(i >> 32) + b[3] = byte(i >> 24) + b[4] = byte(i >> 16) + b[5] = byte(i >> 8) + b[6] = byte(i) + return 7 + default: + b[0] = byte(i >> 56) + b[1] = byte(i >> 48) + b[2] = byte(i >> 40) + b[3] = byte(i >> 32) + b[4] = byte(i >> 24) + b[5] = byte(i >> 16) + b[6] = byte(i >> 8) + b[7] = byte(i) + return 8 + } +} + +// intsize computes the minimum number of bytes required to store i. +func intsize(i uint64) (size int) { + for size = 1; ; size++ { + if i >>= 8; i == 0 { + return size + } + } +} diff --git a/rlp/encode_test.go b/rlp/encode_test.go new file mode 100644 index 0000000000..58ddc0d120 --- /dev/null +++ b/rlp/encode_test.go @@ -0,0 +1,585 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package rlp + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "runtime" + "sync" + "testing" + + "github.com/ethereum/go-ethereum/common/math" +) + +type testEncoder struct { + err error +} + +func (e *testEncoder) EncodeRLP(w io.Writer) error { + if e == nil { + panic("EncodeRLP called on nil value") + } + if e.err != nil { + return e.err + } + w.Write([]byte{0, 1, 0, 1, 0, 1, 0, 1, 0, 1}) + return nil +} + +type testEncoderValueMethod struct{} + +func (e testEncoderValueMethod) EncodeRLP(w io.Writer) error { + w.Write([]byte{0xFA, 0xFE, 0xF0}) + return nil +} + +type byteEncoder byte + +func (e byteEncoder) EncodeRLP(w io.Writer) error { + w.Write(EmptyList) + return nil +} + +type undecodableEncoder func() + +func (f undecodableEncoder) EncodeRLP(w io.Writer) error { + w.Write([]byte{0xF5, 0xF5, 0xF5}) + return nil +} + +type encodableReader struct { + A, B uint +} + +func (e *encodableReader) Read(b []byte) (int, error) { + panic("called") +} + +type namedByteType byte + +var ( + _ = Encoder(&testEncoder{}) + _ = Encoder(byteEncoder(0)) + + reader io.Reader = &encodableReader{1, 2} +) + +type encTest struct { + val interface{} + output, error string +} + +var encTests = []encTest{ + // booleans + {val: true, output: "01"}, + {val: false, output: "80"}, + + // integers + {val: uint32(0), output: "80"}, + {val: uint32(127), output: "7F"}, + {val: uint32(128), output: "8180"}, + {val: uint32(256), output: "820100"}, + {val: uint32(1024), output: "820400"}, + {val: uint32(0xFFFFFF), output: "83FFFFFF"}, + {val: uint32(0xFFFFFFFF), output: "84FFFFFFFF"}, + {val: uint64(0xFFFFFFFF), output: "84FFFFFFFF"}, + {val: uint64(0xFFFFFFFFFF), output: "85FFFFFFFFFF"}, + {val: uint64(0xFFFFFFFFFFFF), output: "86FFFFFFFFFFFF"}, + {val: uint64(0xFFFFFFFFFFFFFF), output: "87FFFFFFFFFFFFFF"}, + {val: uint64(0xFFFFFFFFFFFFFFFF), output: "88FFFFFFFFFFFFFFFF"}, + + // big integers (should match uint for small values) + {val: big.NewInt(0), output: "80"}, + {val: big.NewInt(1), output: "01"}, + {val: big.NewInt(127), output: "7F"}, + {val: big.NewInt(128), output: "8180"}, + {val: big.NewInt(256), output: "820100"}, + {val: big.NewInt(1024), output: "820400"}, + {val: big.NewInt(0xFFFFFF), output: "83FFFFFF"}, + {val: big.NewInt(0xFFFFFFFF), output: "84FFFFFFFF"}, + {val: big.NewInt(0xFFFFFFFFFF), output: "85FFFFFFFFFF"}, + {val: big.NewInt(0xFFFFFFFFFFFF), output: "86FFFFFFFFFFFF"}, + {val: big.NewInt(0xFFFFFFFFFFFFFF), output: "87FFFFFFFFFFFFFF"}, + { + val: new(big.Int).SetBytes(unhex("102030405060708090A0B0C0D0E0F2")), + output: "8F102030405060708090A0B0C0D0E0F2", + }, + { + val: new(big.Int).SetBytes(unhex("0100020003000400050006000700080009000A000B000C000D000E01")), + output: "9C0100020003000400050006000700080009000A000B000C000D000E01", + }, + { + val: new(big.Int).SetBytes(unhex("010000000000000000000000000000000000000000000000000000000000000000")), + output: "A1010000000000000000000000000000000000000000000000000000000000000000", + }, + { + val: veryBigInt, + output: "89FFFFFFFFFFFFFFFFFF", + }, + { + val: veryVeryBigInt, + output: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001", + }, + + // non-pointer big.Int + {val: *big.NewInt(0), output: "80"}, + {val: *big.NewInt(0xFFFFFF), output: "83FFFFFF"}, + + // negative ints are not supported + {val: big.NewInt(-1), error: "rlp: cannot encode negative big.Int"}, + {val: *big.NewInt(-1), error: "rlp: cannot encode negative big.Int"}, + + // 
byte arrays + {val: [0]byte{}, output: "80"}, + {val: [1]byte{0}, output: "00"}, + {val: [1]byte{1}, output: "01"}, + {val: [1]byte{0x7F}, output: "7F"}, + {val: [1]byte{0x80}, output: "8180"}, + {val: [1]byte{0xFF}, output: "81FF"}, + {val: [3]byte{1, 2, 3}, output: "83010203"}, + {val: [57]byte{1, 2, 3}, output: "B839010203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}, + + // named byte type arrays + {val: [0]namedByteType{}, output: "80"}, + {val: [1]namedByteType{0}, output: "00"}, + {val: [1]namedByteType{1}, output: "01"}, + {val: [1]namedByteType{0x7F}, output: "7F"}, + {val: [1]namedByteType{0x80}, output: "8180"}, + {val: [1]namedByteType{0xFF}, output: "81FF"}, + {val: [3]namedByteType{1, 2, 3}, output: "83010203"}, + {val: [57]namedByteType{1, 2, 3}, output: "B839010203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}, + + // byte slices + {val: []byte{}, output: "80"}, + {val: []byte{0}, output: "00"}, + {val: []byte{0x7E}, output: "7E"}, + {val: []byte{0x7F}, output: "7F"}, + {val: []byte{0x80}, output: "8180"}, + {val: []byte{1, 2, 3}, output: "83010203"}, + + // named byte type slices + {val: []namedByteType{}, output: "80"}, + {val: []namedByteType{0}, output: "00"}, + {val: []namedByteType{0x7E}, output: "7E"}, + {val: []namedByteType{0x7F}, output: "7F"}, + {val: []namedByteType{0x80}, output: "8180"}, + {val: []namedByteType{1, 2, 3}, output: "83010203"}, + + // strings + {val: "", output: "80"}, + {val: "\x7E", output: "7E"}, + {val: "\x7F", output: "7F"}, + {val: "\x80", output: "8180"}, + {val: "dog", output: "83646F67"}, + { + val: "Lorem ipsum dolor sit amet, consectetur adipisicing eli", + output: "B74C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E7365637465747572206164697069736963696E6720656C69", + }, + { + val: "Lorem ipsum dolor sit amet, consectetur adipisicing elit", + output: "B8384C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E7365637465747572206164697069736963696E6720656C6974", + }, + { + val: "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur mauris magna, suscipit sed vehicula non, iaculis faucibus tortor. Proin suscipit ultricies malesuada. Duis tortor elit, dictum quis tristique eu, ultrices at risus. Morbi a est imperdiet mi ullamcorper aliquet suscipit nec lorem. Aenean quis leo mollis, vulputate elit varius, consequat enim. Nulla ultrices turpis justo, et posuere urna consectetur nec. Proin non convallis metus. Donec tempor ipsum in mauris congue sollicitudin. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse convallis sem vel massa faucibus, eget lacinia lacus tempor. Nulla quis ultricies purus. Proin auctor rhoncus nibh condimentum mollis. Aliquam consequat enim at metus luctus, a eleifend purus egestas. Curabitur at nibh metus. Nam bibendum, neque at auctor tristique, lorem libero aliquet arcu, non interdum tellus lectus sit amet eros. 
Cras rhoncus, metus ac ornare cursus, dolor justo ultrices metus, at ullamcorper volutpat", + output: "B904004C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E73656374657475722061646970697363696E6720656C69742E20437572616269747572206D6175726973206D61676E612C20737573636970697420736564207665686963756C61206E6F6E2C20696163756C697320666175636962757320746F72746F722E2050726F696E20737573636970697420756C74726963696573206D616C6573756164612E204475697320746F72746F7220656C69742C2064696374756D2071756973207472697374697175652065752C20756C7472696365732061742072697375732E204D6F72626920612065737420696D70657264696574206D6920756C6C616D636F7270657220616C6971756574207375736369706974206E6563206C6F72656D2E2041656E65616E2071756973206C656F206D6F6C6C69732C2076756C70757461746520656C6974207661726975732C20636F6E73657175617420656E696D2E204E756C6C6120756C74726963657320747572706973206A7573746F2C20657420706F73756572652075726E6120636F6E7365637465747572206E65632E2050726F696E206E6F6E20636F6E76616C6C6973206D657475732E20446F6E65632074656D706F7220697073756D20696E206D617572697320636F6E67756520736F6C6C696369747564696E2E20566573746962756C756D20616E746520697073756D207072696D697320696E206661756369627573206F726369206C756374757320657420756C74726963657320706F737565726520637562696C69612043757261653B2053757370656E646973736520636F6E76616C6C69732073656D2076656C206D617373612066617563696275732C2065676574206C6163696E6961206C616375732074656D706F722E204E756C6C61207175697320756C747269636965732070757275732E2050726F696E20617563746F722072686F6E637573206E69626820636F6E64696D656E74756D206D6F6C6C69732E20416C697175616D20636F6E73657175617420656E696D206174206D65747573206C75637475732C206120656C656966656E6420707572757320656765737461732E20437572616269747572206174206E696268206D657475732E204E616D20626962656E64756D2C206E6571756520617420617563746F72207472697374697175652C206C6F72656D206C696265726F20616C697175657420617263752C206E6F6E20696E74657264756D2074656C6C7573206C65637475732073697420616D65742065726F732E20437261732072686F6E6375732C206D65747573206163206F726E617265206375727375732C20646F6C6F72206A7573746F20756C747269636573206D657475732C20617420756C6C616D636F7270657220766F6C7574706174", + }, + + // slices + {val: []uint{}, output: "C0"}, + {val: []uint{1, 2, 3}, output: "C3010203"}, + { + // [ [], [[]], [ [], [[]] ] ] + val: []interface{}{[]interface{}{}, [][]interface{}{{}}, []interface{}{[]interface{}{}, [][]interface{}{{}}}}, + output: "C7C0C1C0C3C0C1C0", + }, + { + val: []string{"aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo"}, + output: "F83C836161618362626283636363836464648365656583666666836767678368686883696969836A6A6A836B6B6B836C6C6C836D6D6D836E6E6E836F6F6F", + }, + { + val: []interface{}{uint(1), uint(0xFFFFFF), []interface{}{[]uint{4, 5, 5}}, "abc"}, + output: "CE0183FFFFFFC4C304050583616263", + }, + { + val: [][]string{ + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + 
{"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + {"asdf", "qwer", "zxcv"}, + }, + output: "F90200CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376", + }, + + // RawValue + {val: RawValue(unhex("01")), output: "01"}, + {val: RawValue(unhex("82FFFF")), output: "82FFFF"}, + {val: []RawValue{unhex("01"), unhex("02")}, output: "C20102"}, + + // structs + {val: simplestruct{}, output: "C28080"}, + {val: simplestruct{A: 3, B: "foo"}, output: "C50383666F6F"}, + {val: &recstruct{5, nil}, output: "C205C0"}, + {val: &recstruct{5, &recstruct{4, &recstruct{3, nil}}}, output: "C605C404C203C0"}, + {val: &intField{X: 3}, error: "rlp: type int is not RLP-serializable (struct field rlp.intField.X)"}, + + // struct tag "-" + {val: &ignoredField{A: 1, B: 2, C: 3}, output: "C20103"}, + + // struct tag "tail" + {val: &tailRaw{A: 1, Tail: []RawValue{unhex("02"), unhex("03")}}, output: "C3010203"}, + {val: &tailRaw{A: 1, Tail: []RawValue{unhex("02")}}, output: "C20102"}, + {val: &tailRaw{A: 1, Tail: []RawValue{}}, output: "C101"}, + {val: &tailRaw{A: 1, Tail: nil}, output: "C101"}, + + // struct tag "optional" + {val: &optionalFields{}, output: "C180"}, + {val: &optionalFields{A: 1}, output: "C101"}, + {val: &optionalFields{A: 1, B: 2}, output: "C20102"}, + {val: &optionalFields{A: 1, B: 2, C: 3}, output: "C3010203"}, + {val: &optionalFields{A: 1, B: 0, C: 3}, output: "C3018003"}, + {val: &optionalAndTailField{A: 1}, output: "C101"}, + {val: &optionalAndTailField{A: 1, B: 2}, output: "C20102"}, + {val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"}, + {val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"}, + {val: &optionalBigIntField{A: 1}, output: "C101"}, + {val: &optionalPtrField{A: 1}, output: "C101"}, + {val: &optionalPtrFieldNil{A: 1}, output: "C101"}, + + // nil + {val: (*uint)(nil), output: "80"}, + {val: (*string)(nil), output: "80"}, + {val: (*[]byte)(nil), output: "80"}, + {val: (*[10]byte)(nil), output: "80"}, + {val: (*big.Int)(nil), output: "80"}, + {val: (*[]string)(nil), output: "C0"}, + {val: (*[10]string)(nil), output: "C0"}, + {val: (*[]interface{})(nil), output: "C0"}, + {val: (*[]struct{ uint })(nil), output: "C0"}, + {val: (*interface{})(nil), output: "C0"}, + + // nil struct fields + { + val: struct { + X *[]byte + }{}, + output: "C180", + }, + { + val: struct { + X *[2]byte + }{}, + output: "C180", + }, + { + val: struct { + X *uint64 + }{}, + 
output: "C180", + }, + { + val: struct { + X *uint64 `rlp:"nilList"` + }{}, + output: "C1C0", + }, + { + val: struct { + X *[]uint64 + }{}, + output: "C1C0", + }, + { + val: struct { + X *[]uint64 `rlp:"nilString"` + }{}, + output: "C180", + }, + + // interfaces + {val: []io.Reader{reader}, output: "C3C20102"}, // the contained value is a struct + + // Encoder + {val: (*testEncoder)(nil), output: "C0"}, + {val: &testEncoder{}, output: "00010001000100010001"}, + {val: &testEncoder{errors.New("test error")}, error: "test error"}, + {val: struct{ E testEncoderValueMethod }{}, output: "C3FAFEF0"}, + {val: struct{ E *testEncoderValueMethod }{}, output: "C1C0"}, + + // Verify that the Encoder interface works for unsupported types like func(). + {val: undecodableEncoder(func() {}), output: "F5F5F5"}, + + // Verify that pointer method testEncoder.EncodeRLP is called for + // addressable non-pointer values. + {val: &struct{ TE testEncoder }{testEncoder{}}, output: "CA00010001000100010001"}, + {val: &struct{ TE testEncoder }{testEncoder{errors.New("test error")}}, error: "test error"}, + + // Verify the error for non-addressable non-pointer Encoder. + {val: testEncoder{}, error: "rlp: unadressable value of type rlp.testEncoder, EncodeRLP is pointer method"}, + + // Verify Encoder takes precedence over []byte. + {val: []byteEncoder{0, 1, 2, 3, 4}, output: "C5C0C0C0C0C0"}, +} + +func runEncTests(t *testing.T, f func(val interface{}) ([]byte, error)) { + for i, test := range encTests { + output, err := f(test.val) + if err != nil && test.error == "" { + t.Errorf("test %d: unexpected error: %v\nvalue %#v\ntype %T", + i, err, test.val, test.val) + continue + } + if test.error != "" && fmt.Sprint(err) != test.error { + t.Errorf("test %d: error mismatch\ngot %v\nwant %v\nvalue %#v\ntype %T", + i, err, test.error, test.val, test.val) + continue + } + if err == nil && !bytes.Equal(output, unhex(test.output)) { + t.Errorf("test %d: output mismatch:\ngot %X\nwant %s\nvalue %#v\ntype %T", + i, output, test.output, test.val, test.val) + } + } +} + +func TestEncode(t *testing.T) { + runEncTests(t, func(val interface{}) ([]byte, error) { + b := new(bytes.Buffer) + err := Encode(b, val) + return b.Bytes(), err + }) +} + +func TestEncodeToBytes(t *testing.T) { + runEncTests(t, EncodeToBytes) +} + +func TestEncodeAppendToBytes(t *testing.T) { + buffer := make([]byte, 20) + runEncTests(t, func(val interface{}) ([]byte, error) { + w := NewEncoderBuffer(nil) + defer w.Flush() + + err := Encode(w, val) + if err != nil { + return nil, err + } + output := w.AppendToBytes(buffer[:0]) + return output, nil + }) +} + +func TestEncodeToReader(t *testing.T) { + runEncTests(t, func(val interface{}) ([]byte, error) { + _, r, err := EncodeToReader(val) + if err != nil { + return nil, err + } + return io.ReadAll(r) + }) +} + +func TestEncodeToReaderPiecewise(t *testing.T) { + runEncTests(t, func(val interface{}) ([]byte, error) { + size, r, err := EncodeToReader(val) + if err != nil { + return nil, err + } + + // read output piecewise + output := make([]byte, size) + for start, end := 0, 0; start < size; start = end { + if remaining := size - start; remaining < 3 { + end += remaining + } else { + end = start + 3 + } + n, err := r.Read(output[start:end]) + end = start + n + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + } + return output, nil + }) +} + +// This is a regression test verifying that encReader +// returns its encbuf to the pool only once. 
+func TestEncodeToReaderReturnToPool(t *testing.T) { + buf := make([]byte, 50) + wg := new(sync.WaitGroup) + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + for i := 0; i < 1000; i++ { + _, r, _ := EncodeToReader("foo") + io.ReadAll(r) + r.Read(buf) + r.Read(buf) + r.Read(buf) + r.Read(buf) + } + wg.Done() + }() + } + wg.Wait() +} + +var sink interface{} + +func BenchmarkIntsize(b *testing.B) { + for i := 0; i < b.N; i++ { + sink = intsize(0x12345678) + } +} + +func BenchmarkPutint(b *testing.B) { + buf := make([]byte, 8) + for i := 0; i < b.N; i++ { + putint(buf, 0x12345678) + sink = buf + } +} + +func BenchmarkEncodeBigInts(b *testing.B) { + ints := make([]*big.Int, 200) + for i := range ints { + ints[i] = math.BigPow(2, int64(i)) + } + out := bytes.NewBuffer(make([]byte, 0, 4096)) + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + out.Reset() + if err := Encode(out, ints); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeConcurrentInterface(b *testing.B) { + type struct1 struct { + A string + B *big.Int + C [20]byte + } + value := []interface{}{ + uint(999), + &struct1{A: "hello", B: big.NewInt(0xFFFFFFFF)}, + [10]byte{1, 2, 3, 4, 5, 6}, + []string{"yeah", "yeah", "yeah"}, + } + + var wg sync.WaitGroup + for cpu := 0; cpu < runtime.NumCPU(); cpu++ { + wg.Add(1) + go func() { + defer wg.Done() + + var buffer bytes.Buffer + for i := 0; i < b.N; i++ { + buffer.Reset() + err := Encode(&buffer, value) + if err != nil { + panic(err) + } + } + }() + } + wg.Wait() +} + +type byteArrayStruct struct { + A [20]byte + B [32]byte + C [32]byte +} + +func BenchmarkEncodeByteArrayStruct(b *testing.B) { + var out bytes.Buffer + var value byteArrayStruct + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + out.Reset() + if err := Encode(&out, &value); err != nil { + b.Fatal(err) + } + } +} + +type structSliceElem struct { + X uint64 + Y uint64 + Z uint64 +} + +type structPtrSlice []*structSliceElem + +func BenchmarkEncodeStructPtrSlice(b *testing.B) { + var out bytes.Buffer + var value = structPtrSlice{ + &structSliceElem{1, 1, 1}, + &structSliceElem{2, 2, 2}, + &structSliceElem{3, 3, 3}, + &structSliceElem{5, 5, 5}, + &structSliceElem{6, 6, 6}, + &structSliceElem{7, 7, 7}, + } + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + out.Reset() + if err := Encode(&out, &value); err != nil { + b.Fatal(err) + } + } +} diff --git a/rlp/encoder_example_test.go b/rlp/encoder_example_test.go new file mode 100644 index 0000000000..6b27513968 --- /dev/null +++ b/rlp/encoder_example_test.go @@ -0,0 +1,48 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp_test + +import ( + "fmt" + "io" + + "github.com/tenderly/coreth/rlp" +) + +type MyCoolType struct { + Name string + a, b uint +} + +// EncodeRLP writes x as RLP list [a, b] that omits the Name field. 
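+// For instance, &MyCoolType{Name: "foobar", a: 5, b: 6} encodes as C20506:
+// the C2 list header followed by the single-byte items 05 and 06, with Name
+// omitted (this matches the expected output of ExampleEncoder below).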
+func (x *MyCoolType) EncodeRLP(w io.Writer) (err error) { + return rlp.Encode(w, []uint{x.a, x.b}) +} + +func ExampleEncoder() { + var t *MyCoolType // t is nil pointer to MyCoolType + bytes, _ := rlp.EncodeToBytes(t) + fmt.Printf("%v → %X\n", t, bytes) + + t = &MyCoolType{Name: "foobar", a: 5, b: 6} + bytes, _ = rlp.EncodeToBytes(t) + fmt.Printf("%v → %X\n", t, bytes) + + // Output: + // → C0 + // &{foobar 5 6} → C20506 +} diff --git a/rlp/internal/rlpstruct/rlpstruct.go b/rlp/internal/rlpstruct/rlpstruct.go new file mode 100644 index 0000000000..1edead96ce --- /dev/null +++ b/rlp/internal/rlpstruct/rlpstruct.go @@ -0,0 +1,213 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package rlpstruct implements struct processing for RLP encoding/decoding. +// +// In particular, this package handles all rules around field filtering, +// struct tags and nil value determination. +package rlpstruct + +import ( + "fmt" + "reflect" + "strings" +) + +// Field represents a struct field. +type Field struct { + Name string + Index int + Exported bool + Type Type + Tag string +} + +// Type represents the attributes of a Go type. +type Type struct { + Name string + Kind reflect.Kind + IsEncoder bool // whether type implements rlp.Encoder + IsDecoder bool // whether type implements rlp.Decoder + Elem *Type // non-nil for Kind values of Ptr, Slice, Array +} + +// defaultNilValue determines whether a nil pointer to t encodes/decodes +// as an empty string or empty list. +func (t Type) DefaultNilValue() NilKind { + k := t.Kind + if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(t) { + return NilKindString + } + return NilKindList +} + +// NilKind is the RLP value encoded in place of nil pointers. +type NilKind uint8 + +const ( + NilKindString NilKind = 0x80 + NilKindList NilKind = 0xC0 +) + +// Tags represents struct tags. +type Tags struct { + // rlp:"nil" controls whether empty input results in a nil pointer. + // nilKind is the kind of empty value allowed for the field. + NilKind NilKind + NilOK bool + + // rlp:"optional" allows for a field to be missing in the input list. + // If this is set, all subsequent fields must also be optional. + Optional bool + + // rlp:"tail" controls whether this field swallows additional list elements. It can + // only be set for the last field, which must be of slice type. + Tail bool + + // rlp:"-" ignores fields. + Ignored bool +} + +// TagError is raised for invalid struct tags. +type TagError struct { + StructType string + + // These are set by this package. + Field string + Tag string + Err string +} + +func (e TagError) Error() string { + field := "field " + e.Field + if e.StructType != "" { + field = e.StructType + "." 
+ e.Field + } + return fmt.Sprintf("rlp: invalid struct tag %q for %s (%s)", e.Tag, field, e.Err) +} + +// ProcessFields filters the given struct fields, returning only fields +// that should be considered for encoding/decoding. +func ProcessFields(allFields []Field) ([]Field, []Tags, error) { + lastPublic := lastPublicField(allFields) + + // Gather all exported fields and their tags. + var fields []Field + var tags []Tags + for _, field := range allFields { + if !field.Exported { + continue + } + ts, err := parseTag(field, lastPublic) + if err != nil { + return nil, nil, err + } + if ts.Ignored { + continue + } + fields = append(fields, field) + tags = append(tags, ts) + } + + // Verify optional field consistency. If any optional field exists, + // all fields after it must also be optional. Note: optional + tail + // is supported. + var anyOptional bool + var firstOptionalName string + for i, ts := range tags { + name := fields[i].Name + if ts.Optional || ts.Tail { + if !anyOptional { + firstOptionalName = name + } + anyOptional = true + } else { + if anyOptional { + msg := fmt.Sprintf("must be optional because preceding field %q is optional", firstOptionalName) + return nil, nil, TagError{Field: name, Err: msg} + } + } + } + return fields, tags, nil +} + +func parseTag(field Field, lastPublic int) (Tags, error) { + name := field.Name + tag := reflect.StructTag(field.Tag) + var ts Tags + for _, t := range strings.Split(tag.Get("rlp"), ",") { + switch t = strings.TrimSpace(t); t { + case "": + // empty tag is allowed for some reason + case "-": + ts.Ignored = true + case "nil", "nilString", "nilList": + ts.NilOK = true + if field.Type.Kind != reflect.Ptr { + return ts, TagError{Field: name, Tag: t, Err: "field is not a pointer"} + } + switch t { + case "nil": + ts.NilKind = field.Type.Elem.DefaultNilValue() + case "nilString": + ts.NilKind = NilKindString + case "nilList": + ts.NilKind = NilKindList + } + case "optional": + ts.Optional = true + if ts.Tail { + return ts, TagError{Field: name, Tag: t, Err: `also has "tail" tag`} + } + case "tail": + ts.Tail = true + if field.Index != lastPublic { + return ts, TagError{Field: name, Tag: t, Err: "must be on last field"} + } + if ts.Optional { + return ts, TagError{Field: name, Tag: t, Err: `also has "optional" tag`} + } + if field.Type.Kind != reflect.Slice { + return ts, TagError{Field: name, Tag: t, Err: "field type is not slice"} + } + default: + return ts, TagError{Field: name, Tag: t, Err: "unknown tag"} + } + } + return ts, nil +} + +func lastPublicField(fields []Field) int { + last := 0 + for _, f := range fields { + if f.Exported { + last = f.Index + } + } + return last +} + +func isUint(k reflect.Kind) bool { + return k >= reflect.Uint && k <= reflect.Uintptr +} + +func isByte(typ Type) bool { + return typ.Kind == reflect.Uint8 && !typ.IsEncoder +} + +func isByteArray(typ Type) bool { + return (typ.Kind == reflect.Slice || typ.Kind == reflect.Array) && isByte(*typ.Elem) +} diff --git a/rlp/iterator.go b/rlp/iterator.go new file mode 100644 index 0000000000..6be574572e --- /dev/null +++ b/rlp/iterator.go @@ -0,0 +1,60 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+type listIterator struct {
+	data []byte
+	next []byte
+	err  error
+}
+
+// NewListIterator creates an iterator for the RLP list represented by data.
+// TODO: Consider removing this implementation, as it is no longer used.
+func NewListIterator(data RawValue) (*listIterator, error) {
+	k, t, c, err := readKind(data)
+	if err != nil {
+		return nil, err
+	}
+	if k != List {
+		return nil, ErrExpectedList
+	}
+	it := &listIterator{
+		data: data[t : t+c],
+	}
+	return it, nil
+}
+
+// Next forwards the iterator one step and returns true if it was not yet at the end.
+func (it *listIterator) Next() bool {
+	if len(it.data) == 0 {
+		return false
+	}
+	_, t, c, err := readKind(it.data)
+	it.next = it.data[:t+c]
+	it.data = it.data[t+c:]
+	it.err = err
+	return true
+}
+
+// Value returns the current value.
+func (it *listIterator) Value() []byte {
+	return it.next
+}
+
+func (it *listIterator) Err() error {
+	return it.err
+}
diff --git a/rlp/iterator_test.go b/rlp/iterator_test.go
new file mode 100644
index 0000000000..a22aaec862
--- /dev/null
+++ b/rlp/iterator_test.go
@@ -0,0 +1,59 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+// TestIterator tests some basic things about the ListIterator.
A more +// comprehensive test can be found in core/rlp_test.go, where we can +// use both types and rlp without dependency cycles +func TestIterator(t *testing.T) { + bodyRlpHex := "0xf902cbf8d6f869800182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ba01025c66fad28b4ce3370222624d952c35529e602af7cbe04f667371f61b0e3b3a00ab8813514d1217059748fd903288ace1b4001a4bc5fbde2790debdc8167de2ff869010182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ca05ac4cf1d19be06f3742c21df6c49a7e929ceb3dbaf6a09f3cfb56ff6828bd9a7a06875970133a35e63ac06d360aa166d228cc013e9b96e0a2cae7f55b22e1ee2e8f901f0f901eda0c75448377c0e426b8017b23c5f77379ecf69abc1d5c224284ad3ba1c46c59adaa00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000" + bodyRlp := hexutil.MustDecode(bodyRlpHex) + + it, err := NewListIterator(bodyRlp) + if err != nil { + t.Fatal(err) + } + // Check that txs exist + if !it.Next() { + t.Fatal("expected two elems, got zero") + } + txs := it.Value() + // Check that uncles exist + if !it.Next() { + t.Fatal("expected two elems, got one") + } + txit, err := NewListIterator(txs) + if err != nil { + t.Fatal(err) + } + var i = 0 + for txit.Next() { + if txit.err != nil { + t.Fatal(txit.err) + } + i++ + } + if exp := 2; i != exp { + t.Errorf("count wrong, expected %d got %d", i, exp) + } +} diff --git a/rlp/raw.go b/rlp/raw.go new file mode 100644 index 0000000000..f355efc144 --- /dev/null +++ b/rlp/raw.go @@ -0,0 +1,261 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp + +import ( + "io" + "reflect" +) + +// RawValue represents an encoded RLP value and can be used to delay +// RLP decoding or to precompute an encoding. Note that the decoder does +// not verify whether the content of RawValues is valid RLP. +type RawValue []byte + +var rawValueType = reflect.TypeOf(RawValue{}) + +// ListSize returns the encoded size of an RLP list with the given +// content size. 
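+// For example, ListSize(3) is 4: one 0xC3 header byte plus three content
+// bytes. Payloads of 56 bytes or more need an additional length-of-length
+// header, which headsize accounts for.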
+func ListSize(contentSize uint64) uint64 { + return uint64(headsize(contentSize)) + contentSize +} + +// IntSize returns the encoded size of the integer x. +func IntSize(x uint64) int { + if x < 0x80 { + return 1 + } + return 1 + intsize(x) +} + +// Split returns the content of first RLP value and any +// bytes after the value as subslices of b. +func Split(b []byte) (k Kind, content, rest []byte, err error) { + k, ts, cs, err := readKind(b) + if err != nil { + return 0, nil, b, err + } + return k, b[ts : ts+cs], b[ts+cs:], nil +} + +// SplitString splits b into the content of an RLP string +// and any remaining bytes after the string. +func SplitString(b []byte) (content, rest []byte, err error) { + k, content, rest, err := Split(b) + if err != nil { + return nil, b, err + } + if k == List { + return nil, b, ErrExpectedString + } + return content, rest, nil +} + +// SplitUint64 decodes an integer at the beginning of b. +// It also returns the remaining data after the integer in 'rest'. +func SplitUint64(b []byte) (x uint64, rest []byte, err error) { + content, rest, err := SplitString(b) + if err != nil { + return 0, b, err + } + switch { + case len(content) == 0: + return 0, rest, nil + case len(content) == 1: + if content[0] == 0 { + return 0, b, ErrCanonInt + } + return uint64(content[0]), rest, nil + case len(content) > 8: + return 0, b, errUintOverflow + default: + x, err = readSize(content, byte(len(content))) + if err != nil { + return 0, b, ErrCanonInt + } + return x, rest, nil + } +} + +// SplitList splits b into the content of a list and any remaining +// bytes after the list. +func SplitList(b []byte) (content, rest []byte, err error) { + k, content, rest, err := Split(b) + if err != nil { + return nil, b, err + } + if k != List { + return nil, b, ErrExpectedList + } + return content, rest, nil +} + +// CountValues counts the number of encoded values in b. +func CountValues(b []byte) (int, error) { + i := 0 + for ; len(b) > 0; i++ { + _, tagsize, size, err := readKind(b) + if err != nil { + return 0, err + } + b = b[tagsize+size:] + } + return i, nil +} + +func readKind(buf []byte) (k Kind, tagsize, contentsize uint64, err error) { + if len(buf) == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + b := buf[0] + switch { + case b < 0x80: + k = Byte + tagsize = 0 + contentsize = 1 + case b < 0xB8: + k = String + tagsize = 1 + contentsize = uint64(b - 0x80) + // Reject strings that should've been single bytes. + if contentsize == 1 && len(buf) > 1 && buf[1] < 128 { + return 0, 0, 0, ErrCanonSize + } + case b < 0xC0: + k = String + tagsize = uint64(b-0xB7) + 1 + contentsize, err = readSize(buf[1:], b-0xB7) + case b < 0xF8: + k = List + tagsize = 1 + contentsize = uint64(b - 0xC0) + default: + k = List + tagsize = uint64(b-0xF7) + 1 + contentsize, err = readSize(buf[1:], b-0xF7) + } + if err != nil { + return 0, 0, 0, err + } + // Reject values larger than the input slice. 
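+	// Note that the subtraction below cannot underflow: for multi-byte tags,
+	// readSize has already verified that slen fits inside buf (and its error
+	// was handled above), so tagsize never exceeds len(buf) here.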
+ if contentsize > uint64(len(buf))-tagsize { + return 0, 0, 0, ErrValueTooLarge + } + return k, tagsize, contentsize, err +} + +func readSize(b []byte, slen byte) (uint64, error) { + if int(slen) > len(b) { + return 0, io.ErrUnexpectedEOF + } + var s uint64 + switch slen { + case 1: + s = uint64(b[0]) + case 2: + s = uint64(b[0])<<8 | uint64(b[1]) + case 3: + s = uint64(b[0])<<16 | uint64(b[1])<<8 | uint64(b[2]) + case 4: + s = uint64(b[0])<<24 | uint64(b[1])<<16 | uint64(b[2])<<8 | uint64(b[3]) + case 5: + s = uint64(b[0])<<32 | uint64(b[1])<<24 | uint64(b[2])<<16 | uint64(b[3])<<8 | uint64(b[4]) + case 6: + s = uint64(b[0])<<40 | uint64(b[1])<<32 | uint64(b[2])<<24 | uint64(b[3])<<16 | uint64(b[4])<<8 | uint64(b[5]) + case 7: + s = uint64(b[0])<<48 | uint64(b[1])<<40 | uint64(b[2])<<32 | uint64(b[3])<<24 | uint64(b[4])<<16 | uint64(b[5])<<8 | uint64(b[6]) + case 8: + s = uint64(b[0])<<56 | uint64(b[1])<<48 | uint64(b[2])<<40 | uint64(b[3])<<32 | uint64(b[4])<<24 | uint64(b[5])<<16 | uint64(b[6])<<8 | uint64(b[7]) + } + // Reject sizes < 56 (shouldn't have separate size) and sizes with + // leading zero bytes. + if s < 56 || b[0] == 0 { + return 0, ErrCanonSize + } + return s, nil +} + +// AppendUint64 appends the RLP encoding of i to b, and returns the resulting slice. +func AppendUint64(b []byte, i uint64) []byte { + if i == 0 { + return append(b, 0x80) + } else if i < 128 { + return append(b, byte(i)) + } + switch { + case i < (1 << 8): + return append(b, 0x81, byte(i)) + case i < (1 << 16): + return append(b, 0x82, + byte(i>>8), + byte(i), + ) + case i < (1 << 24): + return append(b, 0x83, + byte(i>>16), + byte(i>>8), + byte(i), + ) + case i < (1 << 32): + return append(b, 0x84, + byte(i>>24), + byte(i>>16), + byte(i>>8), + byte(i), + ) + case i < (1 << 40): + return append(b, 0x85, + byte(i>>32), + byte(i>>24), + byte(i>>16), + byte(i>>8), + byte(i), + ) + + case i < (1 << 48): + return append(b, 0x86, + byte(i>>40), + byte(i>>32), + byte(i>>24), + byte(i>>16), + byte(i>>8), + byte(i), + ) + case i < (1 << 56): + return append(b, 0x87, + byte(i>>48), + byte(i>>40), + byte(i>>32), + byte(i>>24), + byte(i>>16), + byte(i>>8), + byte(i), + ) + + default: + return append(b, 0x88, + byte(i>>56), + byte(i>>48), + byte(i>>40), + byte(i>>32), + byte(i>>24), + byte(i>>16), + byte(i>>8), + byte(i), + ) + } +} diff --git a/rlp/raw_test.go b/rlp/raw_test.go new file mode 100644 index 0000000000..46adff22c5 --- /dev/null +++ b/rlp/raw_test.go @@ -0,0 +1,285 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+
+package rlp
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"testing"
+	"testing/quick"
+)
+
+func TestCountValues(t *testing.T) {
+	tests := []struct {
+		input string // note: spaces in input are stripped by unhex
+		count int
+		err   error
+	}{
+		// simple cases
+		{"", 0, nil},
+		{"00", 1, nil},
+		{"80", 1, nil},
+		{"C0", 1, nil},
+		{"01 02 03", 3, nil},
+		{"01 C406070809 02", 3, nil},
+		{"820101 820202 8403030303 04", 4, nil},
+
+		// size errors
+		{"8142", 0, ErrCanonSize},
+		{"01 01 8142", 0, ErrCanonSize},
+		{"02 84020202", 0, ErrValueTooLarge},
+
+		{
+			input: "A12000BF49F440A1CD0527E4D06E2765654C0F56452257516D793A9B8D604DCFDF2AB853F851808D10000000000000000000000000A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470",
+			count: 2,
+		},
+	}
+	for i, test := range tests {
+		count, err := CountValues(unhex(test.input))
+		if count != test.count {
+			t.Errorf("test %d: count mismatch, got %d want %d\ninput: %s", i, count, test.count, test.input)
+		}
+		if !errors.Is(err, test.err) {
+			t.Errorf("test %d: err mismatch, got %q want %q\ninput: %s", i, err, test.err, test.input)
+		}
+	}
+}
+
+func TestSplitTypes(t *testing.T) {
+	if _, _, err := SplitString(unhex("C100")); err != ErrExpectedString {
+		t.Errorf("SplitString returned %q, want %q", err, ErrExpectedString)
+	}
+	if _, _, err := SplitList(unhex("01")); err != ErrExpectedList {
+		t.Errorf("SplitList returned %q, want %q", err, ErrExpectedList)
+	}
+	if _, _, err := SplitList(unhex("81FF")); err != ErrExpectedList {
+		t.Errorf("SplitList returned %q, want %q", err, ErrExpectedList)
+	}
+}
+
+func TestSplitUint64(t *testing.T) {
+	tests := []struct {
+		input string
+		val   uint64
+		rest  string
+		err   error
+	}{
+		{"01", 1, "", nil},
+		{"7FFF", 0x7F, "FF", nil},
+		{"80FF", 0, "FF", nil},
+		{"81FAFF", 0xFA, "FF", nil},
+		{"82FAFAFF", 0xFAFA, "FF", nil},
+		{"83FAFAFAFF", 0xFAFAFA, "FF", nil},
+		{"84FAFAFAFAFF", 0xFAFAFAFA, "FF", nil},
+		{"85FAFAFAFAFAFF", 0xFAFAFAFAFA, "FF", nil},
+		{"86FAFAFAFAFAFAFF", 0xFAFAFAFAFAFA, "FF", nil},
+		{"87FAFAFAFAFAFAFAFF", 0xFAFAFAFAFAFAFA, "FF", nil},
+		{"88FAFAFAFAFAFAFAFAFF", 0xFAFAFAFAFAFAFAFA, "FF", nil},
+
+		// errors
+		{"", 0, "", io.ErrUnexpectedEOF},
+		{"00", 0, "00", ErrCanonInt},
+		{"81", 0, "81", ErrValueTooLarge},
+		{"8100", 0, "8100", ErrCanonSize},
+		{"8200FF", 0, "8200FF", ErrCanonInt},
+		{"8103FF", 0, "8103FF", ErrCanonSize},
+		{"89FAFAFAFAFAFAFAFAFAFF", 0, "89FAFAFAFAFAFAFAFAFAFF", errUintOverflow},
+	}
+
+	for i, test := range tests {
+		val, rest, err := SplitUint64(unhex(test.input))
+		if val != test.val {
+			t.Errorf("test %d: val mismatch: got %x, want %x (input %q)", i, val, test.val, test.input)
+		}
+		if !bytes.Equal(rest, unhex(test.rest)) {
+			t.Errorf("test %d: rest mismatch: got %x, want %s (input %q)", i, rest, test.rest, test.input)
+		}
+		if err != test.err {
+			t.Errorf("test %d: error mismatch: got %q, want %q", i, err, test.err)
+		}
+	}
+}
+
+func TestSplit(t *testing.T) {
+	tests := []struct {
+		input     string
+		kind      Kind
+		val, rest string
+		err       error
+	}{
+		{input: "00FFFF", kind: Byte, val: "00", rest: "FFFF"},
+		{input: "01FFFF", kind: Byte, val: "01", rest: "FFFF"},
+		{input: "7FFFFF", kind: Byte, val: "7F", rest: "FFFF"},
+		{input: "80FFFF", kind: String, val: "", rest: "FFFF"},
+		{input: "C3010203", kind: List, val: "010203"},
+
+		// errors
+		{input: "", err: io.ErrUnexpectedEOF},
+
+		{input: "8141", err: ErrCanonSize, rest: "8141"},
+		{input: "B800", err: ErrCanonSize, rest: "B800"},
+		{input: 
"B802FFFF", err: ErrCanonSize, rest: "B802FFFF"}, + {input: "B90000", err: ErrCanonSize, rest: "B90000"}, + {input: "B90055", err: ErrCanonSize, rest: "B90055"}, + {input: "BA0002FFFF", err: ErrCanonSize, rest: "BA0002FFFF"}, + {input: "F800", err: ErrCanonSize, rest: "F800"}, + {input: "F90000", err: ErrCanonSize, rest: "F90000"}, + {input: "F90055", err: ErrCanonSize, rest: "F90055"}, + {input: "FA0002FFFF", err: ErrCanonSize, rest: "FA0002FFFF"}, + + {input: "81", err: ErrValueTooLarge, rest: "81"}, + {input: "8501010101", err: ErrValueTooLarge, rest: "8501010101"}, + {input: "C60607080902", err: ErrValueTooLarge, rest: "C60607080902"}, + + // size check overflow + {input: "BFFFFFFFFFFFFFFFFF", err: ErrValueTooLarge, rest: "BFFFFFFFFFFFFFFFFF"}, + {input: "FFFFFFFFFFFFFFFFFF", err: ErrValueTooLarge, rest: "FFFFFFFFFFFFFFFFFF"}, + + { + input: "B838FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + err: ErrValueTooLarge, + rest: "B838FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + }, + { + input: "F838FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + err: ErrValueTooLarge, + rest: "F838FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + }, + + // a few bigger values, just for kicks + { + input: "F839FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + kind: List, + val: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + rest: "", + }, + { + input: "F90211A060EF29F20CC1007AE6E9530AEE16F4B31F8F1769A2D1264EC995C6D1241868D6A07C62AB8AC9838F5F5877B20BB37B387BC2106E97A3D52172CBEDB5EE17C36008A00EAB6B7324AADC0F6047C6AFC8229F09F7CF451B51D67C8DFB08D49BA8C3C626A04453343B2F3A6E42FCF87948F88AF7C8FC16D0C2735CBA7F026836239AB2C15FA024635C7291C882CE4C0763760C1A362DFC3FFCD802A55722236DE058D74202ACA0A220C808DE10F55E40AB25255201CFF009EA181D3906638E944EE2BF34049984A08D325AB26796F1CCB470F69C0F842501DC35D368A0C2575B2D243CFD1E8AB0FDA0B5298FF60DA5069463D610513C9F04F24051348391A143AFFAB7197DFACDEA72A02D2A7058A4463F8FB69378369E11EF33AE3252E2DB86CB545B36D3C26DDECE5AA0888F97BCA8E0BD83DC5B3B91CFF5FAF2F66F9501010682D67EF4A3B4E66115FBA0E8175A60C93BE9ED02921958F0EA55DA0FB5E4802AF5846147BAD92BC2D8AF26A08B3376FF433F3A4250FA64B7F804004CAC5807877D91C4427BD1CD05CF912ED8A09B32EF0F03BD13C37FF950C0CCCEFCCDD6669F2E7F2AA5CB859928E84E29763EA09BBA5E46610C8C8B1F8E921E5691BF8C7E40D75825D5EA3217AA9C3A8A355F39A0EEB95BC78251CCCEC54A97F19755C4A59A293544EEE6119AFA50531211E53C4FA00B6E86FE150BF4A9E0FEEE9C90F5465E617A861BB5E357F942881EE762212E2580", + kind: List, + val: 
"A060EF29F20CC1007AE6E9530AEE16F4B31F8F1769A2D1264EC995C6D1241868D6A07C62AB8AC9838F5F5877B20BB37B387BC2106E97A3D52172CBEDB5EE17C36008A00EAB6B7324AADC0F6047C6AFC8229F09F7CF451B51D67C8DFB08D49BA8C3C626A04453343B2F3A6E42FCF87948F88AF7C8FC16D0C2735CBA7F026836239AB2C15FA024635C7291C882CE4C0763760C1A362DFC3FFCD802A55722236DE058D74202ACA0A220C808DE10F55E40AB25255201CFF009EA181D3906638E944EE2BF34049984A08D325AB26796F1CCB470F69C0F842501DC35D368A0C2575B2D243CFD1E8AB0FDA0B5298FF60DA5069463D610513C9F04F24051348391A143AFFAB7197DFACDEA72A02D2A7058A4463F8FB69378369E11EF33AE3252E2DB86CB545B36D3C26DDECE5AA0888F97BCA8E0BD83DC5B3B91CFF5FAF2F66F9501010682D67EF4A3B4E66115FBA0E8175A60C93BE9ED02921958F0EA55DA0FB5E4802AF5846147BAD92BC2D8AF26A08B3376FF433F3A4250FA64B7F804004CAC5807877D91C4427BD1CD05CF912ED8A09B32EF0F03BD13C37FF950C0CCCEFCCDD6669F2E7F2AA5CB859928E84E29763EA09BBA5E46610C8C8B1F8E921E5691BF8C7E40D75825D5EA3217AA9C3A8A355F39A0EEB95BC78251CCCEC54A97F19755C4A59A293544EEE6119AFA50531211E53C4FA00B6E86FE150BF4A9E0FEEE9C90F5465E617A861BB5E357F942881EE762212E2580", + rest: "", + }, + { + input: "F877A12000BF49F440A1CD0527E4D06E2765654C0F56452257516D793A9B8D604DCFDF2AB853F851808D10000000000000000000000000A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470", + kind: List, + val: "A12000BF49F440A1CD0527E4D06E2765654C0F56452257516D793A9B8D604DCFDF2AB853F851808D10000000000000000000000000A056E81F171BCC55A6FF8345E692C0F86E5B48E01B996CADC001622FB5E363B421A0C5D2460186F7233C927E7DB2DCC703C0E500B653CA82273B7BFAD8045D85A470", + rest: "", + }, + } + + for i, test := range tests { + kind, val, rest, err := Split(unhex(test.input)) + if kind != test.kind { + t.Errorf("test %d: kind mismatch: got %v, want %v", i, kind, test.kind) + } + if !bytes.Equal(val, unhex(test.val)) { + t.Errorf("test %d: val mismatch: got %x, want %s", i, val, test.val) + } + if !bytes.Equal(rest, unhex(test.rest)) { + t.Errorf("test %d: rest mismatch: got %x, want %s", i, rest, test.rest) + } + if err != test.err { + t.Errorf("test %d: error mismatch: got %q, want %q", i, err, test.err) + } + } +} + +func TestReadSize(t *testing.T) { + tests := []struct { + input string + slen byte + size uint64 + err error + }{ + {input: "", slen: 1, err: io.ErrUnexpectedEOF}, + {input: "FF", slen: 2, err: io.ErrUnexpectedEOF}, + {input: "00", slen: 1, err: ErrCanonSize}, + {input: "36", slen: 1, err: ErrCanonSize}, + {input: "37", slen: 1, err: ErrCanonSize}, + {input: "38", slen: 1, size: 0x38}, + {input: "FF", slen: 1, size: 0xFF}, + {input: "FFFF", slen: 2, size: 0xFFFF}, + {input: "FFFFFF", slen: 3, size: 0xFFFFFF}, + {input: "FFFFFFFF", slen: 4, size: 0xFFFFFFFF}, + {input: "FFFFFFFFFF", slen: 5, size: 0xFFFFFFFFFF}, + {input: "FFFFFFFFFFFF", slen: 6, size: 0xFFFFFFFFFFFF}, + {input: "FFFFFFFFFFFFFF", slen: 7, size: 0xFFFFFFFFFFFFFF}, + {input: "FFFFFFFFFFFFFFFF", slen: 8, size: 0xFFFFFFFFFFFFFFFF}, + {input: "0102", slen: 2, size: 0x0102}, + {input: "010203", slen: 3, size: 0x010203}, + {input: "01020304", slen: 4, size: 0x01020304}, + {input: "0102030405", slen: 5, size: 0x0102030405}, + {input: "010203040506", slen: 6, size: 0x010203040506}, + {input: "01020304050607", slen: 7, size: 0x01020304050607}, + {input: "0102030405060708", slen: 8, size: 0x0102030405060708}, + } + + for _, test := range tests { + size, err := readSize(unhex(test.input), test.slen) + if err != test.err { + t.Errorf("readSize(%s, %d): error mismatch: got %q, want %q", test.input, test.slen, err, 
test.err) + continue + } + if size != test.size { + t.Errorf("readSize(%s, %d): size mismatch: got %#x, want %#x", test.input, test.slen, size, test.size) + } + } +} + +func TestAppendUint64(t *testing.T) { + tests := []struct { + input uint64 + slice []byte + output string + }{ + {0, nil, "80"}, + {1, nil, "01"}, + {2, nil, "02"}, + {127, nil, "7F"}, + {128, nil, "8180"}, + {129, nil, "8181"}, + {0xFFFFFF, nil, "83FFFFFF"}, + {127, []byte{1, 2, 3}, "0102037F"}, + {0xFFFFFF, []byte{1, 2, 3}, "01020383FFFFFF"}, + } + + for _, test := range tests { + x := AppendUint64(test.slice, test.input) + if !bytes.Equal(x, unhex(test.output)) { + t.Errorf("AppendUint64(%v, %d): got %x, want %s", test.slice, test.input, x, test.output) + } + + // Check that IntSize returns the appended size. + length := len(x) - len(test.slice) + if s := IntSize(test.input); s != length { + t.Errorf("IntSize(%d): got %d, want %d", test.input, s, length) + } + } +} + +func TestAppendUint64Random(t *testing.T) { + fn := func(i uint64) bool { + enc, _ := EncodeToBytes(i) + encAppend := AppendUint64(nil, i) + return bytes.Equal(enc, encAppend) + } + config := quick.Config{MaxCountScale: 50} + if err := quick.Check(fn, &config); err != nil { + t.Fatal(err) + } +} diff --git a/rlp/rlpgen/gen.go b/rlp/rlpgen/gen.go new file mode 100644 index 0000000000..15582b6ea9 --- /dev/null +++ b/rlp/rlpgen/gen.go @@ -0,0 +1,751 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/types" + "sort" + + "github.com/tenderly/coreth/rlp/internal/rlpstruct" +) + +// buildContext keeps the data needed for make*Op. +type buildContext struct { + topType *types.Named // the type we're creating methods for + + encoderIface *types.Interface + decoderIface *types.Interface + rawValueType *types.Named + + typeToStructCache map[types.Type]*rlpstruct.Type +} + +func newBuildContext(packageRLP *types.Package) *buildContext { + enc := packageRLP.Scope().Lookup("Encoder").Type().Underlying() + dec := packageRLP.Scope().Lookup("Decoder").Type().Underlying() + rawv := packageRLP.Scope().Lookup("RawValue").Type() + return &buildContext{ + typeToStructCache: make(map[types.Type]*rlpstruct.Type), + encoderIface: enc.(*types.Interface), + decoderIface: dec.(*types.Interface), + rawValueType: rawv.(*types.Named), + } +} + +func (bctx *buildContext) isEncoder(typ types.Type) bool { + return types.Implements(typ, bctx.encoderIface) +} + +func (bctx *buildContext) isDecoder(typ types.Type) bool { + return types.Implements(typ, bctx.decoderIface) +} + +// typeToStructType converts typ to rlpstruct.Type. +func (bctx *buildContext) typeToStructType(typ types.Type) *rlpstruct.Type { + if prev := bctx.typeToStructCache[typ]; prev != nil { + return prev // short-circuit for recursive types. 
+ } + + // Resolve named types to their underlying type, but keep the name. + name := types.TypeString(typ, nil) + for { + utype := typ.Underlying() + if utype == typ { + break + } + typ = utype + } + + // Create the type and store it in cache. + t := &rlpstruct.Type{ + Name: name, + Kind: typeReflectKind(typ), + IsEncoder: bctx.isEncoder(typ), + IsDecoder: bctx.isDecoder(typ), + } + bctx.typeToStructCache[typ] = t + + // Assign element type. + switch typ.(type) { + case *types.Array, *types.Slice, *types.Pointer: + etype := typ.(interface{ Elem() types.Type }).Elem() + t.Elem = bctx.typeToStructType(etype) + } + return t +} + +// genContext is passed to the gen* methods of op when generating +// the output code. It tracks packages to be imported by the output +// file and assigns unique names of temporary variables. +type genContext struct { + inPackage *types.Package + imports map[string]struct{} + tempCounter int +} + +func newGenContext(inPackage *types.Package) *genContext { + return &genContext{ + inPackage: inPackage, + imports: make(map[string]struct{}), + } +} + +func (ctx *genContext) temp() string { + v := fmt.Sprintf("_tmp%d", ctx.tempCounter) + ctx.tempCounter++ + return v +} + +func (ctx *genContext) resetTemp() { + ctx.tempCounter = 0 +} + +func (ctx *genContext) addImport(path string) { + if path == ctx.inPackage.Path() { + return // avoid importing the package that we're generating in. + } + // TODO: renaming? + ctx.imports[path] = struct{}{} +} + +// importsList returns all packages that need to be imported. +func (ctx *genContext) importsList() []string { + imp := make([]string, 0, len(ctx.imports)) + for k := range ctx.imports { + imp = append(imp, k) + } + sort.Strings(imp) + return imp +} + +// qualify is the types.Qualifier used for printing types. +func (ctx *genContext) qualify(pkg *types.Package) string { + if pkg.Path() == ctx.inPackage.Path() { + return "" + } + ctx.addImport(pkg.Path()) + // TODO: renaming? + return pkg.Name() +} + +type op interface { + // genWrite creates the encoder. The generated code should write v, + // which is any Go expression, to the rlp.EncoderBuffer 'w'. + genWrite(ctx *genContext, v string) string + + // genDecode creates the decoder. The generated code should read + // a value from the rlp.Stream 'dec' and store it to dst. + genDecode(ctx *genContext) (string, string) +} + +// basicOp handles basic types bool, uint*, string. 
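+// For example, for a plain uint64 field the generated encoder emits a call to
+// w.WriteUint64 and the decoder calls dec.Uint64: decUseBitSize causes the
+// result bit size to be appended to the "Uint" method name, as makeBasicOp
+// sets up below.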
+type basicOp struct {
+	typ           types.Type
+	writeMethod   string     // EncoderBuffer method used to write the value
+	writeArgType  types.Type // parameter type of writeMethod
+	decMethod     string
+	decResultType types.Type // return type of decMethod
+	decUseBitSize bool       // if true, result bit size is appended to decMethod
+}
+
+func (*buildContext) makeBasicOp(typ *types.Basic) (op, error) {
+	op := basicOp{typ: typ}
+	kind := typ.Kind()
+	switch {
+	case kind == types.Bool:
+		op.writeMethod = "WriteBool"
+		op.writeArgType = types.Typ[types.Bool]
+		op.decMethod = "Bool"
+		op.decResultType = types.Typ[types.Bool]
+	case kind >= types.Uint8 && kind <= types.Uint64:
+		op.writeMethod = "WriteUint64"
+		op.writeArgType = types.Typ[types.Uint64]
+		op.decMethod = "Uint"
+		op.decResultType = typ
+		op.decUseBitSize = true
+	case kind == types.String:
+		op.writeMethod = "WriteString"
+		op.writeArgType = types.Typ[types.String]
+		op.decMethod = "String"
+		op.decResultType = types.Typ[types.String]
+	default:
+		return nil, fmt.Errorf("unhandled basic type: %v", typ)
+	}
+	return op, nil
+}
+
+func (*buildContext) makeByteSliceOp(typ *types.Slice) op {
+	if !isByte(typ.Elem()) {
+		panic("non-byte slice type in makeByteSliceOp")
+	}
+	bslice := types.NewSlice(types.Typ[types.Uint8])
+	return basicOp{
+		typ:           typ,
+		writeMethod:   "WriteBytes",
+		writeArgType:  bslice,
+		decMethod:     "Bytes",
+		decResultType: bslice,
+	}
+}
+
+func (bctx *buildContext) makeRawValueOp() op {
+	bslice := types.NewSlice(types.Typ[types.Uint8])
+	return basicOp{
+		typ:           bctx.rawValueType,
+		writeMethod:   "Write",
+		writeArgType:  bslice,
+		decMethod:     "Raw",
+		decResultType: bslice,
+	}
+}
+
+func (op basicOp) writeNeedsConversion() bool {
+	return !types.AssignableTo(op.typ, op.writeArgType)
+}
+
+func (op basicOp) decodeNeedsConversion() bool {
+	return !types.AssignableTo(op.decResultType, op.typ)
+}
+
+func (op basicOp) genWrite(ctx *genContext, v string) string {
+	if op.writeNeedsConversion() {
+		v = fmt.Sprintf("%s(%s)", op.writeArgType, v)
+	}
+	return fmt.Sprintf("w.%s(%s)\n", op.writeMethod, v)
+}
+
+func (op basicOp) genDecode(ctx *genContext) (string, string) {
+	var (
+		resultV = ctx.temp()
+		result  = resultV
+		method  = op.decMethod
+	)
+	if op.decUseBitSize {
+		// Note: For now, this only works for platform-independent integer
+		// sizes. makeBasicOp forbids the platform-dependent types.
+		var sizes types.StdSizes
+		method = fmt.Sprintf("%s%d", op.decMethod, sizes.Sizeof(op.typ)*8)
+	}
+
+	// Call the decoder method.
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "%s, err := dec.%s()\n", resultV, method)
+	fmt.Fprintf(&b, "if err != nil { return err }\n")
+	if op.decodeNeedsConversion() {
+		conv := ctx.temp()
+		fmt.Fprintf(&b, "%s := %s(%s)\n", conv, types.TypeString(op.typ, ctx.qualify), resultV)
+		result = conv
+	}
+	return result, b.String()
+}
+
+// byteArrayOp handles [...]byte.
+type byteArrayOp struct {
+	typ  types.Type
+	name types.Type // name != typ for named byte array types (e.g.
common.Address)
+}
+
+func (bctx *buildContext) makeByteArrayOp(name *types.Named, typ *types.Array) byteArrayOp {
+	nt := types.Type(name)
+	if name == nil {
+		nt = typ
+	}
+	return byteArrayOp{typ, nt}
+}
+
+func (op byteArrayOp) genWrite(ctx *genContext, v string) string {
+	return fmt.Sprintf("w.WriteBytes(%s[:])\n", v)
+}
+
+func (op byteArrayOp) genDecode(ctx *genContext) (string, string) {
+	var resultV = ctx.temp()
+
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(op.name, ctx.qualify))
+	fmt.Fprintf(&b, "if err := dec.ReadBytes(%s[:]); err != nil { return err }\n", resultV)
+	return resultV, b.String()
+}
+
+// bigIntOp handles big.Int, in both pointer and non-pointer form.
+// This exists because big.Int has its own decoder operation on rlp.Stream,
+// but the decode method returns *big.Int, so it needs to be dereferenced.
+type bigIntOp struct {
+	pointer bool
+}
+
+func (op bigIntOp) genWrite(ctx *genContext, v string) string {
+	var b bytes.Buffer
+
+	fmt.Fprintf(&b, "if %s.Sign() == -1 {\n", v)
+	fmt.Fprintf(&b, " return rlp.ErrNegativeBigInt\n")
+	fmt.Fprintf(&b, "}\n")
+	dst := v
+	if !op.pointer {
+		dst = "&" + v
+	}
+	fmt.Fprintf(&b, "w.WriteBigInt(%s)\n", dst)
+
+	// Wrap with nil check.
+	if op.pointer {
+		code := b.String()
+		b.Reset()
+		fmt.Fprintf(&b, "if %s == nil {\n", v)
+		fmt.Fprintf(&b, " w.Write(rlp.EmptyString)")
+		fmt.Fprintf(&b, "} else {\n")
+		fmt.Fprint(&b, code)
+		fmt.Fprintf(&b, "}\n")
+	}
+
+	return b.String()
+}
+
+func (op bigIntOp) genDecode(ctx *genContext) (string, string) {
+	var resultV = ctx.temp()
+
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "%s, err := dec.BigInt()\n", resultV)
+	fmt.Fprintf(&b, "if err != nil { return err }\n")
+
+	result := resultV
+	if !op.pointer {
+		result = "(*" + resultV + ")"
+	}
+	return result, b.String()
+}
+
+// encoderDecoderOp handles rlp.Encoder and rlp.Decoder.
+// In order to be used with this, the type must implement both interfaces.
+// This restriction may be lifted in the future by creating separate ops for
+// encoding and decoding.
+type encoderDecoderOp struct {
+	typ types.Type
+}
+
+func (op encoderDecoderOp) genWrite(ctx *genContext, v string) string {
+	return fmt.Sprintf("if err := %s.EncodeRLP(w); err != nil { return err }\n", v)
+}
+
+func (op encoderDecoderOp) genDecode(ctx *genContext) (string, string) {
+	// DecodeRLP must have pointer receiver, and this is verified in makeOp.
+	etyp := op.typ.(*types.Pointer).Elem()
+	var resultV = ctx.temp()
+
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "%s := new(%s)\n", resultV, types.TypeString(etyp, ctx.qualify))
+	fmt.Fprintf(&b, "if err := %s.DecodeRLP(dec); err != nil { return err }\n", resultV)
+	return resultV, b.String()
+}
+
+// ptrOp handles pointer types.
+type ptrOp struct {
+	elemTyp  types.Type
+	elem     op
+	nilOK    bool
+	nilValue rlpstruct.NilKind
+}
+
+func (bctx *buildContext) makePtrOp(elemTyp types.Type, tags rlpstruct.Tags) (op, error) {
+	elemOp, err := bctx.makeOp(nil, elemTyp, rlpstruct.Tags{})
+	if err != nil {
+		return nil, err
+	}
+	op := ptrOp{elemTyp: elemTyp, elem: elemOp}
+
+	// Determine nil value.
+	if tags.NilOK {
+		op.nilOK = true
+		op.nilValue = tags.NilKind
+	} else {
+		styp := bctx.typeToStructType(elemTyp)
+		op.nilValue = styp.DefaultNilValue()
+	}
+	return op, nil
+}
+
+func (op ptrOp) genWrite(ctx *genContext, v string) string {
+	// Note: in writer functions, accesses to v are read-only, i.e. v is any Go
+	// expression. To make all accesses work through the pointer, we substitute
+	// v with (*v).
This is required for most accesses including `v`, `call(v)`, + // and `v[index]` on slices. + // + // For `v.field` and `v[:]` on arrays, the dereference operation is not required. + var vv string + _, isStruct := op.elem.(structOp) + _, isByteArray := op.elem.(byteArrayOp) + if isStruct || isByteArray { + vv = v + } else { + vv = fmt.Sprintf("(*%s)", v) + } + + var b bytes.Buffer + fmt.Fprintf(&b, "if %s == nil {\n", v) + fmt.Fprintf(&b, " w.Write([]byte{0x%X})\n", op.nilValue) + fmt.Fprintf(&b, "} else {\n") + fmt.Fprintf(&b, " %s", op.elem.genWrite(ctx, vv)) + fmt.Fprintf(&b, "}\n") + return b.String() +} + +func (op ptrOp) genDecode(ctx *genContext) (string, string) { + result, code := op.elem.genDecode(ctx) + if !op.nilOK { + // If nil pointers are not allowed, we can just decode the element. + return "&" + result, code + } + + // nil is allowed, so check the kind and size first. + // If size is zero and kind matches the nilKind of the type, + // the value decodes as a nil pointer. + var ( + resultV = ctx.temp() + kindV = ctx.temp() + sizeV = ctx.temp() + wantKind string + ) + if op.nilValue == rlpstruct.NilKindList { + wantKind = "rlp.List" + } else { + wantKind = "rlp.String" + } + var b bytes.Buffer + fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(types.NewPointer(op.elemTyp), ctx.qualify)) + fmt.Fprintf(&b, "if %s, %s, err := dec.Kind(); err != nil {\n", kindV, sizeV) + fmt.Fprintf(&b, " return err\n") + fmt.Fprintf(&b, "} else if %s != 0 || %s != %s {\n", sizeV, kindV, wantKind) + fmt.Fprint(&b, code) + fmt.Fprintf(&b, " %s = &%s\n", resultV, result) + fmt.Fprintf(&b, "}\n") + return resultV, b.String() +} + +// structOp handles struct types. +type structOp struct { + named *types.Named + typ *types.Struct + fields []*structField + optionalFields []*structField +} + +type structField struct { + name string + typ types.Type + elem op +} + +func (bctx *buildContext) makeStructOp(named *types.Named, typ *types.Struct) (op, error) { + // Convert fields to []rlpstruct.Field. + var allStructFields []rlpstruct.Field + for i := 0; i < typ.NumFields(); i++ { + f := typ.Field(i) + allStructFields = append(allStructFields, rlpstruct.Field{ + Name: f.Name(), + Exported: f.Exported(), + Index: i, + Tag: typ.Tag(i), + Type: *bctx.typeToStructType(f.Type()), + }) + } + + // Filter/validate fields. + fields, tags, err := rlpstruct.ProcessFields(allStructFields) + if err != nil { + return nil, err + } + + // Create field ops. + var op = structOp{named: named, typ: typ} + for i, field := range fields { + // Advanced struct tags are not supported yet. + tag := tags[i] + if err := checkUnsupportedTags(field.Name, tag); err != nil { + return nil, err + } + typ := typ.Field(field.Index).Type() + elem, err := bctx.makeOp(nil, typ, tags[i]) + if err != nil { + return nil, fmt.Errorf("field %s: %v", field.Name, err) + } + f := &structField{name: field.Name, typ: typ, elem: elem} + if tag.Optional { + op.optionalFields = append(op.optionalFields, f) + } else { + op.fields = append(op.fields, f) + } + } + return op, nil +} + +func checkUnsupportedTags(field string, tag rlpstruct.Tags) error { + if tag.Tail { + return fmt.Errorf(`field %s has unsupported struct tag "tail"`, field) + } + return nil +} + +func (op structOp) genWrite(ctx *genContext, v string) string { + var b bytes.Buffer + var listMarker = ctx.temp() + fmt.Fprintf(&b, "%s := w.List()\n", listMarker) + for _, field := range op.fields { + selector := v + "." 
+ field.name + fmt.Fprint(&b, field.elem.genWrite(ctx, selector)) + } + op.writeOptionalFields(&b, ctx, v) + fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker) + return b.String() +} + +func (op structOp) writeOptionalFields(b *bytes.Buffer, ctx *genContext, v string) { + if len(op.optionalFields) == 0 { + return + } + // First check zero-ness of all optional fields. + var zeroV = make([]string, len(op.optionalFields)) + for i, field := range op.optionalFields { + selector := v + "." + field.name + zeroV[i] = ctx.temp() + fmt.Fprintf(b, "%s := %s\n", zeroV[i], nonZeroCheck(selector, field.typ, ctx.qualify)) + } + // Now write the fields. + for i, field := range op.optionalFields { + selector := v + "." + field.name + cond := "" + for j := i; j < len(op.optionalFields); j++ { + if j > i { + cond += " || " + } + cond += zeroV[j] + } + fmt.Fprintf(b, "if %s {\n", cond) + fmt.Fprint(b, field.elem.genWrite(ctx, selector)) + fmt.Fprintf(b, "}\n") + } +} + +func (op structOp) genDecode(ctx *genContext) (string, string) { + // Get the string representation of the type. + // Here, named types are handled separately because the output + // would contain a copy of the struct definition otherwise. + var typeName string + if op.named != nil { + typeName = types.TypeString(op.named, ctx.qualify) + } else { + typeName = types.TypeString(op.typ, ctx.qualify) + } + + // Create struct object. + var resultV = ctx.temp() + var b bytes.Buffer + fmt.Fprintf(&b, "var %s %s\n", resultV, typeName) + + // Decode fields. + fmt.Fprintf(&b, "{\n") + fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n") + for _, field := range op.fields { + result, code := field.elem.genDecode(ctx) + fmt.Fprintf(&b, "// %s:\n", field.name) + fmt.Fprint(&b, code) + fmt.Fprintf(&b, "%s.%s = %s\n", resultV, field.name, result) + } + op.decodeOptionalFields(&b, ctx, resultV) + fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n") + fmt.Fprintf(&b, "}\n") + return resultV, b.String() +} + +func (op structOp) decodeOptionalFields(b *bytes.Buffer, ctx *genContext, resultV string) { + var suffix bytes.Buffer + for _, field := range op.optionalFields { + result, code := field.elem.genDecode(ctx) + fmt.Fprintf(b, "// %s:\n", field.name) + fmt.Fprintf(b, "if dec.MoreDataInList() {\n") + fmt.Fprint(b, code) + fmt.Fprintf(b, "%s.%s = %s\n", resultV, field.name, result) + fmt.Fprintf(&suffix, "}\n") + } + suffix.WriteTo(b) +} + +// sliceOp handles slice types. 
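+// Slices other than []byte are encoded as RLP lists, one element at a time;
+// e.g. a []uint64 field becomes w.List(), one WriteUint64 per element, then w.ListEnd.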
+type sliceOp struct { + typ *types.Slice + elemOp op +} + +func (bctx *buildContext) makeSliceOp(typ *types.Slice) (op, error) { + elemOp, err := bctx.makeOp(nil, typ.Elem(), rlpstruct.Tags{}) + if err != nil { + return nil, err + } + return sliceOp{typ: typ, elemOp: elemOp}, nil +} + +func (op sliceOp) genWrite(ctx *genContext, v string) string { + var ( + listMarker = ctx.temp() // holds return value of w.List() + iterElemV = ctx.temp() // iteration variable + elemCode = op.elemOp.genWrite(ctx, iterElemV) + ) + + var b bytes.Buffer + fmt.Fprintf(&b, "%s := w.List()\n", listMarker) + fmt.Fprintf(&b, "for _, %s := range %s {\n", iterElemV, v) + fmt.Fprint(&b, elemCode) + fmt.Fprintf(&b, "}\n") + fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker) + return b.String() +} + +func (op sliceOp) genDecode(ctx *genContext) (string, string) { + var sliceV = ctx.temp() // holds the output slice + elemResult, elemCode := op.elemOp.genDecode(ctx) + + var b bytes.Buffer + fmt.Fprintf(&b, "var %s %s\n", sliceV, types.TypeString(op.typ, ctx.qualify)) + fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n") + fmt.Fprintf(&b, "for dec.MoreDataInList() {\n") + fmt.Fprintf(&b, " %s", elemCode) + fmt.Fprintf(&b, " %s = append(%s, %s)\n", sliceV, sliceV, elemResult) + fmt.Fprintf(&b, "}\n") + fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n") + return sliceV, b.String() +} + +func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstruct.Tags) (op, error) { + switch typ := typ.(type) { + case *types.Named: + if isBigInt(typ) { + return bigIntOp{}, nil + } + if typ == bctx.rawValueType { + return bctx.makeRawValueOp(), nil + } + if bctx.isDecoder(typ) { + return nil, fmt.Errorf("type %v implements rlp.Decoder with non-pointer receiver", typ) + } + // TODO: same check for encoder? + return bctx.makeOp(typ, typ.Underlying(), tags) + case *types.Pointer: + if isBigInt(typ.Elem()) { + return bigIntOp{pointer: true}, nil + } + // Encoder/Decoder interfaces. + if bctx.isEncoder(typ) { + if bctx.isDecoder(typ) { + return encoderDecoderOp{typ}, nil + } + return nil, fmt.Errorf("type %v implements rlp.Encoder but not rlp.Decoder", typ) + } + if bctx.isDecoder(typ) { + return nil, fmt.Errorf("type %v implements rlp.Decoder but not rlp.Encoder", typ) + } + // Default pointer handling. + return bctx.makePtrOp(typ.Elem(), tags) + case *types.Basic: + return bctx.makeBasicOp(typ) + case *types.Struct: + return bctx.makeStructOp(name, typ) + case *types.Slice: + etyp := typ.Elem() + if isByte(etyp) && !bctx.isEncoder(etyp) { + return bctx.makeByteSliceOp(typ), nil + } + return bctx.makeSliceOp(typ) + case *types.Array: + etyp := typ.Elem() + if isByte(etyp) && !bctx.isEncoder(etyp) { + return bctx.makeByteArrayOp(name, typ), nil + } + return nil, fmt.Errorf("unhandled array type: %v", typ) + default: + return nil, fmt.Errorf("unhandled type: %v", typ) + } +} + +// generateDecoder generates the DecodeRLP method on 'typ'. +func generateDecoder(ctx *genContext, typ string, op op) []byte { + ctx.resetTemp() + ctx.addImport(pathOfPackageRLP) + + result, code := op.genDecode(ctx) + var b bytes.Buffer + fmt.Fprintf(&b, "func (obj *%s) DecodeRLP(dec *rlp.Stream) error {\n", typ) + fmt.Fprint(&b, code) + fmt.Fprintf(&b, " *obj = %s\n", result) + fmt.Fprintf(&b, " return nil\n") + fmt.Fprintf(&b, "}\n") + return b.Bytes() +} + +// generateEncoder generates the EncodeRLP method on 'typ'. 
+func generateEncoder(ctx *genContext, typ string, op op) []byte { + ctx.resetTemp() + ctx.addImport("io") + ctx.addImport(pathOfPackageRLP) + + var b bytes.Buffer + fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ) + fmt.Fprintf(&b, " w := rlp.NewEncoderBuffer(_w)\n") + fmt.Fprint(&b, op.genWrite(ctx, "obj")) + fmt.Fprintf(&b, " return w.Flush()\n") + fmt.Fprintf(&b, "}\n") + return b.Bytes() +} + +func (bctx *buildContext) generate(typ *types.Named, encoder, decoder bool) ([]byte, error) { + bctx.topType = typ + + pkg := typ.Obj().Pkg() + op, err := bctx.makeOp(nil, typ, rlpstruct.Tags{}) + if err != nil { + return nil, err + } + + var ( + ctx = newGenContext(pkg) + encSource []byte + decSource []byte + ) + if encoder { + encSource = generateEncoder(ctx, typ.Obj().Name(), op) + } + if decoder { + decSource = generateDecoder(ctx, typ.Obj().Name(), op) + } + + var b bytes.Buffer + fmt.Fprintf(&b, "package %s\n\n", pkg.Name()) + for _, imp := range ctx.importsList() { + fmt.Fprintf(&b, "import %q\n", imp) + } + if encoder { + fmt.Fprintln(&b) + b.Write(encSource) + } + if decoder { + fmt.Fprintln(&b) + b.Write(decSource) + } + + source := b.Bytes() + // fmt.Println(string(source)) + return format.Source(source) +} diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go new file mode 100644 index 0000000000..241c34b6df --- /dev/null +++ b/rlp/rlpgen/gen_test.go @@ -0,0 +1,107 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/importer" + "go/parser" + "go/token" + "go/types" + "os" + "path/filepath" + "testing" +) + +// Package RLP is loaded only once and reused for all tests. +var ( + testFset = token.NewFileSet() + testImporter = importer.ForCompiler(testFset, "source", nil).(types.ImporterFrom) + testPackageRLP *types.Package +) + +func init() { + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + testPackageRLP, err = testImporter.ImportFrom(pathOfPackageRLP, cwd, 0) + if err != nil { + panic(fmt.Errorf("can't load package RLP: %v", err)) + } +} + +var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint"} + +func TestOutput(t *testing.T) { + for _, test := range tests { + test := test + t.Run(test, func(t *testing.T) { + inputFile := filepath.Join("testdata", test+".in.txt") + outputFile := filepath.Join("testdata", test+".out.txt") + bctx, typ, err := loadTestSource(inputFile, "Test") + if err != nil { + t.Fatal("error loading test source:", err) + } + output, err := bctx.generate(typ, true, true) + if err != nil { + t.Fatal("error in generate:", err) + } + + // Set this environment variable to regenerate the test outputs. + if os.Getenv("WRITE_TEST_FILES") != "" { + os.WriteFile(outputFile, output, 0644) + } + + // Check if output matches. 
+ wantOutput, err := os.ReadFile(outputFile) + if err != nil { + t.Fatal("error loading expected test output:", err) + } + if !bytes.Equal(output, wantOutput) { + t.Fatal("output mismatch:\n", string(output)) + } + }) + } +} + +func loadTestSource(file string, typeName string) (*buildContext, *types.Named, error) { + // Load the test input. + content, err := os.ReadFile(file) + if err != nil { + return nil, nil, err + } + f, err := parser.ParseFile(testFset, file, content, 0) + if err != nil { + return nil, nil, err + } + conf := types.Config{Importer: testImporter} + pkg, err := conf.Check("test", testFset, []*ast.File{f}, nil) + if err != nil { + return nil, nil, err + } + + // Find the test struct. + bctx := newBuildContext(testPackageRLP) + typ, err := lookupStructType(pkg.Scope(), typeName) + if err != nil { + return nil, nil, fmt.Errorf("can't find type %s: %v", typeName, err) + } + return bctx, typ, nil +} diff --git a/rlp/rlpgen/main.go b/rlp/rlpgen/main.go new file mode 100644 index 0000000000..fcef813285 --- /dev/null +++ b/rlp/rlpgen/main.go @@ -0,0 +1,147 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "bytes" + "errors" + "flag" + "fmt" + "go/types" + "os" + + "golang.org/x/tools/go/packages" +) + +const pathOfPackageRLP = "github.com/tenderly/coreth/rlp" + +func main() { + var ( + pkgdir = flag.String("dir", ".", "input package") + output = flag.String("out", "-", "output file (default is stdout)") + genEncoder = flag.Bool("encoder", true, "generate EncodeRLP?") + genDecoder = flag.Bool("decoder", false, "generate DecodeRLP?") + typename = flag.String("type", "", "type to generate methods for") + ) + flag.Parse() + + cfg := Config{ + Dir: *pkgdir, + Type: *typename, + GenerateEncoder: *genEncoder, + GenerateDecoder: *genDecoder, + } + code, err := cfg.process() + if err != nil { + fatal(err) + } + if *output == "-" { + os.Stdout.Write(code) + } else if err := os.WriteFile(*output, code, 0600); err != nil { + fatal(err) + } +} + +func fatal(args ...interface{}) { + fmt.Fprintln(os.Stderr, args...) + os.Exit(1) +} + +type Config struct { + Dir string // input package directory + Type string + + GenerateEncoder bool + GenerateDecoder bool +} + +// process generates the Go code. +func (cfg *Config) process() (code []byte, err error) { + // Load packages. + pcfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedImports | packages.NeedDeps, + Dir: cfg.Dir, + BuildFlags: []string{"-tags", "norlpgen"}, + } + ps, err := packages.Load(pcfg, pathOfPackageRLP, ".") + if err != nil { + return nil, err + } + if len(ps) == 0 { + return nil, fmt.Errorf("no Go package found in %s", cfg.Dir) + } + packages.PrintErrors(ps) + + // Find the packages that were loaded. 
+ var ( + pkg *types.Package + packageRLP *types.Package + ) + for _, p := range ps { + if len(p.Errors) > 0 { + return nil, fmt.Errorf("package %s has errors", p.PkgPath) + } + if p.PkgPath == pathOfPackageRLP { + packageRLP = p.Types + } else { + pkg = p.Types + } + } + bctx := newBuildContext(packageRLP) + + // Find the type and generate. + typ, err := lookupStructType(pkg.Scope(), cfg.Type) + if err != nil { + return nil, fmt.Errorf("can't find %s in %s: %v", cfg.Type, pkg, err) + } + code, err = bctx.generate(typ, cfg.GenerateEncoder, cfg.GenerateDecoder) + if err != nil { + return nil, err + } + + // Add build comments. + // This is done here to avoid processing these lines with gofmt. + var header bytes.Buffer + fmt.Fprint(&header, "// Code generated by rlpgen. DO NOT EDIT.\n\n") + fmt.Fprint(&header, "//go:build !norlpgen\n") + fmt.Fprint(&header, "// +build !norlpgen\n\n") + return append(header.Bytes(), code...), nil +} + +func lookupStructType(scope *types.Scope, name string) (*types.Named, error) { + typ, err := lookupType(scope, name) + if err != nil { + return nil, err + } + _, ok := typ.Underlying().(*types.Struct) + if !ok { + return nil, errors.New("not a struct type") + } + return typ, nil +} + +func lookupType(scope *types.Scope, name string) (*types.Named, error) { + obj := scope.Lookup(name) + if obj == nil { + return nil, errors.New("no such identifier") + } + typ, ok := obj.(*types.TypeName) + if !ok { + return nil, errors.New("not a type") + } + return typ.Type().(*types.Named), nil +} diff --git a/rlp/rlpgen/testdata/bigint.in.txt b/rlp/rlpgen/testdata/bigint.in.txt new file mode 100644 index 0000000000..d23d84a287 --- /dev/null +++ b/rlp/rlpgen/testdata/bigint.in.txt @@ -0,0 +1,10 @@ +// -*- mode: go -*- + +package test + +import "math/big" + +type Test struct { + Int *big.Int + IntNoPtr big.Int +} diff --git a/rlp/rlpgen/testdata/bigint.out.txt b/rlp/rlpgen/testdata/bigint.out.txt new file mode 100644 index 0000000000..70f19680f7 --- /dev/null +++ b/rlp/rlpgen/testdata/bigint.out.txt @@ -0,0 +1,49 @@ +package test + +import "github.com/tenderly/coreth/rlp" +import "io" + +func (obj *Test) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + if obj.Int == nil { + w.Write(rlp.EmptyString) + } else { + if obj.Int.Sign() == -1 { + return rlp.ErrNegativeBigInt + } + w.WriteBigInt(obj.Int) + } + if obj.IntNoPtr.Sign() == -1 { + return rlp.ErrNegativeBigInt + } + w.WriteBigInt(&obj.IntNoPtr) + w.ListEnd(_tmp0) + return w.Flush() +} + +func (obj *Test) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Test + { + if _, err := dec.List(); err != nil { + return err + } + // Int: + _tmp1, err := dec.BigInt() + if err != nil { + return err + } + _tmp0.Int = _tmp1 + // IntNoPtr: + _tmp2, err := dec.BigInt() + if err != nil { + return err + } + _tmp0.IntNoPtr = (*_tmp2) + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/rlp/rlpgen/testdata/nil.in.txt b/rlp/rlpgen/testdata/nil.in.txt new file mode 100644 index 0000000000..a28ff34487 --- /dev/null +++ b/rlp/rlpgen/testdata/nil.in.txt @@ -0,0 +1,30 @@ +// -*- mode: go -*- + +package test + +type Aux struct{ + A uint32 +} + +type Test struct{ + Uint8 *byte `rlp:"nil"` + Uint8List *byte `rlp:"nilList"` + + Uint32 *uint32 `rlp:"nil"` + Uint32List *uint32 `rlp:"nilList"` + + Uint64 *uint64 `rlp:"nil"` + Uint64List *uint64 `rlp:"nilList"` + + String *string `rlp:"nil"` + StringList *string `rlp:"nilList"` + + ByteArray *[3]byte `rlp:"nil"` + 
ByteArrayList *[3]byte `rlp:"nilList"` + + ByteSlice *[]byte `rlp:"nil"` + ByteSliceList *[]byte `rlp:"nilList"` + + Struct *Aux `rlp:"nil"` + StructString *Aux `rlp:"nilString"` +} diff --git a/rlp/rlpgen/testdata/nil.out.txt b/rlp/rlpgen/testdata/nil.out.txt new file mode 100644 index 0000000000..8f176e7e99 --- /dev/null +++ b/rlp/rlpgen/testdata/nil.out.txt @@ -0,0 +1,289 @@ +package test + +import "github.com/tenderly/coreth/rlp" +import "io" + +func (obj *Test) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + if obj.Uint8 == nil { + w.Write([]byte{0x80}) + } else { + w.WriteUint64(uint64((*obj.Uint8))) + } + if obj.Uint8List == nil { + w.Write([]byte{0xC0}) + } else { + w.WriteUint64(uint64((*obj.Uint8List))) + } + if obj.Uint32 == nil { + w.Write([]byte{0x80}) + } else { + w.WriteUint64(uint64((*obj.Uint32))) + } + if obj.Uint32List == nil { + w.Write([]byte{0xC0}) + } else { + w.WriteUint64(uint64((*obj.Uint32List))) + } + if obj.Uint64 == nil { + w.Write([]byte{0x80}) + } else { + w.WriteUint64((*obj.Uint64)) + } + if obj.Uint64List == nil { + w.Write([]byte{0xC0}) + } else { + w.WriteUint64((*obj.Uint64List)) + } + if obj.String == nil { + w.Write([]byte{0x80}) + } else { + w.WriteString((*obj.String)) + } + if obj.StringList == nil { + w.Write([]byte{0xC0}) + } else { + w.WriteString((*obj.StringList)) + } + if obj.ByteArray == nil { + w.Write([]byte{0x80}) + } else { + w.WriteBytes(obj.ByteArray[:]) + } + if obj.ByteArrayList == nil { + w.Write([]byte{0xC0}) + } else { + w.WriteBytes(obj.ByteArrayList[:]) + } + if obj.ByteSlice == nil { + w.Write([]byte{0x80}) + } else { + w.WriteBytes((*obj.ByteSlice)) + } + if obj.ByteSliceList == nil { + w.Write([]byte{0xC0}) + } else { + w.WriteBytes((*obj.ByteSliceList)) + } + if obj.Struct == nil { + w.Write([]byte{0xC0}) + } else { + _tmp1 := w.List() + w.WriteUint64(uint64(obj.Struct.A)) + w.ListEnd(_tmp1) + } + if obj.StructString == nil { + w.Write([]byte{0x80}) + } else { + _tmp2 := w.List() + w.WriteUint64(uint64(obj.StructString.A)) + w.ListEnd(_tmp2) + } + w.ListEnd(_tmp0) + return w.Flush() +} + +func (obj *Test) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Test + { + if _, err := dec.List(); err != nil { + return err + } + // Uint8: + var _tmp2 *byte + if _tmp3, _tmp4, err := dec.Kind(); err != nil { + return err + } else if _tmp4 != 0 || _tmp3 != rlp.String { + _tmp1, err := dec.Uint8() + if err != nil { + return err + } + _tmp2 = &_tmp1 + } + _tmp0.Uint8 = _tmp2 + // Uint8List: + var _tmp6 *byte + if _tmp7, _tmp8, err := dec.Kind(); err != nil { + return err + } else if _tmp8 != 0 || _tmp7 != rlp.List { + _tmp5, err := dec.Uint8() + if err != nil { + return err + } + _tmp6 = &_tmp5 + } + _tmp0.Uint8List = _tmp6 + // Uint32: + var _tmp10 *uint32 + if _tmp11, _tmp12, err := dec.Kind(); err != nil { + return err + } else if _tmp12 != 0 || _tmp11 != rlp.String { + _tmp9, err := dec.Uint32() + if err != nil { + return err + } + _tmp10 = &_tmp9 + } + _tmp0.Uint32 = _tmp10 + // Uint32List: + var _tmp14 *uint32 + if _tmp15, _tmp16, err := dec.Kind(); err != nil { + return err + } else if _tmp16 != 0 || _tmp15 != rlp.List { + _tmp13, err := dec.Uint32() + if err != nil { + return err + } + _tmp14 = &_tmp13 + } + _tmp0.Uint32List = _tmp14 + // Uint64: + var _tmp18 *uint64 + if _tmp19, _tmp20, err := dec.Kind(); err != nil { + return err + } else if _tmp20 != 0 || _tmp19 != rlp.String { + _tmp17, err := dec.Uint64() + if err != nil { + return err + } + _tmp18 = &_tmp17 + } + _tmp0.Uint64 = _tmp18 + 
// Uint64List: + var _tmp22 *uint64 + if _tmp23, _tmp24, err := dec.Kind(); err != nil { + return err + } else if _tmp24 != 0 || _tmp23 != rlp.List { + _tmp21, err := dec.Uint64() + if err != nil { + return err + } + _tmp22 = &_tmp21 + } + _tmp0.Uint64List = _tmp22 + // String: + var _tmp26 *string + if _tmp27, _tmp28, err := dec.Kind(); err != nil { + return err + } else if _tmp28 != 0 || _tmp27 != rlp.String { + _tmp25, err := dec.String() + if err != nil { + return err + } + _tmp26 = &_tmp25 + } + _tmp0.String = _tmp26 + // StringList: + var _tmp30 *string + if _tmp31, _tmp32, err := dec.Kind(); err != nil { + return err + } else if _tmp32 != 0 || _tmp31 != rlp.List { + _tmp29, err := dec.String() + if err != nil { + return err + } + _tmp30 = &_tmp29 + } + _tmp0.StringList = _tmp30 + // ByteArray: + var _tmp34 *[3]byte + if _tmp35, _tmp36, err := dec.Kind(); err != nil { + return err + } else if _tmp36 != 0 || _tmp35 != rlp.String { + var _tmp33 [3]byte + if err := dec.ReadBytes(_tmp33[:]); err != nil { + return err + } + _tmp34 = &_tmp33 + } + _tmp0.ByteArray = _tmp34 + // ByteArrayList: + var _tmp38 *[3]byte + if _tmp39, _tmp40, err := dec.Kind(); err != nil { + return err + } else if _tmp40 != 0 || _tmp39 != rlp.List { + var _tmp37 [3]byte + if err := dec.ReadBytes(_tmp37[:]); err != nil { + return err + } + _tmp38 = &_tmp37 + } + _tmp0.ByteArrayList = _tmp38 + // ByteSlice: + var _tmp42 *[]byte + if _tmp43, _tmp44, err := dec.Kind(); err != nil { + return err + } else if _tmp44 != 0 || _tmp43 != rlp.String { + _tmp41, err := dec.Bytes() + if err != nil { + return err + } + _tmp42 = &_tmp41 + } + _tmp0.ByteSlice = _tmp42 + // ByteSliceList: + var _tmp46 *[]byte + if _tmp47, _tmp48, err := dec.Kind(); err != nil { + return err + } else if _tmp48 != 0 || _tmp47 != rlp.List { + _tmp45, err := dec.Bytes() + if err != nil { + return err + } + _tmp46 = &_tmp45 + } + _tmp0.ByteSliceList = _tmp46 + // Struct: + var _tmp51 *Aux + if _tmp52, _tmp53, err := dec.Kind(); err != nil { + return err + } else if _tmp53 != 0 || _tmp52 != rlp.List { + var _tmp49 Aux + { + if _, err := dec.List(); err != nil { + return err + } + // A: + _tmp50, err := dec.Uint32() + if err != nil { + return err + } + _tmp49.A = _tmp50 + if err := dec.ListEnd(); err != nil { + return err + } + } + _tmp51 = &_tmp49 + } + _tmp0.Struct = _tmp51 + // StructString: + var _tmp56 *Aux + if _tmp57, _tmp58, err := dec.Kind(); err != nil { + return err + } else if _tmp58 != 0 || _tmp57 != rlp.String { + var _tmp54 Aux + { + if _, err := dec.List(); err != nil { + return err + } + // A: + _tmp55, err := dec.Uint32() + if err != nil { + return err + } + _tmp54.A = _tmp55 + if err := dec.ListEnd(); err != nil { + return err + } + } + _tmp56 = &_tmp54 + } + _tmp0.StructString = _tmp56 + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/rlp/rlpgen/testdata/optional.in.txt b/rlp/rlpgen/testdata/optional.in.txt new file mode 100644 index 0000000000..f1ac9f7899 --- /dev/null +++ b/rlp/rlpgen/testdata/optional.in.txt @@ -0,0 +1,17 @@ +// -*- mode: go -*- + +package test + +type Aux struct { + A uint64 +} + +type Test struct { + Uint64 uint64 `rlp:"optional"` + Pointer *uint64 `rlp:"optional"` + String string `rlp:"optional"` + Slice []uint64 `rlp:"optional"` + Array [3]byte `rlp:"optional"` + NamedStruct Aux `rlp:"optional"` + AnonStruct struct{ A string } `rlp:"optional"` +} diff --git a/rlp/rlpgen/testdata/optional.out.txt b/rlp/rlpgen/testdata/optional.out.txt new file mode 100644 
index 0000000000..7dc5612547 --- /dev/null +++ b/rlp/rlpgen/testdata/optional.out.txt @@ -0,0 +1,153 @@ +package test + +import "github.com/tenderly/coreth/rlp" +import "io" + +func (obj *Test) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + _tmp1 := obj.Uint64 != 0 + _tmp2 := obj.Pointer != nil + _tmp3 := obj.String != "" + _tmp4 := len(obj.Slice) > 0 + _tmp5 := obj.Array != ([3]byte{}) + _tmp6 := obj.NamedStruct != (Aux{}) + _tmp7 := obj.AnonStruct != (struct{ A string }{}) + if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 { + w.WriteUint64(obj.Uint64) + } + if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 { + if obj.Pointer == nil { + w.Write([]byte{0x80}) + } else { + w.WriteUint64((*obj.Pointer)) + } + } + if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 { + w.WriteString(obj.String) + } + if _tmp4 || _tmp5 || _tmp6 || _tmp7 { + _tmp8 := w.List() + for _, _tmp9 := range obj.Slice { + w.WriteUint64(_tmp9) + } + w.ListEnd(_tmp8) + } + if _tmp5 || _tmp6 || _tmp7 { + w.WriteBytes(obj.Array[:]) + } + if _tmp6 || _tmp7 { + _tmp10 := w.List() + w.WriteUint64(obj.NamedStruct.A) + w.ListEnd(_tmp10) + } + if _tmp7 { + _tmp11 := w.List() + w.WriteString(obj.AnonStruct.A) + w.ListEnd(_tmp11) + } + w.ListEnd(_tmp0) + return w.Flush() +} + +func (obj *Test) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Test + { + if _, err := dec.List(); err != nil { + return err + } + // Uint64: + if dec.MoreDataInList() { + _tmp1, err := dec.Uint64() + if err != nil { + return err + } + _tmp0.Uint64 = _tmp1 + // Pointer: + if dec.MoreDataInList() { + _tmp2, err := dec.Uint64() + if err != nil { + return err + } + _tmp0.Pointer = &_tmp2 + // String: + if dec.MoreDataInList() { + _tmp3, err := dec.String() + if err != nil { + return err + } + _tmp0.String = _tmp3 + // Slice: + if dec.MoreDataInList() { + var _tmp4 []uint64 + if _, err := dec.List(); err != nil { + return err + } + for dec.MoreDataInList() { + _tmp5, err := dec.Uint64() + if err != nil { + return err + } + _tmp4 = append(_tmp4, _tmp5) + } + if err := dec.ListEnd(); err != nil { + return err + } + _tmp0.Slice = _tmp4 + // Array: + if dec.MoreDataInList() { + var _tmp6 [3]byte + if err := dec.ReadBytes(_tmp6[:]); err != nil { + return err + } + _tmp0.Array = _tmp6 + // NamedStruct: + if dec.MoreDataInList() { + var _tmp7 Aux + { + if _, err := dec.List(); err != nil { + return err + } + // A: + _tmp8, err := dec.Uint64() + if err != nil { + return err + } + _tmp7.A = _tmp8 + if err := dec.ListEnd(); err != nil { + return err + } + } + _tmp0.NamedStruct = _tmp7 + // AnonStruct: + if dec.MoreDataInList() { + var _tmp9 struct{ A string } + { + if _, err := dec.List(); err != nil { + return err + } + // A: + _tmp10, err := dec.String() + if err != nil { + return err + } + _tmp9.A = _tmp10 + if err := dec.ListEnd(); err != nil { + return err + } + } + _tmp0.AnonStruct = _tmp9 + } + } + } + } + } + } + } + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/rlp/rlpgen/testdata/rawvalue.in.txt b/rlp/rlpgen/testdata/rawvalue.in.txt new file mode 100644 index 0000000000..1540e071bf --- /dev/null +++ b/rlp/rlpgen/testdata/rawvalue.in.txt @@ -0,0 +1,11 @@ +// -*- mode: go -*- + +package test + +import "github.com/tenderly/coreth/rlp" + +type Test struct { + RawValue rlp.RawValue + PointerToRawValue *rlp.RawValue + SliceOfRawValue []rlp.RawValue +} diff --git a/rlp/rlpgen/testdata/rawvalue.out.txt b/rlp/rlpgen/testdata/rawvalue.out.txt new file mode 100644 
index 0000000000..388c7db8fd --- /dev/null +++ b/rlp/rlpgen/testdata/rawvalue.out.txt @@ -0,0 +1,64 @@ +package test + +import "github.com/tenderly/coreth/rlp" +import "io" + +func (obj *Test) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + w.Write(obj.RawValue) + if obj.PointerToRawValue == nil { + w.Write([]byte{0x80}) + } else { + w.Write((*obj.PointerToRawValue)) + } + _tmp1 := w.List() + for _, _tmp2 := range obj.SliceOfRawValue { + w.Write(_tmp2) + } + w.ListEnd(_tmp1) + w.ListEnd(_tmp0) + return w.Flush() +} + +func (obj *Test) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Test + { + if _, err := dec.List(); err != nil { + return err + } + // RawValue: + _tmp1, err := dec.Raw() + if err != nil { + return err + } + _tmp0.RawValue = _tmp1 + // PointerToRawValue: + _tmp2, err := dec.Raw() + if err != nil { + return err + } + _tmp0.PointerToRawValue = &_tmp2 + // SliceOfRawValue: + var _tmp3 []rlp.RawValue + if _, err := dec.List(); err != nil { + return err + } + for dec.MoreDataInList() { + _tmp4, err := dec.Raw() + if err != nil { + return err + } + _tmp3 = append(_tmp3, _tmp4) + } + if err := dec.ListEnd(); err != nil { + return err + } + _tmp0.SliceOfRawValue = _tmp3 + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/rlp/rlpgen/testdata/uints.in.txt b/rlp/rlpgen/testdata/uints.in.txt new file mode 100644 index 0000000000..8095da997d --- /dev/null +++ b/rlp/rlpgen/testdata/uints.in.txt @@ -0,0 +1,10 @@ +// -*- mode: go -*- + +package test + +type Test struct{ + A uint8 + B uint16 + C uint32 + D uint64 +} diff --git a/rlp/rlpgen/testdata/uints.out.txt b/rlp/rlpgen/testdata/uints.out.txt new file mode 100644 index 0000000000..1d6eb768ff --- /dev/null +++ b/rlp/rlpgen/testdata/uints.out.txt @@ -0,0 +1,53 @@ +package test + +import "github.com/tenderly/coreth/rlp" +import "io" + +func (obj *Test) EncodeRLP(_w io.Writer) error { + w := rlp.NewEncoderBuffer(_w) + _tmp0 := w.List() + w.WriteUint64(uint64(obj.A)) + w.WriteUint64(uint64(obj.B)) + w.WriteUint64(uint64(obj.C)) + w.WriteUint64(obj.D) + w.ListEnd(_tmp0) + return w.Flush() +} + +func (obj *Test) DecodeRLP(dec *rlp.Stream) error { + var _tmp0 Test + { + if _, err := dec.List(); err != nil { + return err + } + // A: + _tmp1, err := dec.Uint8() + if err != nil { + return err + } + _tmp0.A = _tmp1 + // B: + _tmp2, err := dec.Uint16() + if err != nil { + return err + } + _tmp0.B = _tmp2 + // C: + _tmp3, err := dec.Uint32() + if err != nil { + return err + } + _tmp0.C = _tmp3 + // D: + _tmp4, err := dec.Uint64() + if err != nil { + return err + } + _tmp0.D = _tmp4 + if err := dec.ListEnd(); err != nil { + return err + } + } + *obj = _tmp0 + return nil +} diff --git a/rlp/rlpgen/types.go b/rlp/rlpgen/types.go new file mode 100644 index 0000000000..19694262e5 --- /dev/null +++ b/rlp/rlpgen/types.go @@ -0,0 +1,114 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "fmt" + "go/types" + "reflect" +) + +// typeReflectKind gives the reflect.Kind that represents typ. +func typeReflectKind(typ types.Type) reflect.Kind { + switch typ := typ.(type) { + case *types.Basic: + k := typ.Kind() + if k >= types.Bool && k <= types.Complex128 { + // value order matches for Bool..Complex128 + return reflect.Bool + reflect.Kind(k-types.Bool) + } + if k == types.String { + return reflect.String + } + if k == types.UnsafePointer { + return reflect.UnsafePointer + } + panic(fmt.Errorf("unhandled BasicKind %v", k)) + case *types.Array: + return reflect.Array + case *types.Chan: + return reflect.Chan + case *types.Interface: + return reflect.Interface + case *types.Map: + return reflect.Map + case *types.Pointer: + return reflect.Ptr + case *types.Signature: + return reflect.Func + case *types.Slice: + return reflect.Slice + case *types.Struct: + return reflect.Struct + default: + panic(fmt.Errorf("unhandled type %T", typ)) + } +} + +// nonZeroCheck returns the expression that checks whether 'v' is a non-zero value of type 'vtyp'. +func nonZeroCheck(v string, vtyp types.Type, qualify types.Qualifier) string { + // Resolve type name. + typ := resolveUnderlying(vtyp) + switch typ := typ.(type) { + case *types.Basic: + k := typ.Kind() + switch { + case k == types.Bool: + return v + case k >= types.Uint && k <= types.Complex128: + return fmt.Sprintf("%s != 0", v) + case k == types.String: + return fmt.Sprintf(`%s != ""`, v) + default: + panic(fmt.Errorf("unhandled BasicKind %v", k)) + } + case *types.Array, *types.Struct: + return fmt.Sprintf("%s != (%s{})", v, types.TypeString(vtyp, qualify)) + case *types.Interface, *types.Pointer, *types.Signature: + return fmt.Sprintf("%s != nil", v) + case *types.Slice, *types.Map: + return fmt.Sprintf("len(%s) > 0", v) + default: + panic(fmt.Errorf("unhandled type %T", typ)) + } +} + +// isBigInt checks whether 'typ' is "math/big".Int. +func isBigInt(typ types.Type) bool { + named, ok := typ.(*types.Named) + if !ok { + return false + } + name := named.Obj() + return name.Pkg().Path() == "math/big" && name.Name() == "Int" +} + +// isByte checks whether the underlying type of 'typ' is uint8. +func isByte(typ types.Type) bool { + basic, ok := resolveUnderlying(typ).(*types.Basic) + return ok && basic.Kind() == types.Uint8 +} + +func resolveUnderlying(typ types.Type) types.Type { + for { + t := typ.Underlying() + if t == typ { + return t + } + typ = t + } +} diff --git a/rlp/safe.go b/rlp/safe.go new file mode 100644 index 0000000000..3c910337b6 --- /dev/null +++ b/rlp/safe.go @@ -0,0 +1,27 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +//go:build nacl || js || !cgo +// +build nacl js !cgo + +package rlp + +import "reflect" + +// byteArrayBytes returns a slice of the byte array v. +func byteArrayBytes(v reflect.Value, length int) []byte { + return v.Slice(0, length).Bytes() +} diff --git a/rlp/typecache.go b/rlp/typecache.go new file mode 100644 index 0000000000..ab04751bbd --- /dev/null +++ b/rlp/typecache.go @@ -0,0 +1,240 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rlp + +import ( + "fmt" + "reflect" + "sync" + "sync/atomic" + + "github.com/tenderly/coreth/rlp/internal/rlpstruct" +) + +// typeinfo is an entry in the type cache. +type typeinfo struct { + decoder decoder + decoderErr error // error from makeDecoder + writer writer + writerErr error // error from makeWriter +} + +// typekey is the key of a type in typeCache. It includes the struct tags because +// they might generate a different decoder. +type typekey struct { + reflect.Type + rlpstruct.Tags +} + +type decoder func(*Stream, reflect.Value) error + +type writer func(reflect.Value, *encBuffer) error + +var theTC = newTypeCache() + +type typeCache struct { + cur atomic.Value + + // This lock synchronizes writers. + mu sync.Mutex + next map[typekey]*typeinfo +} + +func newTypeCache() *typeCache { + c := new(typeCache) + c.cur.Store(make(map[typekey]*typeinfo)) + return c +} + +func cachedDecoder(typ reflect.Type) (decoder, error) { + info := theTC.info(typ) + return info.decoder, info.decoderErr +} + +func cachedWriter(typ reflect.Type) (writer, error) { + info := theTC.info(typ) + return info.writer, info.writerErr +} + +func (c *typeCache) info(typ reflect.Type) *typeinfo { + key := typekey{Type: typ} + if info := c.cur.Load().(map[typekey]*typeinfo)[key]; info != nil { + return info + } + + // Not in the cache, need to generate info for this type. + return c.generate(typ, rlpstruct.Tags{}) +} + +func (c *typeCache) generate(typ reflect.Type, tags rlpstruct.Tags) *typeinfo { + c.mu.Lock() + defer c.mu.Unlock() + + cur := c.cur.Load().(map[typekey]*typeinfo) + if info := cur[typekey{typ, tags}]; info != nil { + return info + } + + // Copy cur to next. + c.next = make(map[typekey]*typeinfo, len(cur)+1) + for k, v := range cur { + c.next[k] = v + } + + // Generate. + info := c.infoWhileGenerating(typ, tags) + + // next -> cur + c.cur.Store(c.next) + c.next = nil + return info +} + +func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags rlpstruct.Tags) *typeinfo { + key := typekey{typ, tags} + if info := c.next[key]; info != nil { + return info + } + // Put a dummy value into the cache before generating. + // If the generator tries to lookup itself, it will get + // the dummy value and won't call itself recursively. 
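+ // For example, a self-referential type such as
+ //
+ //	type Node struct{ Children []*Node }
+ //
+ // hits the dummy entry on its inner lookup instead of recursing forever.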
+ info := new(typeinfo) + c.next[key] = info + info.generate(typ, tags) + return info +} + +type field struct { + index int + info *typeinfo + optional bool +} + +// structFields resolves the typeinfo of all public fields in a struct type. +func structFields(typ reflect.Type) (fields []field, err error) { + // Convert fields to rlpstruct.Field. + var allStructFields []rlpstruct.Field + for i := 0; i < typ.NumField(); i++ { + rf := typ.Field(i) + allStructFields = append(allStructFields, rlpstruct.Field{ + Name: rf.Name, + Index: i, + Exported: rf.PkgPath == "", + Tag: string(rf.Tag), + Type: *rtypeToStructType(rf.Type, nil), + }) + } + + // Filter/validate fields. + structFields, structTags, err := rlpstruct.ProcessFields(allStructFields) + if err != nil { + if tagErr, ok := err.(rlpstruct.TagError); ok { + tagErr.StructType = typ.String() + return nil, tagErr + } + return nil, err + } + + // Resolve typeinfo. + for i, sf := range structFields { + typ := typ.Field(sf.Index).Type + tags := structTags[i] + info := theTC.infoWhileGenerating(typ, tags) + fields = append(fields, field{sf.Index, info, tags.Optional}) + } + return fields, nil +} + +// firstOptionalField returns the index of the first field with "optional" tag. +func firstOptionalField(fields []field) int { + for i, f := range fields { + if f.optional { + return i + } + } + return len(fields) +} + +type structFieldError struct { + typ reflect.Type + field int + err error +} + +func (e structFieldError) Error() string { + return fmt.Sprintf("%v (struct field %v.%s)", e.err, e.typ, e.typ.Field(e.field).Name) +} + +func (i *typeinfo) generate(typ reflect.Type, tags rlpstruct.Tags) { + i.decoder, i.decoderErr = makeDecoder(typ, tags) + i.writer, i.writerErr = makeWriter(typ, tags) +} + +// rtypeToStructType converts typ to rlpstruct.Type. +func rtypeToStructType(typ reflect.Type, rec map[reflect.Type]*rlpstruct.Type) *rlpstruct.Type { + k := typ.Kind() + if k == reflect.Invalid { + panic("invalid kind") + } + + if prev := rec[typ]; prev != nil { + return prev // short-circuit for recursive types + } + if rec == nil { + rec = make(map[reflect.Type]*rlpstruct.Type) + } + + t := &rlpstruct.Type{ + Name: typ.String(), + Kind: k, + IsEncoder: typ.Implements(encoderInterface), + IsDecoder: typ.Implements(decoderInterface), + } + rec[typ] = t + if k == reflect.Array || k == reflect.Slice || k == reflect.Ptr { + t.Elem = rtypeToStructType(typ.Elem(), rec) + } + return t +} + +// typeNilKind gives the RLP value kind for nil pointers to 'typ'. +func typeNilKind(typ reflect.Type, tags rlpstruct.Tags) Kind { + styp := rtypeToStructType(typ, nil) + + var nk rlpstruct.NilKind + if tags.NilOK { + nk = tags.NilKind + } else { + nk = styp.DefaultNilValue() + } + switch nk { + case rlpstruct.NilKindString: + return String + case rlpstruct.NilKindList: + return List + default: + panic("invalid nil kind value") + } +} + +func isUint(k reflect.Kind) bool { + return k >= reflect.Uint && k <= reflect.Uintptr +} + +func isByte(typ reflect.Type) bool { + return typ.Kind() == reflect.Uint8 && !typ.Implements(encoderInterface) +} diff --git a/rlp/unsafe.go b/rlp/unsafe.go new file mode 100644 index 0000000000..2152ba35fc --- /dev/null +++ b/rlp/unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build !nacl && !js && cgo
+// +build !nacl,!js,cgo
+
+package rlp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// byteArrayBytes returns a slice of the byte array v.
+func byteArrayBytes(v reflect.Value, length int) []byte {
+ var s []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s))
+ hdr.Data = v.UnsafeAddr()
+ hdr.Cap = length
+ hdr.Len = length
+ return s
+}
diff --git a/rpc/handler.go b/rpc/handler.go
index 2a5aee5244..4022a64cc3 100644
--- a/rpc/handler.go
+++ b/rpc/handler.go
@@ -35,7 +35,7 @@ import (
 "sync"
 "time"
- "github.com/ava-labs/coreth/metrics"
+ "github.com/tenderly/coreth/metrics"
 "github.com/ethereum/go-ethereum/log"
 "golang.org/x/time/rate"
)
diff --git a/rpc/metrics.go b/rpc/metrics.go
index 889b48fcdc..a1fdebeb6d 100644
--- a/rpc/metrics.go
+++ b/rpc/metrics.go
@@ -30,7 +30,7 @@ import (
 "fmt"
 "time"
- "github.com/ava-labs/coreth/metrics"
+ "github.com/tenderly/coreth/metrics"
)
var (
diff --git a/scripts/build.sh b/scripts/build.sh
index 8d99dcb7bd..4e8609eeae 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -4,7 +4,30 @@ set -o errexit
set -o nounset
set -o pipefail
-# Root directory
+go_version_minimum="1.17.9"
+
+go_version() {
+ go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p'
+}
+
+version_lt() {
+ # Return true if $1 is a lower version than $2.
+ local ver1=$1
+ local ver2=$2
+ # Reverse sort the versions; if the 1st item != ver1 then ver1 < ver2.
+ if [[ $(echo -e -n "$ver1\n$ver2\n" | sort -rV | head -n1) != "$ver1" ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+if version_lt "$(go_version)" "$go_version_minimum"; then
+ echo "Coreth requires Go >= $go_version_minimum, Go $(go_version) found." >&2
+ exit 1
+fi
+
+# Avalanche root directory
CORETH_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd )
# Load the versions
@@ -23,8 +46,8 @@ fi
# Check if CORETH_COMMIT is set, if not retrieve the last commit from the repo.
# This is used in the Dockerfile to allow a commit hash to be passed in without
# including the .git/ directory within the Docker image.
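+# Both the commit hash and the version are stamped into the binary via the
+# -ldflags "-X" flags on the go build line below.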
-CORETH_COMMIT=${CORETH_COMMIT:-$(git rev-list -1 HEAD)} +coreth_commit=${CORETH_COMMIT:-$( git rev-list -1 HEAD )} -# Build Coreth, which runs as a subprocess -echo "Building Coreth @ GitCommit: $CORETH_COMMIT" -go build -ldflags "-X github.com/ava-labs/coreth/plugin/evm.GitCommit=$CORETH_COMMIT" -o "$binary_path" "plugin/"*.go +# Build Coreth, which is run as a subprocess +echo "Building Coreth Version: $coreth_version; GitCommit: $coreth_commit" +go build -ldflags "-X github.com/tenderly/coreth/plugin/evm.GitCommit=$coreth_commit -X github.com/tenderly/coreth/plugin/evm.Version=$coreth_version" -o "$binary_path" "plugin/"*.go diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go index 2d5cb7f79b..19384c6473 100644 --- a/signer/core/apitypes/types.go +++ b/signer/core/apitypes/types.go @@ -32,7 +32,7 @@ import ( "math/big" "strings" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) diff --git a/sync/client/client.go b/sync/client/client.go index 0aaa5c00f9..bdac407ebe 100644 --- a/sync/client/client.go +++ b/sync/client/client.go @@ -13,22 +13,22 @@ import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/sync/client/stats" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/sync/client/stats" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/version" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/peer" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/peer" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/trie" - "github.com/ethereum/go-ethereum/ethdb" ) const ( @@ -38,11 +38,7 @@ const ( ) var ( - StateSyncVersion = &version.Application{ - Major: 1, - Minor: 7, - Patch: 13, - } + StateSyncVersion = version.NewDefaultApplication(constants.PlatformName, 1, 7, 13) errEmptyResponse = errors.New("empty response") errTooManyBlocks = errors.New("response contains more blocks than requested") errHashMismatch = errors.New("hash does not match expected value") @@ -57,7 +53,7 @@ var _ Client = &client{} // Client synchronously fetches data from the network to fulfill state sync requests. // Repeatedly requests failed requests until the context to the request is expired. type Client interface { - // GetLeafs synchronously sends the given request, returning a parsed LeafsResponse or error + // GetLeafs synchronously sends given request, returning parsed *LeafsResponse or error // Note: this verifies the response including the range proofs. GetLeafs(ctx context.Context, request message.LeafsRequest) (message.LeafsResponse, error) @@ -151,7 +147,7 @@ func parseLeafsResponse(codec codec.Manager, reqIntf message.Request, data []byt // Populate proof when ProofVals are present in the response. Its ok to pass it as nil to the trie.VerifyRangeProof // function as it will assert that all the leaves belonging to the specified root are present. 
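 // Proof nodes are stored under the keccak256 hash of their contents, which is
 // the layout trie.VerifyRangeProof expects when it looks nodes up in [proof].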
if len(leafsResponse.ProofVals) > 0 { - proof = rawdb.NewMemoryDatabase() + proof = memorydb.New() defer proof.Close() for _, proofVal := range leafsResponse.ProofVals { proofKey := crypto.Keccak256(proofVal) @@ -161,9 +157,13 @@ func parseLeafsResponse(codec codec.Manager, reqIntf message.Request, data []byt } } - firstKey := leafsRequest.Start + var ( + firstKey = leafsRequest.Start + lastKey = leafsRequest.End + ) + // Last key is the last returned key in response if len(leafsResponse.Keys) > 0 { - lastKey := leafsResponse.Keys[len(leafsResponse.Keys)-1] + lastKey = leafsResponse.Keys[len(leafsResponse.Keys)-1] if firstKey == nil { firstKey = bytes.Repeat([]byte{0x00}, len(lastKey)) @@ -173,7 +173,7 @@ func parseLeafsResponse(codec codec.Manager, reqIntf message.Request, data []byt // VerifyRangeProof verifies that the key-value pairs included in [leafResponse] are all of the keys within the range from start // to the last key returned. // Also ensures the keys are in monotonically increasing order - more, err := trie.VerifyRangeProof(leafsRequest.Root, firstKey, leafsResponse.Keys, leafsResponse.Vals, proof) + more, err := trie.VerifyRangeProof(leafsRequest.Root, firstKey, lastKey, leafsResponse.Keys, leafsResponse.Vals, proof) if err != nil { return nil, 0, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) } @@ -321,14 +321,14 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse start time.Time = time.Now() ) if len(c.stateSyncNodes) == 0 { - response, nodeID, err = c.networkClient.SendAppRequestAny(ctx, StateSyncVersion, requestBytes) + response, nodeID, err = c.networkClient.RequestAny(StateSyncVersion, requestBytes) } else { // get the next nodeID using the nodeIdx offset. If we're out of nodes, loop back to 0 // we do this every attempt to ensure we get a different node each time if possible. 
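 // atomic.AddUint32 keeps this rotation safe when multiple fetches run
 // concurrently; the modulo below wraps the offset back into range.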
nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] - response, err = c.networkClient.SendAppRequest(ctx, nodeID, requestBytes) + response, err = c.networkClient.Request(nodeID, requestBytes) } metric.UpdateRequestLatency(time.Since(start)) @@ -347,7 +347,7 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse responseIntf, numElements, err = parseFn(c.codec, request, response) if err != nil { lastErr = err - log.Debug("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, "request", request, "err", err) + log.Info("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, "request", request, "err", err) c.networkClient.TrackBandwidth(nodeID, 0) metric.IncFailed() metric.IncInvalidResponse() @@ -357,7 +357,7 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse bandwidth := float64(len(response)) / (time.Since(start).Seconds() + epsilon) c.networkClient.TrackBandwidth(nodeID, bandwidth) metric.IncSucceeded() - metric.IncReceived(int64(numElements)) + metric.UpdateReceived(int64(numElements)) return responseIntf, nil } } diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 58a0d6f3b4..fd76bfc3cf 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -15,17 +15,16 @@ import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/message" - clientstats "github.com/ava-labs/coreth/sync/client/stats" - "github.com/ava-labs/coreth/sync/handlers" - handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" - "github.com/ava-labs/coreth/sync/syncutils" - "github.com/ava-labs/coreth/trie" + "github.com/tenderly/coreth/consensus/dummy" + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/params" + "github.com/tenderly/coreth/plugin/evm/message" + clientstats "github.com/tenderly/coreth/sync/client/stats" + "github.com/tenderly/coreth/sync/handlers" + handlerstats "github.com/tenderly/coreth/sync/handlers/stats" + "github.com/tenderly/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) @@ -142,9 +141,8 @@ func TestGetBlocks(t *testing.T) { var gspec = &core.Genesis{ Config: params.TestChainConfig, } - memdb := rawdb.NewMemoryDatabase() - tdb := trie.NewDatabase(memdb, nil) - genesis := gspec.MustCommit(memdb, tdb) + memdb := memorydb.New() + genesis := gspec.MustCommit(memdb) engine := dummy.NewETHFaker() numBlocks := 110 blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, numBlocks, 0, func(i int, b *core.BlockGen) {}) @@ -411,9 +409,9 @@ func TestGetLeafs(t *testing.T) { const leafsLimit = 1024 - trieDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) - largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) - smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) + trieDB := trie.NewDatabase(memorydb.New()) + largeTrieRoot, largeTrieKeys, _ := trie.GenerateTrie(t, trieDB, 100_000, common.HashLength) + smallTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) handler := 
handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) client := NewClient(&ClientConfig{ @@ -794,8 +792,8 @@ func TestGetLeafs(t *testing.T) { func TestGetLeafsRetries(t *testing.T) { rand.Seed(1) - trieDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) - root, _, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) + trieDB := trie.NewDatabase(memorydb.New()) + root, _, _ := trie.GenerateTrie(t, trieDB, 100_000, common.HashLength) handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) mockNetClient := &mockNetwork{} @@ -813,7 +811,7 @@ func TestGetLeafsRetries(t *testing.T) { Root: root, Start: bytes.Repeat([]byte{0x00}, common.HashLength), End: bytes.Repeat([]byte{0xff}, common.HashLength), - Limit: 1024, + Limit: defaultLeafRequestLimit, NodeType: message.StateTrieNode, } diff --git a/sync/client/leaf_syncer.go b/sync/client/leaf_syncer.go index 2ca82b7560..83cafd0e0e 100644 --- a/sync/client/leaf_syncer.go +++ b/sync/client/leaf_syncer.go @@ -9,8 +9,8 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/utils" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "golang.org/x/sync/errgroup" diff --git a/sync/client/mock_client.go b/sync/client/mock_client.go index 038bdf73bf..ec9d481b00 100644 --- a/sync/client/mock_client.go +++ b/sync/client/mock_client.go @@ -10,11 +10,11 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/sync/handlers" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/sync/handlers" ) var ( diff --git a/sync/client/mock_network.go b/sync/client/mock_network.go index b841ffae45..318392f54f 100644 --- a/sync/client/mock_network.go +++ b/sync/client/mock_network.go @@ -8,7 +8,7 @@ import ( "errors" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/peer" + "github.com/tenderly/coreth/peer" "github.com/ava-labs/avalanchego/version" ) diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go index 14af154423..93a5c6952c 100644 --- a/sync/client/stats/stats.go +++ b/sync/client/stats/stats.go @@ -7,8 +7,8 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/plugin/evm/message" ) var ( diff --git a/sync/handlers/code_request.go b/sync/handlers/code_request.go index b756507f75..26b738b4ec 100644 --- a/sync/handlers/code_request.go +++ b/sync/handlers/code_request.go @@ -10,14 +10,16 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" 
"github.com/ethereum/go-ethereum/log" ) +const maxCodeHashesPerRequest = 5 + // CodeRequestHandler is a peer.RequestHandler for message.CodeRequest // serving requested contract code bytes type CodeRequestHandler struct { @@ -49,7 +51,7 @@ func (n *CodeRequestHandler) OnCodeRequest(_ context.Context, nodeID ids.NodeID, n.stats.UpdateCodeReadTime(time.Since(startTime)) }() - if len(codeRequest.Hashes) > message.MaxCodeHashesPerRequest { + if len(codeRequest.Hashes) > maxCodeHashesPerRequest { n.stats.IncTooManyHashesRequested() log.Debug("too many hashes requested, dropping request", "nodeID", nodeID, "requestID", requestID, "numHashes", len(codeRequest.Hashes)) return nil, nil @@ -75,7 +77,7 @@ func (n *CodeRequestHandler) OnCodeRequest(_ context.Context, nodeID ids.NodeID, codeResponse := message.CodeResponse{Data: codeBytes} responseBytes, err := n.codec.Marshal(message.Version, codeResponse) if err != nil { - log.Error("could not marshal CodeResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", codeRequest, "err", err) + log.Warn("could not marshal CodeResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", codeRequest, "err", err) return nil, nil } n.stats.UpdateCodeBytesReturned(uint32(totalBytes)) diff --git a/sync/handlers/code_request_test.go b/sync/handlers/code_request_test.go index 1bf5bd5223..cd79509e0d 100644 --- a/sync/handlers/code_request_test.go +++ b/sync/handlers/code_request_test.go @@ -8,15 +8,15 @@ import ( "crypto/rand" "testing" - "github.com/ava-labs/coreth/params" + "github.com/tenderly/coreth/params" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/stretchr/testify/assert" ) @@ -94,7 +94,7 @@ func TestCodeRequestHandler(t *testing.T) { responseBytes, err := codeRequestHandler.OnCodeRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) assert.NoError(t, err) - // If the expected response is empty, assert that the handler returns an empty response and return early. + // If the expected resposne is empty, assert that the handler returns an empty response and return early. 
if len(expectedResponse) == 0 { assert.Len(t, responseBytes, 0, "expected response to be empty") return diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go index 9519a349f0..8c47cd869f 100644 --- a/sync/handlers/leafs_request.go +++ b/sync/handlers/leafs_request.go @@ -12,17 +12,17 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/sync/handlers/stats" - "github.com/ava-labs/coreth/sync/syncutils" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/utils" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/sync/handlers/stats" + "github.com/tenderly/coreth/trie" + "github.com/tenderly/coreth/utils" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" ) @@ -32,10 +32,6 @@ const ( // in message.LeafsRequest if it is greater than this value maxLeavesLimit = uint16(1024) - // Maximum percent of the time left to deadline to spend on optimistically - // reading the snapshot to find the response - maxSnapshotReadTimePercent = 75 - segmentLen = 64 // divide data from snapshot to segments of this size ) @@ -98,11 +94,7 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N return nil, nil } - // TODO: We should know the state root that accounts correspond to, - // as this information will be necessary to access storage tries when - // the trie is path based. - // stateRoot := common.Hash{} - t, err := trie.New(trie.TrieID(leafsRequest.Root), lrh.trieDB) + t, err := trie.New(leafsRequest.Root, lrh.trieDB) if err != nil { log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err) lrh.stats.IncMissingRoot() @@ -236,19 +228,7 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { // modified since the requested root. If this assumption can be verified with // range proofs and data from the trie, we can skip iterating the trie as // an optimization. - // Since we are performing this read optimistically, we use a separate context - // with reduced timeout so there is enough time to read the trie if the snapshot - // read does not contain up-to-date data. - snapCtx := ctx - if deadline, ok := ctx.Deadline(); ok { - timeTillDeadline := time.Until(deadline) - bufferedDeadline := time.Now().Add(timeTillDeadline * maxSnapshotReadTimePercent / 100) - - var cancel context.CancelFunc - snapCtx, cancel = context.WithDeadline(ctx, bufferedDeadline) - defer cancel() - } - snapKeys, snapVals, err := rb.readLeafsFromSnapshot(snapCtx) + snapKeys, snapVals, err := rb.readLeafsFromSnapshot(ctx) // Update read snapshot time here, so that we include the case that an error occurred. rb.stats.UpdateSnapshotReadTime(time.Since(snapshotReadStart)) if err != nil { @@ -283,7 +263,7 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { // segments of the data and use them in the response. 
hasGap := false for i := 0; i < len(snapKeys); i += segmentLen { - segmentEnd := min(i+segmentLen, len(snapKeys)) + segmentEnd := math.Min(i+segmentLen, len(snapKeys)) proof, ok, _, err := rb.isRangeValid(snapKeys[i:segmentEnd], snapVals[i:segmentEnd], hasGap) if err != nil { rb.stats.IncProofError() @@ -319,7 +299,7 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { // all the key/vals in the segment are valid, but possibly shorten segmentEnd // here to respect limit. this is necessary in case the number of leafs we read // from the trie is more than the length of a segment which cannot be validated. limit - segmentEnd = min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys)) + segmentEnd = math.Min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys)) rb.response.Keys = append(rb.response.Keys, snapKeys[i:segmentEnd]...) rb.response.Vals = append(rb.response.Vals, snapVals[i:segmentEnd]...) @@ -341,14 +321,14 @@ func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*mem start = bytes.Repeat([]byte{0x00}, rb.keyLength) } - if err := rb.t.Prove(start, proof); err != nil { + if err := rb.t.Prove(start, 0, proof); err != nil { _ = proof.Close() // closing memdb does not error return nil, err } if len(keys) > 0 { // If there is a non-zero number of keys, set [end] for the range proof to the last key. end := keys[len(keys)-1] - if err := rb.t.Prove(end, proof); err != nil { + if err := rb.t.Prove(end, 0, proof); err != nil { _ = proof.Close() // closing memdb does not error return nil, err } @@ -366,7 +346,11 @@ func (rb *responseBuilder) verifyRangeProof(keys, vals [][]byte, start []byte, p if len(start) == 0 { start = bytes.Repeat([]byte{0x00}, rb.keyLength) } - return trie.VerifyRangeProof(rb.request.Root, start, keys, vals, proof) + var end []byte + if len(keys) > 0 { + end = keys[len(keys)-1] + } + return trie.VerifyRangeProof(rb.request.Root, start, end, keys, vals, proof) } // iterateVals returns the values contained in [db] @@ -427,11 +411,7 @@ func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, defer func() { rb.trieReadTime += time.Since(startTime) }() // create iterator to iterate the trie - nodeIt, err := rb.t.NodeIterator(rb.nextKey()) - if err != nil { - return false, err - } - it := trie.NewIterator(nodeIt) + it := trie.NewIterator(rb.t.NodeIterator(rb.nextKey())) more := false for it.Next() { // if we're at the end, break this loop @@ -479,9 +459,9 @@ func (rb *responseBuilder) readLeafsFromSnapshot(ctx context.Context) ([][]byte, // Get an iterator into the storage or the main account snapshot. 
if rb.request.Account == (common.Hash{}) { - snapIt = &syncutils.AccountIterator{AccountIterator: rb.snap.DiskAccountIterator(startHash)} + snapIt = &accountIt{AccountIterator: rb.snap.DiskAccountIterator(startHash)} } else { - snapIt = &syncutils.StorageIterator{StorageIterator: rb.snap.DiskStorageIterator(rb.request.Account, startHash)} + snapIt = &storageIt{StorageIterator: rb.snap.DiskStorageIterator(rb.request.Account, startHash)} } defer snapIt.Release() for snapIt.Next() { diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index 9be7c3f616..49d9aa72c5 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -10,36 +10,33 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/sync/handlers/stats" - "github.com/ava-labs/coreth/sync/syncutils" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/stretchr/testify/assert" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/sync/handlers/stats" + "github.com/tenderly/coreth/trie" ) func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { rand.Seed(1) mockHandlerStats := &stats.MockHandlerStats{} - memdb := rawdb.NewMemoryDatabase() - trieDB := trie.NewDatabase(memdb, nil) + memdb := memorydb.New() + trieDB := trie.NewDatabase(memdb) - corruptedTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 100, common.HashLength) - tr, err := trie.New(trie.TrieID(corruptedTrieRoot), trieDB) - if err != nil { - t.Fatal(err) - } + corruptedTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, 100, common.HashLength) // Corrupt [corruptedTrieRoot] - syncutils.CorruptTrie(t, memdb, tr, 5) + trie.CorruptTrie(t, trieDB, corruptedTrieRoot, 5) - largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 10_000, common.HashLength) - smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 500, common.HashLength) - accountTrieRoot, accounts := syncutils.FillAccounts( + largeTrieRoot, largeTrieKeys, _ := trie.GenerateTrie(t, trieDB, 10_000, common.HashLength) + smallTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, 500, common.HashLength) + accountTrieRoot, accounts := trie.FillAccounts( t, trieDB, common.Hash{}, @@ -74,12 +71,6 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { } snapshotProvider := &TestSnapshotProvider{} leafsHandler := NewLeafsRequestHandler(trieDB, snapshotProvider, message.Codec, mockHandlerStats) - snapConfig := snapshot.Config{ - CacheSize: 64, - AsyncBuild: false, - NoBuild: false, - SkipVerify: true, - } tests := map[string]struct { prepareTestFn func() (context.Context, message.LeafsRequest) @@ -450,7 +441,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, 
false, true, false) if err != nil { t.Fatal(err) } @@ -477,7 +468,7 @@ }, "partial account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) if err != nil { t.Fatal(err) } @@ -492,12 +483,15 @@ } // modify one entry of 1 in 4 segments if i%(segmentLen*4) == 0 { - acc, err := types.FullAccount(it.Account()) - if err != nil { + var acc snapshot.Account + if err := rlp.DecodeBytes(it.Account(), &acc); err != nil { t.Fatalf("could not parse snapshot account: %v", err) } acc.Nonce++ - bytes := types.SlimAccountRLP(*acc) + bytes, err := rlp.EncodeToBytes(acc) + if err != nil { + t.Fatalf("could not encode snapshot account to bytes: %v", err) + } rawdb.WriteAccountSnapshot(memdb, it.Hash(), bytes) } i++ @@ -530,7 +524,7 @@ }, "storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) if err != nil { t.Fatal(err) } @@ -558,7 +552,7 @@ }, "partial storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) if err != nil { t.Fatal(err) } @@ -609,7 +603,7 @@ }, "last snapshot key removed": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) if err != nil { t.Fatal(err) } @@ -645,7 +639,7 @@ }, "request last key when removed from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) + snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) if err != nil { t.Fatal(err) } @@ -699,16 +693,19 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { func assertRangeProofIsValid(t *testing.T, request *message.LeafsRequest, response *message.LeafsResponse, expectMore bool) { t.Helper() - var start []byte + var start, end []byte if len(request.Start) == 0 { start = bytes.Repeat([]byte{0x00}, common.HashLength) } else { start = request.Start } + if len(response.Keys) > 0 { + end = response.Keys[len(response.Keys)-1] + } var proof ethdb.Database if len(response.ProofVals) > 0 { - proof = rawdb.NewMemoryDatabase() + proof = memorydb.New() defer proof.Close() for _, proofVal := range response.ProofVals { proofKey := crypto.Keccak256(proofVal) @@ -718,7 +715,7 @@ func assertRangeProofIsValid(t *testing.T, request *message.LeafsRequest, respon } } - more, err := trie.VerifyRangeProof(request.Root, start, response.Keys, response.Vals,
proof) + more, err := trie.VerifyRangeProof(request.Root, start, end, response.Keys, response.Vals, proof) assert.NoError(t, err) assert.Equal(t, expectMore, more) } diff --git a/sync/handlers/stats/stats.go b/sync/handlers/stats/stats.go index 9dd04c4ea0..9105835f70 100644 --- a/sync/handlers/stats/stats.go +++ b/sync/handlers/stats/stats.go @@ -6,7 +6,7 @@ package stats import ( "time" - "github.com/ava-labs/coreth/metrics" + "github.com/tenderly/coreth/metrics" ) // HandlerStats reports prometheus metrics for the state sync handlers diff --git a/sync/handlers/test_providers.go b/sync/handlers/test_providers.go index 81dafbfd00..cb8278e671 100644 --- a/sync/handlers/test_providers.go +++ b/sync/handlers/test_providers.go @@ -4,8 +4,8 @@ package handlers import ( - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" ) diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 574290e286..4637074f4e 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -5,161 +5,46 @@ package statesync import ( "context" - "errors" "testing" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/plugin/evm/message" - statesyncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/sync/handlers" - handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb/memorydb" + "github.com/tenderly/coreth/plugin/evm/message" + statesyncclient "github.com/tenderly/coreth/sync/client" + "github.com/tenderly/coreth/sync/handlers" + handlerstats "github.com/tenderly/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/stretchr/testify/assert" ) -type codeSyncerTest struct { - setupCodeSyncer func(*codeSyncer) - codeRequestHashes [][]common.Hash - codeByteSlices [][]byte - getCodeIntercept func(hashes []common.Hash, codeBytes [][]byte) ([][]byte, error) - err error -} - -func testCodeSyncer(t *testing.T, test codeSyncerTest) { +func TestCodeSyncerFetchCode(t *testing.T) { // Set up serverDB serverDB := memorydb.New() - - codeHashes := make([]common.Hash, 0, len(test.codeByteSlices)) - for _, codeBytes := range test.codeByteSlices { - codeHash := crypto.Keccak256Hash(codeBytes) - rawdb.WriteCode(serverDB, codeHash, codeBytes) - codeHashes = append(codeHashes, codeHash) - } + codeBytes := utils.RandomBytes(100) + codeHash := crypto.Keccak256Hash(codeBytes) + rawdb.WriteCode(serverDB, codeHash, codeBytes) // Set up mockClient codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) mockClient := statesyncclient.NewMockClient(message.Codec, nil, codeRequestHandler, nil) - mockClient.GetCodeIntercept = test.getCodeIntercept - clientDB := rawdb.NewMemoryDatabase() + clientDB := memorydb.New() - codeSyncer := newCodeSyncer(CodeSyncerConfig{ - MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, - NumCodeFetchingWorkers: DefaultNumCodeFetchingWorkers, - Client: mockClient, - DB: clientDB, - }) - if test.setupCodeSyncer != nil { - test.setupCodeSyncer(codeSyncer) - } - codeSyncer.start(context.Background()) + codeSyncer := newCodeSyncer(clientDB, 
mockClient) - for _, codeHashes := range test.codeRequestHashes { - if err := codeSyncer.addCode(codeHashes); err != nil { - if test.err == nil { - t.Fatal(err) - } else { - assert.ErrorIs(t, err, test.err) - } - } - } - codeSyncer.notifyAccountTrieCompleted() + // Start code syncer with the given context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + codeSyncer.start(ctx) - err := <-codeSyncer.Done() - if test.err != nil { - if err == nil { - t.Fatal(t, "expected non-nil error: %s", test.err) - } - assert.ErrorIs(t, err, test.err) - return - } else if err != nil { + if err := codeSyncer.addCode(codeHash); err != nil { t.Fatal(err) } - // Assert that the client synced the code correctly. - for i, codeHash := range codeHashes { - codeBytes := rawdb.ReadCode(clientDB, codeHash) - assert.Equal(t, test.codeByteSlices[i], codeBytes) - } -} - -func TestCodeSyncerSingleCodeHash(t *testing.T) { - codeBytes := utils.RandomBytes(100) - codeHash := crypto.Keccak256Hash(codeBytes) - testCodeSyncer(t, codeSyncerTest{ - codeRequestHashes: [][]common.Hash{{codeHash}}, - codeByteSlices: [][]byte{codeBytes}, - }) -} - -func TestCodeSyncerManyCodeHashes(t *testing.T) { - numCodeSlices := 5000 - codeHashes := make([]common.Hash, 0, numCodeSlices) - codeByteSlices := make([][]byte, 0, numCodeSlices) - for i := 0; i < numCodeSlices; i++ { - codeBytes := utils.RandomBytes(100) - codeHash := crypto.Keccak256Hash(codeBytes) - codeHashes = append(codeHashes, codeHash) - codeByteSlices = append(codeByteSlices, codeBytes) - } - - testCodeSyncer(t, codeSyncerTest{ - setupCodeSyncer: func(c *codeSyncer) { - c.codeHashes = make(chan common.Hash, 10) - }, - codeRequestHashes: [][]common.Hash{codeHashes[0:100], codeHashes[100:2000], codeHashes[2000:2005], codeHashes[2005:]}, - codeByteSlices: codeByteSlices, - }) -} - -func TestCodeSyncerRequestErrors(t *testing.T) { - codeBytes := utils.RandomBytes(100) - codeHash := crypto.Keccak256Hash(codeBytes) - err := errors.New("dummy error") - testCodeSyncer(t, codeSyncerTest{ - codeRequestHashes: [][]common.Hash{{codeHash}}, - codeByteSlices: [][]byte{codeBytes}, - getCodeIntercept: func(hashes []common.Hash, codeBytes [][]byte) ([][]byte, error) { - return nil, err - }, - err: err, - }) -} - -func TestCodeSyncerAddsInProgressCodeHashes(t *testing.T) { - codeBytes := utils.RandomBytes(100) - codeHash := crypto.Keccak256Hash(codeBytes) - testCodeSyncer(t, codeSyncerTest{ - setupCodeSyncer: func(c *codeSyncer) { - rawdb.AddCodeToFetch(c.DB, codeHash) - }, - codeRequestHashes: nil, - codeByteSlices: [][]byte{codeBytes}, - }) -} - -func TestCodeSyncerAddsMoreInProgressThanQueueSize(t *testing.T) { - numCodeSlices := 10 - codeHashes := make([]common.Hash, 0, numCodeSlices) - codeByteSlices := make([][]byte, 0, numCodeSlices) - for i := 0; i < numCodeSlices; i++ { - codeBytes := utils.RandomBytes(100) - codeHash := crypto.Keccak256Hash(codeBytes) - codeHashes = append(codeHashes, codeHash) - codeByteSlices = append(codeByteSlices, codeBytes) + garbageCodeHash := common.Hash{1} + cancel() + if err := codeSyncer.addCode(garbageCodeHash); err == nil { + t.Fatal("Expected fetch code to fail to fetch garbage hash due to cancelled context") } - - testCodeSyncer(t, codeSyncerTest{ - setupCodeSyncer: func(c *codeSyncer) { - for _, codeHash := range codeHashes { - rawdb.AddCodeToFetch(c.DB, codeHash) - } - c.codeHashes = make(chan common.Hash, numCodeSlices/2) - }, - codeRequestHashes: nil, - codeByteSlices: codeByteSlices, - }) } diff --git 
a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go index 8d6312dbf6..218367d41f 100644 --- a/sync/statesync/state_syncer.go +++ b/sync/statesync/state_syncer.go @@ -7,261 +7,392 @@ import ( "context" "fmt" "sync" + "time" - "github.com/ava-labs/coreth/core/state/snapshot" - syncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "golang.org/x/sync/errgroup" + "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/plugin/evm/message" + "github.com/tenderly/coreth/rlp" + syncclient "github.com/tenderly/coreth/sync/client" + "github.com/tenderly/coreth/trie" ) -const ( - segmentThreshold = 500_000 // if we estimate trie to have greater than this number of leafs, split it - numStorageTrieSegments = 4 - numMainTrieSegments = 8 - defaultNumThreads = 8 -) +const defaultNumThreads int = 4 -type StateSyncerConfig struct { - Root common.Hash - Client syncclient.Client - DB ethdb.Database - BatchSize int - MaxOutstandingCodeHashes int // Maximum number of code hashes in the code syncer queue - NumCodeFetchingWorkers int // Number of code syncing threads - RequestSize uint16 // Number of leafs to request from a peer at a time -} +type TrieProgress struct { + trie *trie.StackTrie + batch ethdb.Batch + batchSize int + startFrom []byte -// stateSync keeps the state of the entire state sync operation. -type stateSync struct { - db ethdb.Database // database we are syncing - root common.Hash // root of the EVM state we are syncing to - trieDB *trie.Database // trieDB on top of db we are syncing. used to restore any existing tries. - snapshot snapshot.Snapshot // used to access the database we are syncing as a snapshot. - batchSize int // write batches when they reach this size - client syncclient.Client // used to contact peers over the network - - segments chan syncclient.LeafSyncTask // channel of tasks to sync - syncer *syncclient.CallbackLeafSyncer // performs the sync, looping over each task's range and invoking specified callbacks - codeSyncer *codeSyncer // manages the asynchronous download and batching of code hashes - trieQueue *trieQueue // manages a persistent list of storage tries we need to sync and any segments that are created for them - - // track the main account trie specifically to commit its root at the end of the operation - mainTrie *trieToSync - - // track the tries currently being synced - lock sync.RWMutex - triesInProgress map[common.Hash]*trieToSync - - // track completion and progress of work - mainTrieDone chan struct{} - triesInProgressSem chan struct{} - done chan error - stats *trieSyncStats + // used for ETA calculations + startTime time.Time + eta SyncETA } -func NewStateSyncer(config *StateSyncerConfig) (*stateSync, error) { - ss := &stateSync{ - batchSize: config.BatchSize, - db: config.DB, - client: config.Client, - root: config.Root, - trieDB: trie.NewDatabase(config.DB, nil), - snapshot: snapshot.NewDiskLayer(config.DB), - stats: newTrieSyncStats(), - triesInProgress: make(map[common.Hash]*trieToSync), - - // [triesInProgressSem] is used to keep the number of tries syncing - // less than or equal to [defaultNumThreads]. - triesInProgressSem: make(chan struct{}, defaultNumThreads), - - // Each [trieToSync] will have a maximum of [numSegments] segments. 
- // We set the capacity of [segments] such that [defaultNumThreads] - // storage tries can sync concurrently. - segments: make(chan syncclient.LeafSyncTask, defaultNumThreads*numStorageTrieSegments), - mainTrieDone: make(chan struct{}), - done: make(chan error, 1), - } - ss.syncer = syncclient.NewCallbackLeafSyncer(config.Client, ss.segments, config.RequestSize) - ss.codeSyncer = newCodeSyncer(CodeSyncerConfig{ - DB: config.DB, - Client: config.Client, - MaxOutstandingCodeHashes: config.MaxOutstandingCodeHashes, - NumCodeFetchingWorkers: config.NumCodeFetchingWorkers, - }) - - ss.trieQueue = NewTrieQueue(config.DB) - if err := ss.trieQueue.clearIfRootDoesNotMatch(ss.root); err != nil { - return nil, err +func NewTrieProgress(db ethdb.Batcher, batchSize int, eta SyncETA) *TrieProgress { + batch := db.NewBatch() + return &TrieProgress{ + batch: batch, + batchSize: batchSize, + trie: trie.NewStackTrie(batch), + eta: eta, } +} - // create a trieToSync for the main trie and mark it as in progress. - var err error - ss.mainTrie, err = NewTrieToSync(ss, ss.root, common.Hash{}, NewMainTrieTask(ss)) - if err != nil { - return nil, err - } - ss.addTrieInProgress(ss.root, ss.mainTrie) - ss.mainTrie.startSyncing() // start syncing after tracking the trie as in progress - return ss, nil +type StorageTrieProgress struct { + *TrieProgress + Account common.Hash + AdditionalAccounts []common.Hash + Skipped bool } -// onStorageTrieFinished is called after a storage trie finishes syncing. -func (t *stateSync) onStorageTrieFinished(root common.Hash) error { - <-t.triesInProgressSem // allow another trie to start (release the semaphore) - // mark the storage trie as done in trieQueue - if err := t.trieQueue.StorageTrieDone(root); err != nil { - return err - } - // track the completion of this storage trie - return t.removeTrieInProgress(root) +// StateSyncProgress tracks the progress of syncing the main trie and the +// sub-tasks for syncing storage tries. +type StateSyncProgress struct { + MainTrie *TrieProgress + MainTrieDone bool + Root common.Hash + StorageTries map[common.Hash]*StorageTrieProgress } -// onMainTrieFinishes is called after the main trie finishes syncing. -func (t *stateSync) onMainTrieFinished() error { - t.codeSyncer.notifyAccountTrieCompleted() +// stateSyncer manages syncing the main trie and storage tries concurrently from peers. +// Invariant that allows resumability: Each account with a snapshot entry and a non-empty +// storage trie MUST either: +// (a) have its storage trie fully on disk and its snapshot populated with the same data as the trie, or +// (b) have an entry in the progress marker persisted to disk. +// In case there is an entry for a storage trie in the progress marker, the in progress +// sync for that storage trie will be resumed prior to resuming the main trie sync. +// This ensures the number of tries in progress remains less than or equal to [numThreads]. +// Once fewer than [numThreads] storage tries are in progress, the main trie sync will +// continue concurrently. +// +// Note: stateSyncer assumes that the snapshot will be wiped completely prior to starting +// a new sync task (or if the target sync root changes or the snapshot is modified by normal operation). 
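To make the invariant above concrete, here is a minimal illustrative checker. It is a sketch only, not part of the patch: checkResumable, fullyOnDisk, and inProgressMarker are hypothetical names standing in for state the syncer derives from the database and the progress marker.

package statesync

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// checkResumable sketches the stated invariant: an account with a non-empty
// storage root must either (a) have its storage trie fully on disk with its
// snapshot populated, or (b) have an entry in the persisted progress marker.
// The two booleans stand in for state derived from the database; the real
// handler additionally treats types.EmptyRootHash as "no storage" when
// deciding whether a storage trie needs to be fetched.
func checkResumable(storageRoot common.Hash, fullyOnDisk, inProgressMarker bool) error {
	if storageRoot == (common.Hash{}) {
		return nil // no storage trie to sync
	}
	if fullyOnDisk || inProgressMarker {
		return nil // case (a) or case (b): safe to resume
	}
	return fmt.Errorf("invariant violated for storage root %s: neither on disk nor marked in progress", storageRoot)
}

The stateSyncer type that carries this bookkeeping is defined next.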
+type stateSyncer struct { + lock sync.Mutex + progressMarker *StateSyncProgress + numThreads int + + syncer *syncclient.CallbackLeafSyncer + codeSyncer *codeSyncer + trieDB *trie.Database + db ethdb.Database + batchSize int + client syncclient.Client + + // pointer to ETA struct, shared with all TrieProgress structs + eta SyncETA +} + +type EVMStateSyncerConfig struct { + Root common.Hash + Client syncclient.Client + DB ethdb.Database + BatchSize int +} - // count the number of storage tries we need to sync for eta purposes. - numStorageTries, err := t.trieQueue.countTries() +func NewEVMStateSyncer(config *EVMStateSyncerConfig) (*stateSyncer, error) { + eta := NewSyncEta(config.Root) + progressMarker, err := loadProgress(config.DB, config.Root) if err != nil { - return err + return nil, err + } + + // initialise tries in the progress marker + progressMarker.MainTrie = NewTrieProgress(config.DB, config.BatchSize, eta) + if err := restoreMainTrieProgressFromSnapshot(config.DB, progressMarker.MainTrie); err != nil { + return nil, err + } + + for _, storageProgress := range progressMarker.StorageTries { + storageProgress.TrieProgress = NewTrieProgress(config.DB, config.BatchSize, eta) + // the first account's storage snapshot contains the key/value pairs we need to restore + // the stack trie. if other in-progress accounts happen to share the same storage root, + // their storage snapshot remains empty until the storage trie is fully synced, then it + // will be copied from the first account's storage snapshot. + if err := restoreStorageTrieProgressFromSnapshot(config.DB, storageProgress.TrieProgress, storageProgress.Account); err != nil { + return nil, err + } } - t.stats.setTriesRemaining(numStorageTries) - // mark the main trie done - close(t.mainTrieDone) - return t.removeTrieInProgress(t.root) + return &stateSyncer{ + progressMarker: progressMarker, + batchSize: config.BatchSize, + client: config.Client, + trieDB: trie.NewDatabase(config.DB), + db: config.DB, + numThreads: defaultNumThreads, + syncer: syncclient.NewCallbackLeafSyncer(config.Client), + codeSyncer: newCodeSyncer(config.DB, config.Client), + eta: eta, + }, nil } -// onSyncComplete is called after the account trie and -// all storage tries have completed syncing. We persist -// [mainTrie]'s batch last to avoid persisting the state -// root before all storage tries are done syncing. -func (t *stateSync) onSyncComplete() error { - return t.mainTrie.batch.Write() +// Start starts the leaf syncer on the root task as well as any in-progress storage tasks. +func (s *stateSyncer) Start(ctx context.Context) { + rootTask := &syncclient.LeafSyncTask{ + Root: s.progressMarker.Root, + Start: s.progressMarker.MainTrie.startFrom, + NodeType: message.StateTrieNode, + OnLeafs: s.handleLeafs, + OnFinish: s.onFinish, + OnSyncFailure: s.onSyncFailure, + } + + storageTasks := make([]*syncclient.LeafSyncTask, 0, len(s.progressMarker.StorageTries)) + for storageRoot, storageTrieProgress := range s.progressMarker.StorageTries { + storageTasks = append(storageTasks, &syncclient.LeafSyncTask{ + Root: storageRoot, + Account: storageTrieProgress.Account, + Start: storageTrieProgress.startFrom, + NodeType: message.StateTrieNode, + OnLeafs: storageTrieProgress.handleLeafs, + OnFinish: s.onFinish, + OnSyncFailure: s.onSyncFailure, + }) + } + // Start the leaf syncer and code syncer goroutines. + s.syncer.Start(ctx, s.numThreads, rootTask, storageTasks...) 
+ s.codeSyncer.start(ctx) } -// storageTrieProducer waits for the main trie to finish -// syncing then starts to add storage trie roots along -// with their corresponding accounts to the segments channel. -// returns nil if all storage tries were iterated and an -// error if one occurred or the context expired. -func (t *stateSync) storageTrieProducer(ctx context.Context) error { - // Wait for main trie to finish to ensure when this thread terminates - // there are no more storage tries to sync - select { - case <-t.mainTrieDone: - case <-ctx.Done(): - return ctx.Err() +func (s *stateSyncer) handleLeafs(root common.Hash, keys [][]byte, values [][]byte) ([]*syncclient.LeafSyncTask, error) { + var ( + tasks []*syncclient.LeafSyncTask + mainTrie = s.progressMarker.MainTrie + ) + if mainTrie.startTime.IsZero() { + mainTrie.startTime = time.Now() } - for { - // check ctx here to exit the loop early - if err := ctx.Err(); err != nil { - return err + for i, key := range keys { + value := values[i] + accountHash := common.BytesToHash(key) + if err := mainTrie.trie.TryUpdate(key, value); err != nil { + return nil, err } - root, accounts, more, err := t.trieQueue.getNextTrie() - if err != nil { - return err + // decode value into types.StateAccount + var acc types.StateAccount + if err := rlp.DecodeBytes(value, &acc); err != nil { + return nil, fmt.Errorf("could not decode main trie as account, key=%s, valueLen=%d, err=%w", common.Bytes2Hex(key), len(value), err) } - // If there are no storage tries, then root will be the empty hash on the first pass. - if root != (common.Hash{}) { - // acquire semaphore (to keep number of tries in progress limited) - select { - case t.triesInProgressSem <- struct{}{}: - case <-ctx.Done(): - return ctx.Err() + + // check if this account has storage root that we need to fetch + if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash { + if storageTask, err := s.createStorageTrieTask(accountHash, acc.Root); err != nil { + return nil, err + } else if storageTask != nil { + tasks = append(tasks, storageTask) } + } - // Arbitrarily use the first account for making requests to the server. - // Note: getNextTrie guarantees that if a non-nil storage root is returned, then the - // slice of account hashes is non-empty. - syncAccount := accounts[0] - // create a trieToSync for the storage trie and mark it as in progress. - storageTrie, err := NewTrieToSync(t, root, syncAccount, NewStorageTrieTask(t, root, accounts)) - if err != nil { - return err + // check if this account has code and fetch it + codeHash := common.BytesToHash(acc.CodeHash) + if codeHash != (common.Hash{}) && codeHash != types.EmptyCodeHash && !rawdb.HasCodeWithPrefix(s.db, codeHash) { + if err := s.codeSyncer.addCode(codeHash); err != nil { + return nil, err } - t.addTrieInProgress(root, storageTrie) - storageTrie.startSyncing() // start syncing after tracking the trie as in progress } - // if there are no more storage tries, close - // the task queue and exit the producer. 
- if !more { - close(t.segments) - return nil + + // write account snapshot + writeAccountSnapshot(mainTrie.batch, accountHash, acc) + + if mainTrie.batch.ValueSize() > mainTrie.batchSize { + if err := mainTrie.batch.Write(); err != nil { + return nil, err + } + mainTrie.batch.Reset() } } + if len(keys) > 0 { + // notify progress for eta calculations on the last key + mainTrie.eta.NotifyProgress(root, mainTrie.startTime, mainTrie.startFrom, keys[len(keys)-1]) + } + return tasks, nil } -func (t *stateSync) Start(ctx context.Context) error { - // Start the code syncer and leaf syncer. - eg, egCtx := errgroup.WithContext(ctx) - t.codeSyncer.start(egCtx) // start the code syncer first since the leaf syncer may add code tasks - t.syncer.Start(egCtx, defaultNumThreads, t.onSyncFailure) - eg.Go(func() error { - if err := <-t.syncer.Done(); err != nil { - return err +func (tp *StorageTrieProgress) handleLeafs(root common.Hash, keys [][]byte, values [][]byte) ([]*syncclient.LeafSyncTask, error) { + // Note this method does not need to hold a lock: + // - handleLeafs is called synchronously by CallbackLeafSyncer + // - if an additional account is encountered with the same storage trie, + // it will be appended to [tp.AdditionalAccounts] (not accessed here) + if tp.startTime.IsZero() { + tp.startTime = time.Now() + } + for i, key := range keys { + if err := tp.trie.TryUpdate(key, values[i]); err != nil { + return nil, err } - return t.onSyncComplete() - }) - eg.Go(func() error { - err := <-t.codeSyncer.Done() - return err - }) - eg.Go(func() error { - return t.storageTrieProducer(egCtx) - }) - - // The errgroup wait will take care of returning the first error that occurs, or returning - // nil if both finish without an error. - go func() { - t.done <- eg.Wait() - }() - return nil + keyHash := common.BytesToHash(key) + // write to [tp.Account] here, the snapshot for [tp.AdditionalAccounts] will be populated + // after the trie is finished syncing by copying entries from [tp.Account]'s storage snapshot. + rawdb.WriteStorageSnapshot(tp.batch, tp.Account, keyHash, values[i]) + if tp.batch.ValueSize() > tp.batchSize { + if err := tp.batch.Write(); err != nil { + return nil, err + } + tp.batch.Reset() + } + } + if len(keys) > 0 { + // notify progress for eta calculations on the last key + tp.eta.NotifyProgress(root, tp.startTime, tp.startFrom, keys[len(keys)-1]) + } + return nil, nil // storage tries never add new tasks to the leaf syncer } -func (t *stateSync) Done() <-chan error { return t.done } +// createStorageTrieTask creates a LeafSyncTask to be returned from the callback, +// and records the storage trie as in progress to maintain the resumability invariant. +func (s *stateSyncer) createStorageTrieTask(accountHash common.Hash, storageRoot common.Hash) (*syncclient.LeafSyncTask, error) { + s.lock.Lock() + defer s.lock.Unlock() + + // check if we're already syncing this storage trie. 
+ // if we are: add this account hash to the progress marker so + // when the trie is downloaded, the snapshot will be copied + // to this account as well + if storageProgress, exists := s.progressMarker.StorageTries[storageRoot]; exists { + storageProgress.AdditionalAccounts = append(storageProgress.AdditionalAccounts, accountHash) + return nil, addInProgressTrie(s.db, storageRoot, accountHash) + } + + progress := &StorageTrieProgress{ + TrieProgress: NewTrieProgress(s.db, s.batchSize, s.eta), + Account: accountHash, + } + s.progressMarker.StorageTries[storageRoot] = progress + return &syncclient.LeafSyncTask{ + Root: storageRoot, + Account: accountHash, + NodeType: message.StateTrieNode, + OnLeafs: progress.handleLeafs, + OnFinish: s.onFinish, + OnSyncFailure: s.onSyncFailure, + OnStart: func(common.Hash) (bool, error) { + // check if this storage root is on disk + storageTrie, err := trie.New(storageRoot, s.trieDB) + if err != nil { + return false, nil + } -// addTrieInProgress tracks the root as being currently synced. -func (t *stateSync) addTrieInProgress(root common.Hash, trie *trieToSync) { - t.lock.Lock() - defer t.lock.Unlock() + // If the storage trie is already on disk, we only need to populate the storage snapshot for [accountHash] + // with the trie contents. There is no need to re-sync the trie, since it is already present. + if err := writeAccountStorageSnapshotFromTrie(s.db.NewBatch(), s.batchSize, accountHash, storageTrie); err != nil { + // If the storage trie cannot be iterated (due to an incomplete trie from pruning this storage trie in the past) + // then we re-sync it here. Therefore, this error is not fatal and we can safely continue here. + log.Info("could not populate storage snapshot from trie with existing root, syncing from peers instead", "account", accountHash, "root", storageRoot, "err", err) + return false, nil + } - t.triesInProgress[root] = trie + // If populating the snapshot from the existing storage trie was successful, + // return true to skip this task + progress.Skipped = true // set skipped to true to avoid committing the stack trie in onFinish + return true, s.onFinish(storageRoot) // call onFinish to delete this task from the map. onFinish will take [s.lock] + }, + }, addInProgressTrie(s.db, storageRoot, accountHash) +} + +// onFinish marks the task corresponding to [root] as finished. +// If [root] is a storage root, then we remove it from the progress marker. +// When the progress marker contains no more storage roots and the +// main trie is marked as complete, the main trie's root is committed (see checkAllDone). +func (s *stateSyncer) onFinish(root common.Hash) error { + s.lock.Lock() + defer s.lock.Unlock() + + // handle the case where root is the main trie's root + if root == s.progressMarker.Root { + // mark main trie as done. + s.progressMarker.MainTrieDone = true + return s.checkAllDone() + } + + // since root was not the main trie, it must belong to a storage trie. + storageTrieProgress, exists := s.progressMarker.StorageTries[root] + if !exists { + return fmt.Errorf("unknown root [%s] finished syncing", root) + } + + if !storageTrieProgress.Skipped { + storageRoot, err := storageTrieProgress.trie.Commit() + if err != nil { + return err + } + if storageRoot != root { + return fmt.Errorf("unexpected storage root, expected=%s, actual=%s account=%s", root, storageRoot, storageTrieProgress.Account) + } + } + // Note: we hold the lock when copying storage snapshots and adding new accounts.
+ // This prevents race conditions between these two operations. + if len(storageTrieProgress.AdditionalAccounts) > 0 { + // It is necessary to flush the batch here to write + // any pending items to the storage snapshot before + // we use that as a source to copy to other accounts. + if err := storageTrieProgress.batch.Write(); err != nil { + return err + } + storageTrieProgress.batch.Reset() + if err := copyStorageSnapshot( + s.db, + storageTrieProgress.Account, + storageTrieProgress.batch, + storageTrieProgress.batchSize, + storageTrieProgress.AdditionalAccounts, + ); err != nil { + return err + } + } + delete(s.progressMarker.StorageTries, root) + // clear the progress marker on completion of the trie + if err := storageTrieProgress.batch.Write(); err != nil { + return err + } + if err := removeInProgressStorageTrie(s.db, root, storageTrieProgress); err != nil { + return err + } + s.eta.RemoveSyncedTrie(root, storageTrieProgress.Skipped) + return s.checkAllDone() } -// removeTrieInProgress removes root from the set of tracked -// tries in progress and notifies the storage root producer -// so it can continue in case it was paused due to the -// maximum number of tries in progress being previously reached. -func (t *stateSync) removeTrieInProgress(root common.Hash) error { - t.lock.Lock() - defer t.lock.Unlock() - - t.stats.trieDone(root) - if _, ok := t.triesInProgress[root]; !ok { - return fmt.Errorf("removeTrieInProgress for unexpected root: %s", root) +// checkAllDone checks if there are no more tries in progress and the main trie is complete +// this will write the main trie's root to disk, and is the last step of stateSyncer's process. +// assumes lock is held +func (s *stateSyncer) checkAllDone() error { + // Note: this check ensures we do not commit the main trie until all of the storage tries + // have been committed. + if !s.progressMarker.MainTrieDone || len(s.progressMarker.StorageTries) > 0 { + return nil } - delete(t.triesInProgress, root) - return nil + + mainTrie := s.progressMarker.MainTrie + mainTrieRoot, err := mainTrie.trie.Commit() + if err != nil { + return fmt.Errorf("failed to commit main trie: %w", err) + } + if mainTrieRoot != s.progressMarker.Root { + return fmt.Errorf("expected main trie root [%s] not same as actual [%s]", s.progressMarker.Root, mainTrieRoot) + } + if err := mainTrie.batch.Write(); err != nil { + return err + } + // remove the main trie storage marker, after which there should be none in the db. + return removeInProgressTrie(s.db, mainTrieRoot, common.Hash{}) } -// onSyncFailure is called if the sync fails, this writes all -// batches of in-progress trie segments to disk to have maximum -// progress to restore. -func (t *stateSync) onSyncFailure(error) error { - t.lock.RLock() - defer t.lock.RUnlock() - - for _, trie := range t.triesInProgress { - for _, segment := range trie.segments { - if err := segment.batch.Write(); err != nil { - return err - } +// Done returns a channel which produces any error that occurred during syncing or nil on success. 
+func (s *stateSyncer) Done() <-chan error { return s.syncer.Done() } + +// onSyncFailure writes all in-progress batches to disk to preserve maximum progress +func (s *stateSyncer) onSyncFailure(error) error { + for _, storageTrieProgress := range s.progressMarker.StorageTries { + if err := storageTrieProgress.batch.Write(); err != nil { + return err } } - return nil + return s.progressMarker.MainTrie.batch.Write() } diff --git a/sync/statesync/state_syncer_progress.go b/sync/statesync/state_syncer_progress.go new file mode 100644 index 0000000000..a65e04c770 --- /dev/null +++ b/sync/statesync/state_syncer_progress.go @@ -0,0 +1,103 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package statesync + +import ( + "github.com/tenderly/coreth/ethdb" + "github.com/ethereum/go-ethereum/common" +) + +var ( + // keys: prefix + root + account + // main trie is stored with common.Hash{} as the account + syncProgressPrefix = []byte("sync_progress") + syncProgressKeyLen = len(syncProgressPrefix) + common.HashLength + common.HashLength +) + +func packKey(root common.Hash, account common.Hash) []byte { + bytes := make([]byte, 0, syncProgressKeyLen) + bytes = append(bytes, syncProgressPrefix...) + bytes = append(bytes, root[:]...) + bytes = append(bytes, account[:]...) + return bytes +} + +func unpackKey(bytes []byte) (common.Hash, common.Hash) { + bytes = bytes[len(syncProgressPrefix):] // skip prefix + root := common.BytesToHash(bytes[:common.HashLength]) + bytes = bytes[common.HashLength:] + account := common.BytesToHash(bytes) + return root, account +} + +// loadProgress checks for a progress marker serialized to [db] and returns it if it matches [root]. +// if the existing marker does not match [root] or one is not found, a new one is created, persisted, and returned. +// Additionally, any previous progress marker is wiped if this is the case. 
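The key helpers above pack a (root, account) pair under a common prefix, with the zero account hash marking the main trie. As a quick illustration of that layout, a round-trip sketch (a test added here for exposition only; it relies on nothing beyond packKey and unpackKey as defined above):

package statesync

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

// TestPackKeyRoundTrip checks that packing a (root, account) pair and
// unpacking it is lossless, and that the main trie is represented by the
// zero account hash.
func TestPackKeyRoundTrip(t *testing.T) {
	root := common.HexToHash("0x01")
	account := common.HexToHash("0x02")

	gotRoot, gotAccount := unpackKey(packKey(root, account))
	if gotRoot != root || gotAccount != account {
		t.Fatalf("round trip mismatch: got root=%s account=%s", gotRoot, gotAccount)
	}

	// The main trie's progress marker uses common.Hash{} as its account.
	_, mainAccount := unpackKey(packKey(root, common.Hash{}))
	if mainAccount != (common.Hash{}) {
		t.Fatal("expected zero account hash for the main trie marker")
	}
}

loadProgress, defined next, iterates keys with this prefix to rebuild the marker on startup.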
+func loadProgress(db ethdb.Database, root common.Hash) (*StateSyncProgress, error) { + progress := &StateSyncProgress{ + StorageTries: make(map[common.Hash]*StorageTrieProgress), + } + + // load from disk + it := db.NewIterator(syncProgressPrefix, nil) + defer it.Release() + for it.Next() { + if syncProgressKeyLen != len(it.Key()) { + continue + } + root, account := unpackKey(it.Key()) + if account == (common.Hash{}) { + progress.Root = root + continue + } + if storageTrie, exists := progress.StorageTries[root]; exists { + storageTrie.AdditionalAccounts = append(storageTrie.AdditionalAccounts, account) + } else { + progress.StorageTries[root] = &StorageTrieProgress{ + Account: account, + } + } + } + if err := it.Error(); err != nil { + return nil, err + } + + if progress.Root == root { + // marker found on disk and matches + return progress, nil + } else if progress.Root != root { + // marker found but does not match, delete it + // first, clear account storage trie markers + for root, storageTrie := range progress.StorageTries { + if err := removeInProgressStorageTrie(db, root, storageTrie); err != nil { + return nil, err + } + } + // then clear the root marker itself + if err := removeInProgressTrie(db, progress.Root, common.Hash{}); err != nil { + return nil, err + } + } + progress.Root = root + progress.StorageTries = make(map[common.Hash]*StorageTrieProgress) + return progress, addInProgressTrie(db, root, common.Hash{}) +} + +func addInProgressTrie(db ethdb.KeyValueWriter, root common.Hash, account common.Hash) error { + return db.Put(packKey(root, account), []byte{0x1}) +} + +// removeInProgressStorageTrie removes progress markers for all accounts associated with storageTrie +func removeInProgressStorageTrie(db ethdb.KeyValueWriter, root common.Hash, storageTrie *StorageTrieProgress) error { + for _, account := range storageTrie.AdditionalAccounts { + if err := removeInProgressTrie(db, root, account); err != nil { + return err + } + } + return removeInProgressTrie(db, root, storageTrie.Account) +} + +func removeInProgressTrie(db ethdb.KeyValueWriter, root common.Hash, account common.Hash) error { + return db.Delete(packKey(root, account)) +} diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go index 418eb675fd..3c9145e481 100644 --- a/sync/statesync/test_sync.go +++ b/sync/statesync/test_sync.go @@ -8,22 +8,22 @@ import ( "math/rand" "testing" - "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/sync/syncutils" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rlp" + "github.com/tenderly/coreth/trie" ) // assertDBConsistency checks [serverTrieDB] and [clientTrieDB] have the same EVM state trie at [root], // and that [clientTrieDB.DiskDB] has corresponding account & snapshot values. // Also verifies any code referenced by the EVM state is present in [clientTrieDB] and the hash is correct. 
-func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database, serverTrieDB, clientTrieDB *trie.Database) { +func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTrieDB *trie.Database) { + clientDB := clientTrieDB.DiskDB() numSnapshotAccounts := 0 accountIt := rawdb.IterateAccountSnapshots(clientDB) defer accountIt.Release() @@ -38,7 +38,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } trieAccountLeaves := 0 - syncutils.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + trie.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { trieAccountLeaves++ accHash := common.BytesToHash(key) var acc types.StateAccount @@ -46,14 +46,14 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database return err } // check snapshot consistency - snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash) - expectedSnapshotVal := types.SlimAccountRLP(acc) + snapshotVal := rawdb.ReadAccountSnapshot(clientTrieDB.DiskDB(), accHash) + expectedSnapshotVal := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin) assert.Equal(t, expectedSnapshotVal, snapshotVal) // check code consistency if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { codeHash := common.BytesToHash(acc.CodeHash) - code := rawdb.ReadCode(clientDB, codeHash) + code := rawdb.ReadCode(clientTrieDB.DiskDB(), codeHash) actualHash := crypto.Keccak256Hash(code) assert.NotZero(t, len(code)) assert.Equal(t, codeHash, actualHash) @@ -73,9 +73,9 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database storageTrieLeavesCount := 0 // check storage trie and storage snapshot consistency - syncutils.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + trie.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { storageTrieLeavesCount++ - snapshotVal := rawdb.ReadStorageSnapshot(clientDB, accHash, common.BytesToHash(key)) + snapshotVal := rawdb.ReadStorageSnapshot(clientTrieDB.DiskDB(), accHash, common.BytesToHash(key)) assert.Equal(t, val, snapshotVal) return nil }) @@ -88,8 +88,8 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database assert.Equal(t, trieAccountLeaves, numSnapshotAccounts) } -func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { - newRoot, _ := syncutils.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { +func fillAccountsWithStorage(t *testing.T, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { + newRoot, _ := trie.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) if err != nil { @@ -97,12 +97,12 @@ func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB } codeHash := crypto.Keccak256Hash(codeBytes) - rawdb.WriteCode(serverDB, codeHash, codeBytes) + rawdb.WriteCode(serverTrieDB.DiskDB(), codeHash, codeBytes) account.CodeHash = codeHash[:] // now create state trie numKeys := 16 - account.Root, _, _ = syncutils.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength) + account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, numKeys, 
common.HashLength) return account }) return newRoot @@ -119,18 +119,18 @@ func FillAccountsWithOverlappingStorage( ) (common.Hash, map[*keystore.Key]*types.StateAccount) { storageRoots := make([]common.Hash, 0, numOverlappingStorageRoots) for i := 0; i < numOverlappingStorageRoots; i++ { - storageRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 16, common.HashLength) + storageRoot, _, _ := trie.GenerateTrie(t, trieDB, 16, common.HashLength) storageRoots = append(storageRoots, storageRoot) } storageRootIndex := 0 - return syncutils.FillAccounts(t, trieDB, root, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { + return trie.FillAccounts(t, trieDB, root, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { switch i % 3 { case 0: // unmodified account case 1: // account with overlapping storage root account.Root = storageRoots[storageRootIndex%numOverlappingStorageRoots] storageRootIndex++ case 2: // account with unique storage root - account.Root, _, _ = syncutils.GenerateTrie(t, trieDB, 16, common.HashLength) + account.Root, _, _ = trie.GenerateTrie(t, trieDB, 16, common.HashLength) } return account diff --git a/sync/syncutils/iterators.go b/sync/syncutils/iterators.go index 2ae6bfcc80..c6c9377a92 100644 --- a/sync/syncutils/iterators.go +++ b/sync/syncutils/iterators.go @@ -1,69 +1,68 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package syncutils +package handlers import ( - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/ethdb" ) var ( - _ ethdb.Iterator = &AccountIterator{} - _ ethdb.Iterator = &StorageIterator{} + _ ethdb.Iterator = &accountIt{} + _ ethdb.Iterator = &storageIt{} ) -// AccountIterator wraps a [snapshot.AccountIterator] to conform to [ethdb.Iterator] +// accountIt wraps a [snapshot.AccountIterator] to conform to [ethdb.Iterator] // accounts will be returned in consensus (FullRLP) format for compatibility with trie data. 
-type AccountIterator struct { +type accountIt struct { snapshot.AccountIterator err error val []byte } -func (it *AccountIterator) Next() bool { +func (it *accountIt) Next() bool { if it.err != nil { return false } for it.AccountIterator.Next() { - it.val, it.err = types.FullAccountRLP(it.Account()) + it.val, it.err = snapshot.FullAccountRLP(it.Account()) return it.err == nil } it.val = nil return false } -func (it *AccountIterator) Key() []byte { +func (it *accountIt) Key() []byte { if it.err != nil { return nil } return it.Hash().Bytes() } -func (it *AccountIterator) Value() []byte { +func (it *accountIt) Value() []byte { if it.err != nil { return nil } return it.val } -func (it *AccountIterator) Error() error { +func (it *accountIt) Error() error { if it.err != nil { return it.err } return it.AccountIterator.Error() } -// StorageIterator wraps a [snapshot.StorageIterator] to conform to [ethdb.Iterator] -type StorageIterator struct { +// storageIt wraps a [snapshot.StorageIterator] to conform to [ethdb.Iterator] +type storageIt struct { snapshot.StorageIterator } -func (it *StorageIterator) Key() []byte { +func (it *storageIt) Key() []byte { return it.Hash().Bytes() } -func (it *StorageIterator) Value() []byte { +func (it *storageIt) Value() []byte { return it.Slot() } diff --git a/sync/syncutils/test_trie.go b/sync/syncutils/test_trie.go index ea1b7eff04..55b760f18d 100644 --- a/sync/syncutils/test_trie.go +++ b/sync/syncutils/test_trie.go @@ -1,7 +1,7 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package syncutils +package trie import ( cryptoRand "crypto/rand" @@ -11,42 +11,46 @@ import ( "testing" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/trienode" + "github.com/tenderly/coreth/accounts/keystore" + "github.com/tenderly/coreth/core/types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" + "github.com/tenderly/coreth/rlp" ) // GenerateTrie creates a trie with [numKeys] key-value pairs inside of [trieDB]. // Returns the root of the generated trie, the slice of keys inserted into the trie in lexicographical // order, and the slice of corresponding values. 
// GenerateTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results -func GenerateTrie(t *testing.T, trieDB *trie.Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { +func GenerateTrie(t *testing.T, trieDB *Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { if keySize < wrappers.LongLen+1 { t.Fatal("key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") } - return FillTrie(t, 0, numKeys, keySize, trieDB, types.EmptyRootHash) + testTrie := NewEmpty(trieDB) + + keys, values := FillTrie(t, numKeys, keySize, testTrie) + + // Commit the root to [trieDB] + root, nodes, err := testTrie.Commit(false) + assert.NoError(t, err) + err = trieDB.Update(NewWithNodeSet(nodes)) + assert.NoError(t, err) + err = trieDB.Commit(root, false, nil) + assert.NoError(t, err) + + return root, keys, values } // FillTrie fills a given trie with [numKeys] number of keys, each of size [keySize] // returns inserted keys and values // FillTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results -func FillTrie(t *testing.T, start, numKeys int, keySize int, trieDB *trie.Database, root common.Hash) (common.Hash, [][]byte, [][]byte) { - testTrie, err := trie.New(trie.TrieID(root), trieDB) - if err != nil { - t.Fatalf("error creating trie: %v", err) - } - +func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *Trie) ([][]byte, [][]byte) { keys := make([][]byte, 0, numKeys) values := make([][]byte, 0, numKeys) // Generate key-value pairs - for i := start; i < numKeys; i++ { + for i := 0; i < numKeys; i++ { key := make([]byte, keySize) binary.BigEndian.PutUint64(key[:wrappers.LongLen], uint64(i+1)) _, err := rand.Read(key[wrappers.LongLen:]) @@ -56,45 +60,30 @@ func FillTrie(t *testing.T, start, numKeys int, keySize int, trieDB *trie.Databa _, err = rand.Read(value) assert.NoError(t, err) - testTrie.MustUpdate(key, value) + if err = testTrie.TryUpdate(key, value); err != nil { + t.Fatal("error updating trie", err) + } keys = append(keys, key) values = append(values, value) } - - // Commit the root to [trieDB] - nextRoot, nodes, err := testTrie.Commit(false) - assert.NoError(t, err) - err = trieDB.Update(nextRoot, root, 0, trienode.NewWithNodeSet(nodes), nil) - assert.NoError(t, err) - err = trieDB.Commit(nextRoot, false) - assert.NoError(t, err) - - return nextRoot, keys, values + return keys, values } // AssertTrieConsistency ensures given trieDB [a] and [b] both have the same // non-empty trie at [root]. 
(all key/value pairs must be equal) -func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database, onLeaf func(key, val []byte) error) { - trieA, err := trie.New(trie.TrieID(root), a) +func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLeaf func(key, val []byte) error) { + trieA, err := New(common.Hash{}, root, a) if err != nil { t.Fatalf("error creating trieA, root=%s, err=%v", root, err) } - trieB, err := trie.New(trie.TrieID(root), b) + trieB, err := New(common.Hash{}, root, b) if err != nil { t.Fatalf("error creating trieB, root=%s, err=%v", root, err) } - nodeItA, err := trieA.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieA, root=%s, err=%v", root, err) - } - nodeItB, err := trieB.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieB, root=%s, err=%v", root, err) - } - itA := trie.NewIterator(nodeItA) - itB := trie.NewIterator(nodeItB) + itA := NewIterator(trieA.NodeIterator(nil)) + itB := NewIterator(trieB.NodeIterator(nil)) count := 0 for itA.Next() && itB.Next() { count++ @@ -113,15 +102,17 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database, assert.Greater(t, count, 0) } -// CorruptTrie deletes every [n]th trie node from the trie given by [tr] from the underlying [db]. -// Assumes [tr] can be iterated without issue. -func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { - // Delete some trie nodes - batch := diskdb.NewBatch() - nodeIt, err := tr.NodeIterator(nil) +// CorruptTrie deletes every [n]th trie node from the trie given by [root] from the trieDB. +// Assumes that the trie given by root can be iterated without issue. +func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { + batch := trieDB.DiskDB().NewBatch() + // next delete some trie nodes + tr, err := New(common.Hash{}, root, trieDB) if err != nil { t.Fatal(err) } + + nodeIt := tr.NodeIterator(nil) count := 0 for nodeIt.Next(true) { count++ @@ -144,7 +135,7 @@ func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { // [onAccount] is called if non-nil (so the caller can modify the account before it is stored in the secure trie). // returns the new trie root and a map of funded keys to StateAccount structs. 
func FillAccounts( - t *testing.T, trieDB *trie.Database, root common.Hash, numAccounts int, + t *testing.T, trieDB *Database, root common.Hash, numAccounts int, onAccount func(*testing.T, int, types.StateAccount) types.StateAccount, ) (common.Hash, map[*keystore.Key]*types.StateAccount) { var ( @@ -154,7 +145,7 @@ func FillAccounts( accounts = make(map[*keystore.Key]*types.StateAccount, numAccounts) ) - tr, err := trie.NewStateTrie(trie.TrieID(root), trieDB) + tr, err := NewStateTrie(common.Hash{}, root, trieDB) if err != nil { t.Fatalf("error opening trie: %v", err) } @@ -179,7 +170,9 @@ func FillAccounts( if err != nil { t.Fatal(err) } - tr.MustUpdate(key.Address[:], accBytes) + if err = tr.TryUpdate(key.Address[:], accBytes); err != nil { + t.Fatalf("error updating trie with account, address=%s, err=%v", key.Address, err) + } accounts[key] = &acc } @@ -187,10 +180,10 @@ func FillAccounts( if err != nil { t.Fatalf("error committing trie: %v", err) } - if err := trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + if err := trieDB.Update(NewWithNodeSet(nodes)); err != nil { t.Fatalf("error updating trieDB: %v", err) } - if err := trieDB.Commit(newRoot, false); err != nil { + if err := trieDB.Commit(newRoot, false, nil); err != nil { t.Fatalf("error committing trieDB: %v", err) } return newRoot, accounts diff --git a/tests/init.go b/tests/init.go index a2080e861f..8d64f787d1 100644 --- a/tests/init.go +++ b/tests/init.go @@ -31,8 +31,7 @@ import ( "math/big" "sort" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/utils" + "github.com/tenderly/coreth/params" ) // Forks table defines supported forks and their chain config. @@ -99,19 +98,6 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), }, - "MuirGlacier": { - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - }, "FrontierToHomesteadAt5": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(5), @@ -175,7 +161,7 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), }, "ApricotPhase2": { ChainID: big.NewInt(1), @@ -188,8 +174,8 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), }, "ApricotPhase3": { ChainID: big.NewInt(1), @@ -202,9 +188,9 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase3BlockTimestamp: big.NewInt(0), }, "ApricotPhase4": { ChainID: big.NewInt(1), @@ -217,10 +203,10 @@ var Forks = map[string]*params.ChainConfig{ 
PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), - ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase3BlockTimestamp: big.NewInt(0), + ApricotPhase4BlockTimestamp: big.NewInt(0), }, "ApricotPhase5": { ChainID: big.NewInt(1), @@ -232,89 +218,15 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), - ApricotPhase4BlockTimestamp: utils.NewUint64(0), - ApricotPhase5BlockTimestamp: utils.NewUint64(0), - }, - "Banff": { - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), - ApricotPhase4BlockTimestamp: utils.NewUint64(0), - ApricotPhase5BlockTimestamp: utils.NewUint64(0), - BanffBlockTimestamp: utils.NewUint64(0), - }, - "Cortina": { - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), - ApricotPhase4BlockTimestamp: utils.NewUint64(0), - ApricotPhase5BlockTimestamp: utils.NewUint64(0), - BanffBlockTimestamp: utils.NewUint64(0), - CortinaBlockTimestamp: utils.NewUint64(0), - }, - "Durango": { - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), - ApricotPhase4BlockTimestamp: utils.NewUint64(0), - ApricotPhase5BlockTimestamp: utils.NewUint64(0), - BanffBlockTimestamp: utils.NewUint64(0), - CortinaBlockTimestamp: utils.NewUint64(0), - DurangoBlockTimestamp: utils.NewUint64(0), - }, - "Cancun": { - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: utils.NewUint64(0), - ApricotPhase2BlockTimestamp: utils.NewUint64(0), - ApricotPhase3BlockTimestamp: utils.NewUint64(0), - ApricotPhase4BlockTimestamp: utils.NewUint64(0), - ApricotPhase5BlockTimestamp: utils.NewUint64(0), - BanffBlockTimestamp: utils.NewUint64(0), - CortinaBlockTimestamp: utils.NewUint64(0), - 
DurangoBlockTimestamp: utils.NewUint64(0), - CancunTime: utils.NewUint64(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase3BlockTimestamp: big.NewInt(0), + ApricotPhase4BlockTimestamp: big.NewInt(0), + ApricotPhase5BlockTimestamp: big.NewInt(0), }, } -// AvailableForks returns the set of defined fork names +// Returns the set of defined fork names func AvailableForks() []string { var availableForks []string for k := range Forks { diff --git a/tests/init_test.go b/tests/init_test.go new file mode 100644 index 0000000000..35725f4f6e --- /dev/null +++ b/tests/init_test.go @@ -0,0 +1,287 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package tests + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strings" + "testing" + + "github.com/tenderly/coreth/params" +) + +func readJSON(reader io.Reader, value interface{}) error { + data, err := ioutil.ReadAll(reader) + if err != nil { + return fmt.Errorf("error reading JSON file: %v", err) + } + if err = json.Unmarshal(data, &value); err != nil { + if syntaxerr, ok := err.(*json.SyntaxError); ok { + line := findLine(data, syntaxerr.Offset) + return fmt.Errorf("JSON syntax error at line %v: %v", line, err) + } + return err + } + return nil +} + +func readJSONFile(fn string, value interface{}) error { + file, err := os.Open(fn) + if err != nil { + return err + } + defer file.Close() + + err = readJSON(file, value) + if err != nil { + return fmt.Errorf("%s in file %s", err.Error(), fn) + } + return nil +} + +// findLine returns the line number for the given offset into data. +func findLine(data []byte, offset int64) (line int) { + line = 1 + for i, r := range string(data) { + if int64(i) >= offset { + return + } + if r == '\n' { + line++ + } + } + return +} + +// testMatcher controls skipping and chain config assignment to tests. +type testMatcher struct { + configpat []testConfig + failpat []testFailure + skiploadpat []*regexp.Regexp + slowpat []*regexp.Regexp + runonlylistpat *regexp.Regexp +} + +type testConfig struct { + p *regexp.Regexp + config params.ChainConfig +} + +type testFailure struct { + p *regexp.Regexp + reason string +} + +// slow adds expected slow tests matching the pattern.
+func (tm *testMatcher) slow(pattern string) { + tm.slowpat = append(tm.slowpat, regexp.MustCompile(pattern)) +} + +// skipLoad skips JSON loading of tests matching the pattern. +func (tm *testMatcher) skipLoad(pattern string) { + tm.skiploadpat = append(tm.skiploadpat, regexp.MustCompile(pattern)) +} + +// fails adds an expected failure for tests matching the pattern. +func (tm *testMatcher) fails(pattern string, reason string) { + if reason == "" { + panic("empty fail reason") + } + tm.failpat = append(tm.failpat, testFailure{regexp.MustCompile(pattern), reason}) +} + +func (tm *testMatcher) runonly(pattern string) { + tm.runonlylistpat = regexp.MustCompile(pattern) +} + +// config defines chain config for tests matching the pattern. +func (tm *testMatcher) config(pattern string, cfg params.ChainConfig) { + tm.configpat = append(tm.configpat, testConfig{regexp.MustCompile(pattern), cfg}) +} + +// findSkip matches name against test skip patterns. +func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) { + isWin32 := runtime.GOARCH == "386" && runtime.GOOS == "windows" + for _, re := range tm.slowpat { + if re.MatchString(name) { + if testing.Short() { + return "skipped in -short mode", false + } + if isWin32 { + return "skipped on 32bit windows", false + } + } + } + for _, re := range tm.skiploadpat { + if re.MatchString(name) { + return "skipped by skipLoad", true + } + } + return "", false +} + +// findConfig returns the chain config matching defined patterns. +func (tm *testMatcher) findConfig(t *testing.T) *params.ChainConfig { + for _, m := range tm.configpat { + if m.p.MatchString(t.Name()) { + return &m.config + } + } + return new(params.ChainConfig) +} + +// checkFailure checks whether a failure is expected. +func (tm *testMatcher) checkFailure(t *testing.T, name string, err error) error { + failReason := "" + for _, m := range tm.failpat { + if m.p.MatchString(name) { + failReason = m.reason + break + } + } + if failReason != "" { + t.Logf("expected failure: %s", failReason) + if err != nil { + t.Logf("error: %v", err) + return nil + } + return fmt.Errorf("test succeeded unexpectedly") + } + return err +} + +// walk invokes its runTest argument for all subtests in the given directory. +// +// runTest should be a function of type func(t *testing.T, name string, x <TestType>), +// where TestType is the type of the test contained in test files. +func (tm *testMatcher) walk(t *testing.T, dir string, runTest interface{}) { + // Walk the directory. + dirinfo, err := os.Stat(dir) + if os.IsNotExist(err) || !dirinfo.IsDir() { + fmt.Fprintf(os.Stderr, "can't find test files in %s, did you clone the tests submodule?\n", dir) + t.Skip("missing test files") + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + name := filepath.ToSlash(strings.TrimPrefix(path, dir+string(filepath.Separator))) + if info.IsDir() { + if _, skipload := tm.findSkip(name + "/"); skipload { + return filepath.SkipDir + } + return nil + } + if filepath.Ext(path) == ".json" { + t.Run(name, func(t *testing.T) { tm.runTestFile(t, path, name, runTest) }) + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} + +func (tm *testMatcher) runTestFile(t *testing.T, path, name string, runTest interface{}) { + if r, _ := tm.findSkip(name); r != "" { + t.Skip(r) + } + if tm.runonlylistpat != nil { + if !tm.runonlylistpat.MatchString(name) { + t.Skip("Skipped by runonly") + } + } + t.Parallel() + + // Load the file as map[string]<TestType>.
+ m := makeMapFromTestFunc(runTest) + if err := readJSONFile(path, m.Addr().Interface()); err != nil { + t.Fatal(err) + } + + // Run all tests from the map. Don't wrap in a subtest if there is only one test in the file. + keys := sortedMapKeys(m) + if len(keys) == 1 { + runTestFunc(runTest, t, name, m, keys[0]) + } else { + for _, key := range keys { + name := name + "/" + key + t.Run(key, func(t *testing.T) { + if r, _ := tm.findSkip(name); r != "" { + t.Skip(r) + } + runTestFunc(runTest, t, name, m, key) + }) + } + } +} + +func makeMapFromTestFunc(f interface{}) reflect.Value { + stringT := reflect.TypeOf("") + testingT := reflect.TypeOf((*testing.T)(nil)) + ftyp := reflect.TypeOf(f) + if ftyp.Kind() != reflect.Func || ftyp.NumIn() != 3 || ftyp.NumOut() != 0 || ftyp.In(0) != testingT || ftyp.In(1) != stringT { + panic(fmt.Sprintf("bad test function type: want func(*testing.T, string, <TestType>), have %s", ftyp)) + } + testType := ftyp.In(2) + mp := reflect.New(reflect.MapOf(stringT, testType)) + return mp.Elem() +} + +func sortedMapKeys(m reflect.Value) []string { + keys := make([]string, m.Len()) + for i, k := range m.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + return keys +} + +func runTestFunc(runTest interface{}, t *testing.T, name string, m reflect.Value, key string) { + reflect.ValueOf(runTest).Call([]reflect.Value{ + reflect.ValueOf(t), + reflect.ValueOf(name), + m.MapIndex(reflect.ValueOf(key)), + }) +} + +// func TestMatcherRunonlylist(t *testing.T) { +// t.Parallel() +// tm := new(testMatcher) +// tm.runonly("invalid*") +// tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) { +// if name[:len("invalidRLPTest.json")] != "invalidRLPTest.json" { +// t.Fatalf("invalid test found: %s != invalidRLPTest.json", name) +// } +// }) +// } diff --git a/tests/rlp_test_util.go b/tests/rlp_test_util.go index 5af235bc5a..f0a282cc5c 100644 --- a/tests/rlp_test_util.go +++ b/tests/rlp_test_util.go @@ -34,7 +34,7 @@ import ( "math/big" "strings" - "github.com/ethereum/go-ethereum/rlp" + "github.com/tenderly/coreth/rlp" ) // RLPTest is the JSON structure of a single RLP test.
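The walk/runTestFile machinery above is generic over the test type: makeMapFromTestFunc inspects the callback's third parameter via reflection and builds the matching map[string]<TestType> for JSON decoding. A minimal sketch of how a test would drive it, assuming a hypothetical on-disk test directory and skip pattern (testMatcher, walk, slow, and the StateTest type with its Subtests method are all defined in this patch):

package tests

import "testing"

// TestStateSketch shows the intended walk usage; the directory path and the
// slow pattern below are illustrative assumptions, not part of this patch.
func TestStateSketch(t *testing.T) {
	st := new(testMatcher)
	// Known-slow suites are skipped when the -short flag is set.
	st.slow(`^stQuadraticComplexityTest/`)
	// walk decodes each JSON file into a map[string]*StateTest (the value type
	// is inferred from the callback's third parameter) and runs one subtest per key.
	st.walk(t, "testdata/GeneralStateTests", func(t *testing.T, name string, test *StateTest) {
		for _, subtest := range test.Subtests() {
			_ = subtest // execute the subtest against its fork's chain config here
		}
	})
}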
diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 29e934efe5..dc66c0a872 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -27,28 +27,132 @@ package tests import ( - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/triedb/hashdb" - "github.com/ava-labs/coreth/trie/triedb/pathdb" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "strconv" + "strings" + + "github.com/tenderly/coreth/core" + "github.com/tenderly/coreth/core/state" + "github.com/tenderly/coreth/core/state/snapshot" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/core/vm" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" ) -func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) { - tconf := &trie.Config{Preimages: true} - if scheme == rawdb.HashScheme { - tconf.HashDB = hashdb.Defaults - } else { - tconf.PathDB = pathdb.Defaults +// StateTest checks transaction processing without block context. +// See https://github.com/ethereum/EIPs/issues/176 for the test format specification. +type StateTest struct { + json stJSON +} + +// StateSubtest selects a specific configuration of a General State Test. +type StateSubtest struct { + Fork string + Index int +} + +func (t *StateTest) UnmarshalJSON(in []byte) error { + return json.Unmarshal(in, &t.json) +} + +type stJSON struct { + Env stEnv `json:"env"` + Pre core.GenesisAlloc `json:"pre"` + Tx stTransaction `json:"transaction"` + Out hexutil.Bytes `json:"out"` + Post map[string][]stPostState `json:"post"` +} + +type stPostState struct { + Root common.UnprefixedHash `json:"hash"` + Logs common.UnprefixedHash `json:"logs"` + TxBytes hexutil.Bytes `json:"txbytes"` + ExpectException string `json:"expectException"` + Indexes struct { + Data int `json:"data"` + Gas int `json:"gas"` + Value int `json:"value"` + } +} + +//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go + +type stEnv struct { + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"` +} + +//go:generate gencodec -type stTransaction -field-override stTransactionMarshaling -out gen_sttransaction.go + +type stTransaction struct { + GasPrice *big.Int `json:"gasPrice"` + MaxFeePerGas *big.Int `json:"maxFeePerGas"` + MaxPriorityFeePerGas *big.Int `json:"maxPriorityFeePerGas"` + Nonce uint64 `json:"nonce"` + To string `json:"to"` + Data []string `json:"data"` + AccessLists []*types.AccessList `json:"accessLists,omitempty"` + GasLimit []uint64 `json:"gasLimit"` + Value []string `json:"value"` + PrivateKey []byte `json:"secretKey"` +} + +// GetChainConfig takes a fork definition and returns a chain config. 
+// The fork definition can be +// - a plain forkname, e.g. `Byzantium`, +// - a fork basename, and a list of EIPs to enable; e.g. `Byzantium+1884+1283`. +func GetChainConfig(forkString string) (baseConfig *params.ChainConfig, eips []int, err error) { + var ( + splitForks = strings.Split(forkString, "+") + ok bool + baseName, eipsStrings = splitForks[0], splitForks[1:] + ) + if baseConfig, ok = Forks[baseName]; !ok { + return nil, nil, UnsupportedForkError{baseName} + } + for _, eip := range eipsStrings { + if eipNum, err := strconv.Atoi(eip); err != nil { + return nil, nil, fmt.Errorf("syntax error, invalid eip number %q: %v", eip, err) + } else { + if !vm.ValidEip(eipNum) { + return nil, nil, fmt.Errorf("syntax error, invalid eip number %v", eipNum) + } + eips = append(eips, eipNum) + } } - triedb := trie.NewDatabase(db, tconf) - sdb := state.NewDatabaseWithNodeDB(db, triedb) - statedb, _ := state.New(types.EmptyRootHash, sdb, nil) + return baseConfig, eips, nil +} + +// Subtests returns all valid subtests of the test. +func (t *StateTest) Subtests() []StateSubtest { + var sub []StateSubtest + for fork, pss := range t.json.Post { + for i := range pss { + sub = append(sub, StateSubtest{fork, i}) + } + } + return sub +} + +func (t *StateTest) gasLimit(subtest StateSubtest) uint64 { + return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] +} + +func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) { + sdb := state.NewDatabase(db) + statedb, _ := state.New(common.Hash{}, sdb, nil) for addr, a := range accounts { statedb.SetCode(addr, a.Code) statedb.SetNonce(addr, a.Nonce) @@ -58,18 +162,97 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo } } // Commit and re-open to start with a clean state. - root, _ := statedb.Commit(0, false, false) + root, _ := statedb.Commit(false) var snaps *snapshot.Tree if snapshotter { - snapconfig := snapshot.Config{ - CacheSize: 1, - NoBuild: false, - AsyncBuild: false, - SkipVerify: true, - } - snaps, _ = snapshot.New(snapconfig, db, triedb, common.Hash{}, root) + snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, common.Hash{}, root, false, true, false) } statedb, _ = state.New(root, sdb, snaps) - return triedb, snaps, statedb + return snaps, statedb +} + +func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis { + return &core.Genesis{ + Config: config, + Coinbase: t.json.Env.Coinbase, + Difficulty: t.json.Env.Difficulty, + GasLimit: t.json.Env.GasLimit, + Number: t.json.Env.Number, + Timestamp: t.json.Env.Timestamp, + Alloc: t.json.Pre, + } +} + +func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (core.Message, error) { + // Derive sender from private key if present. + var from common.Address + if len(tx.PrivateKey) > 0 { + key, err := crypto.ToECDSA(tx.PrivateKey) + if err != nil { + return nil, fmt.Errorf("invalid private key: %v", err) + } + from = crypto.PubkeyToAddress(key.PublicKey) + } + // Parse recipient if present. + var to *common.Address + if tx.To != "" { + to = new(common.Address) + if err := to.UnmarshalText([]byte(tx.To)); err != nil { + return nil, fmt.Errorf("invalid to address: %v", err) + } + } + + // Get values specific to this post state.
+ if ps.Indexes.Data >= len(tx.Data) { + return nil, fmt.Errorf("tx data index %d out of bounds", ps.Indexes.Data) + } + if ps.Indexes.Value >= len(tx.Value) { + return nil, fmt.Errorf("tx value index %d out of bounds", ps.Indexes.Value) + } + if ps.Indexes.Gas >= len(tx.GasLimit) { + return nil, fmt.Errorf("tx gas limit index %d out of bounds", ps.Indexes.Gas) + } + dataHex := tx.Data[ps.Indexes.Data] + valueHex := tx.Value[ps.Indexes.Value] + gasLimit := tx.GasLimit[ps.Indexes.Gas] + // Value, Data hex encoding is messy: https://github.com/ethereum/tests/issues/203 + value := new(big.Int) + if valueHex != "0x" { + v, ok := math.ParseBig256(valueHex) + if !ok { + return nil, fmt.Errorf("invalid tx value %q", valueHex) + } + value = v + } + data, err := hex.DecodeString(strings.TrimPrefix(dataHex, "0x")) + if err != nil { + return nil, fmt.Errorf("invalid tx data %q", dataHex) + } + var accessList types.AccessList + if tx.AccessLists != nil && tx.AccessLists[ps.Indexes.Data] != nil { + accessList = *tx.AccessLists[ps.Indexes.Data] + } + // If baseFee provided, set gasPrice to effectiveGasPrice. + gasPrice := tx.GasPrice + if baseFee != nil { + if tx.MaxFeePerGas == nil { + tx.MaxFeePerGas = gasPrice + } + if tx.MaxFeePerGas == nil { + tx.MaxFeePerGas = new(big.Int) + } + if tx.MaxPriorityFeePerGas == nil { + tx.MaxPriorityFeePerGas = tx.MaxFeePerGas + } + gasPrice = math.BigMin(new(big.Int).Add(tx.MaxPriorityFeePerGas, baseFee), + tx.MaxFeePerGas) + } + if gasPrice == nil { + return nil, fmt.Errorf("no gas price provided") + } + + msg := types.NewMessage(from, to, tx.Nonce, value, gasLimit, gasPrice, + tx.MaxFeePerGas, tx.MaxPriorityFeePerGas, data, accessList, false) + return msg, nil } diff --git a/trie/committer.go b/trie/committer.go index ac317ecb33..535f2cf0d4 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -29,39 +29,46 @@ package trie import ( "fmt" - "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" ) +// leaf represents a trie leaf node +type leaf struct { + blob []byte // raw blob of leaf + parent common.Hash // the hash of parent node +} + // committer is the tool used for the trie Commit operation. The committer will // capture all dirty nodes during the commit process and keep them cached in // insertion order. type committer struct { - nodes *trienode.NodeSet - tracer *tracer + nodes *NodeSet collectLeaf bool } // newCommitter creates a new committer or picks one from the pool. -func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer { +func newCommitter(owner common.Hash, collectLeaf bool) *committer { return &committer{ - nodes: nodeset, - tracer: tracer, + nodes: NewNodeSet(owner), collectLeaf: collectLeaf, } } -// Commit collapses a node down into a hash node. -func (c *committer) Commit(n node) hashNode { - return c.commit(nil, n).(hashNode) +// Commit collapses a node down into a hash node and inserts it into the database +func (c *committer) Commit(n node) (hashNode, *NodeSet, error) { + h, err := c.commit(nil, n) + if err != nil { + return nil, nil, err + } + return h.(hashNode), c.nodes, nil } -// commit collapses a node down into a hash node and returns it.
-func (c *committer) commit(path []byte, n node) node { +// commit collapses a node down into a hash node and inserts it into the database +func (c *committer) commit(path []byte, n node) (node, error) { // if this path is clean, use available cached data hash, dirty := n.cache() if hash != nil && !dirty { - return hash + return hash, nil } // Commit children, then parent, and remove the dirty flag. switch cn := n.(type) { @@ -72,28 +79,34 @@ func (c *committer) commit(path []byte, n node) node { // If the child is fullNode, recursively commit, // otherwise it can only be hashNode or valueNode. if _, ok := cn.Val.(*fullNode); ok { - collapsed.Val = c.commit(append(path, cn.Key...), cn.Val) + childV, err := c.commit(append(path, cn.Key...), cn.Val) + if err != nil { + return nil, err + } + collapsed.Val = childV } - // The key needs to be copied, since we're adding it to the - // modified nodeset. + // The key needs to be copied, since we're delivering it to database collapsed.Key = hexToCompact(cn.Key) hashedNode := c.store(path, collapsed) if hn, ok := hashedNode.(hashNode); ok { - return hn + return hn, nil } - return collapsed + return collapsed, nil case *fullNode: - hashedKids := c.commitChildren(path, cn) + hashedKids, err := c.commitChildren(path, cn) + if err != nil { + return nil, err + } collapsed := cn.copy() collapsed.Children = hashedKids hashedNode := c.store(path, collapsed) if hn, ok := hashedNode.(hashNode); ok { - return hn + return hn, nil } - return collapsed + return collapsed, nil case hashNode: - return cn + return cn, nil default: // nil, valuenode shouldn't be committed panic(fmt.Sprintf("%T: invalid node: %v", n, n)) @@ -101,7 +114,7 @@ func (c *committer) commit(path []byte, n node) node { } // commitChildren commits the children of the given fullnode -func (c *committer) commitChildren(path []byte, n *fullNode) [17]node { +func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) { var children [17]node for i := 0; i < 16; i++ { child := n.Children[i] @@ -118,38 +131,45 @@ func (c *committer) commitChildren(path []byte, n *fullNode) [17]node { // Commit the child recursively and store the "hashed" value. // Note the returned node can be some embedded nodes, so it's // possible the type is not hashNode. - children[i] = c.commit(append(path, byte(i)), child) + hashed, err := c.commit(append(path, byte(i)), child) + if err != nil { + return children, err + } + children[i] = hashed } // For the 17th child, it's possible the type is valuenode. if n.Children[16] != nil { children[16] = n.Children[16] } - return children + return children, nil } -// store hashes the node n and adds it to the modified nodeset. If leaf collection -// is enabled, leaf nodes will be tracked in the modified nodeset as well. +// store hashes the node n and if we have a storage layer specified, it writes +// the key/value pair to it and tracks any node->child references as well as any +// node->external trie references. func (c *committer) store(path []byte, n node) node { // Larger nodes are replaced by their hash and stored in the database. var hash, _ = n.cache() - // This was not generated - must be a small node stored in the parent. // In theory, we should check if the node is leaf here (embedded node - // usually is leaf node). But small value (less than 32bytes) is not - // our target (leaves in account trie only). + // usually is leaf node). But small value(less than 32bytes) is not + // our target(leaves in account trie only). 
if hash == nil { - // The node is embedded in its parent, in other words, this node - // will not be stored in the database independently, mark it as - // deleted only if the node was existent in database before. - _, ok := c.tracer.accessList[string(path)] - if ok { - c.nodes.AddNode(path, trienode.NewDeleted()) - } return n } + // We have the hash already, estimate the RLP encoding-size of the node. + // The size is used for mem tracking, does not need to be exact + var ( + size = estimateSize(n) + nhash = common.BytesToHash(hash) + mnode = &memoryNode{ + hash: nhash, + node: simplifyNode(n), + size: uint16(size), + } + ) // Collect the dirty node to nodeset for return. - nhash := common.BytesToHash(hash) - c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n))) + c.nodes.add(string(path), mnode) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. The key @@ -157,36 +177,38 @@ func (c *committer) store(path []byte, n node) node { if c.collectLeaf { if sn, ok := n.(*shortNode); ok { if val, ok := sn.Val.(valueNode); ok { - c.nodes.AddLeaf(nhash, val) + c.nodes.addLeaf(&leaf{blob: val, parent: nhash}) } } } return hash } -// mptResolver the children resolver in merkle-patricia-tree. -type mptResolver struct{} - -// ForEach implements childResolver, decodes the provided node and -// traverses the children inside. -func (resolver mptResolver) ForEach(node []byte, onChild func(common.Hash)) { - forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild) -} - -// forGatherChildren traverses the node hierarchy and invokes the callback -// for all the hashnode children. -func forGatherChildren(n node, onChild func(hash common.Hash)) { +// estimateSize estimates the size of an rlp-encoded node, without actually +// rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie +// with 1000 leaves, the only errors above 1% are on small shortnodes, where this +// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35) +func estimateSize(n node) int { switch n := n.(type) { case *shortNode: - forGatherChildren(n.Val, onChild) + // A short node contains a compacted key, and a value. + return 3 + len(n.Key) + estimateSize(n.Val) case *fullNode: + // A full node contains up to 16 hashes (some nils), and a key + s := 3 for i := 0; i < 16; i++ { - forGatherChildren(n.Children[i], onChild) + if child := n.Children[i]; child != nil { + s += estimateSize(child) + } else { + s++ + } } + return s + case valueNode: + return 1 + len(n) case hashNode: - onChild(common.BytesToHash(n)) - case valueNode, nil: + return 1 + len(n) default: - panic(fmt.Sprintf("unknown node type: %T", n)) + panic(fmt.Sprintf("node type %T", n)) } } diff --git a/trie/database.go b/trie/database.go index a3bd3ecc65..69989ccb5b 100644 --- a/trie/database.go +++ b/trie/database.go @@ -1,4 +1,14 @@ -// Copyright 2022 The go-ethereum Authors +// (c) 2020-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -18,309 +28,870 @@ package trie import ( "errors" + "fmt" + "io" + "reflect" + "sync" + "time" - "github.com/ava-labs/coreth/trie/triedb/hashdb" - "github.com/ava-labs/coreth/trie/triedb/pathdb" - "github.com/ava-labs/coreth/trie/trienode" - "github.com/ava-labs/coreth/trie/triestate" + "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/ethdb" + "github.com/tenderly/coreth/metrics" + "github.com/tenderly/coreth/rlp" ) -// Config defines all necessary options for database. -type Config struct { - Preimages bool // Flag whether the preimage of node key is recorded - IsVerkle bool // Flag whether the db is holding a verkle tree - HashDB *hashdb.Config // Configs for hash-based scheme - PathDB *pathdb.Config // Configs for experimental path-based scheme +var ( + memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) + memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) + memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) + memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) + + memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) + memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) + memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) + memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/size", nil) + memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/childsize", nil) + memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("trie/memcache/dirty/nodes", nil) + + memcacheFlushMeter = metrics.NewRegisteredMeter("trie/memcache/flush/count", nil) + memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) + memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/locktime", nil) + memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) + memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) + + memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) + memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) + memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) + + memcacheCommitMeter = metrics.NewRegisteredMeter("trie/memcache/commit/count", nil) + memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) + memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/locktime", nil) + memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) + memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) +) + +// Database is an intermediate write layer between the trie data structures and +// the disk database. The aim is to accumulate trie writes in-memory and only +// periodically flush a couple tries to disk, garbage collecting the remainder. 
+// +// The trie Database is thread-safe in its mutations and is thread-safe in providing individual, +// independent node access. +type Database struct { + diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes + + cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs + dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes + oldest common.Hash // Oldest tracked node, flush-list head + newest common.Hash // Newest tracked node, flush-list tail + + gctime time.Duration // Time spent on garbage collection since last commit + gcnodes uint64 // Nodes garbage collected since last commit + gcsize common.StorageSize // Data storage garbage collected since last commit + + flushtime time.Duration // Time spent on data flushing since last commit + flushnodes uint64 // Nodes flushed since last commit + flushsize common.StorageSize // Data storage flushed since last commit + + dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata) + childrenSize common.StorageSize // Storage size of the external children tracking + preimages *preimageStore // The store for caching preimages + + lock sync.RWMutex } -// HashDefaults represents a config for using hash-based scheme with -// default settings. -var HashDefaults = &Config{ - Preimages: false, - HashDB: hashdb.Defaults, +// rawNode is a simple binary blob used to differentiate between collapsed trie +// nodes and already encoded RLP binary blobs (while at the same time store them +// in the same cache fields). +type rawNode []byte + +func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } + +func (n rawNode) EncodeRLP(w io.Writer) error { + _, err := w.Write(n) + return err } -// backend defines the methods needed to access/update trie nodes in different -// state scheme. -type backend interface { - // Scheme returns the identifier of used storage scheme. - Scheme() string +// rawFullNode represents only the useful data content of a full node, with the +// caches and flags stripped out to minimize its data storage. This type honors +// the same RLP encoding as the original parent. +type rawFullNode [17]node - // Initialized returns an indicator if the state data is already initialized - // according to the state scheme. - Initialized(genesisRoot common.Hash) bool +func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") } - // Size returns the current storage size of the diff layers on top of the - // disk layer and the storage size of the nodes cached in the disk layer. - // - // For hash scheme, there is no differentiation between diff layer nodes - // and dirty disk layer nodes, so both are merged into the second return. - Size() (common.StorageSize, common.StorageSize) +func (n rawFullNode) EncodeRLP(w io.Writer) error { + eb := rlp.NewEncoderBuffer(w) + n.encode(eb) + return eb.Flush() +} - // Update performs a state transition by committing dirty nodes contained - // in the given set in order to update state from the specified parent to - // the specified root. - // - // The passed in maps(nodes, states) will be retained to avoid copying - // everything. Therefore, these maps must not be changed afterwards. 
- Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error +// rawShortNode represents only the useful data content of a short node, with the +// caches and flags stripped out to minimize its data storage. This type honors +// the same RLP encoding as the original parent. +type rawShortNode struct { + Key []byte + Val node +} + +func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } - // Commit writes all relevant trie nodes belonging to the specified state - // to disk. Report specifies whether logs will be displayed in info level. - Commit(root common.Hash, report bool) error +// cachedNode is all the information we know about a single cached trie node +// in the memory database write layer. +type cachedNode struct { + node node // Cached collapsed trie node, or raw rlp data + size uint16 // Byte size of the useful cached data - // Close closes the trie database backend and releases all held resources. - Close() error + parents uint32 // Number of live nodes referencing this one + children map[common.Hash]uint16 // External children referenced by this node + + flushPrev common.Hash // Previous node in the flush-list + flushNext common.Hash // Next node in the flush-list } -// Database is the wrapper of the underlying backend which is shared by different -// types of node backend as an entrypoint. It's responsible for all interactions -// relevant with trie nodes and node preimages. -type Database struct { - config *Config // Configuration for trie database - diskdb ethdb.Database // Persistent database to store the snapshot - preimages *preimageStore // The store for caching preimages - backend backend // The backend for managing trie nodes +// cachedNodeSize is the raw size of a cachedNode data structure without any +// node data included. It's an approximate size, but should be a lot better +// than not counting them. +var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) + +// cachedNodeChildrenSize is the raw size of an initialized but empty external +// reference map. +const cachedNodeChildrenSize = 48 + +// rlp returns the raw rlp encoded blob of the cached trie node, either directly +// from the cache, or by regenerating it from the collapsed node. +func (n *cachedNode) rlp() []byte { + if node, ok := n.node.(rawNode); ok { + return node + } + return nodeToBytes(n.node) } -// NewDatabase initializes the trie database with default settings, note -// the legacy hash-based scheme is used by default. -func NewDatabase(diskdb ethdb.Database, config *Config) *Database { - // Sanitize the config and use the default one if it's not specified. - if config == nil { - config = HashDefaults +// obj returns the decoded and expanded trie node, either directly from the cache, +// or by regenerating it from the rlp encoded blob. +func (n *cachedNode) obj(hash common.Hash) node { + if node, ok := n.node.(rawNode); ok { + // The raw-blob format nodes are loaded either from the + // clean cache or the database, they are all in their own + // copy and safe to use unsafe decoder. 
+ return mustDecodeNodeUnsafe(hash[:], node) } - var preimages *preimageStore - if config.Preimages { - preimages = newPreimageStore(diskdb) + return expandNode(hash[:], n.node) +} + +// forChilds invokes the callback for all the tracked children of this node, +// both the implicit ones from inside the node as well as the explicit ones +// from outside the node. +func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { + for child := range n.children { + onChild(child) } - db := &Database{ - config: config, - diskdb: diskdb, - preimages: preimages, + if _, ok := n.node.(rawNode); !ok { + forGatherChildren(n.node, onChild) } - if config.HashDB != nil && config.PathDB != nil { - log.Crit("Both 'hash' and 'path' mode are configured") +} + +// forGatherChildren traverses the node hierarchy of a collapsed storage node and +// invokes the callback for all the hashnode children. +func forGatherChildren(n node, onChild func(hash common.Hash)) { + switch n := n.(type) { + case *rawShortNode: + forGatherChildren(n.Val, onChild) + case rawFullNode: + for i := 0; i < 16; i++ { + forGatherChildren(n[i], onChild) + } + case hashNode: + onChild(common.BytesToHash(n)) + case valueNode, nil, rawNode: + default: + panic(fmt.Sprintf("unknown node type: %T", n)) } - if config.PathDB != nil { - db.backend = pathdb.New(diskdb, config.PathDB) - } else { - db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) +} + +// simplifyNode traverses the hierarchy of an expanded memory node and discards +// all the internal caches, returning a node that only contains the raw data. +func simplifyNode(n node) node { + switch n := n.(type) { + case *shortNode: + // Short nodes discard the flags and cascade + return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} + + case *fullNode: + // Full nodes discard the flags and cascade + node := rawFullNode(n.Children) + for i := 0; i < len(node); i++ { + if node[i] != nil { + node[i] = simplifyNode(node[i]) + } + } + return node + + case valueNode, hashNode, rawNode: + return n + + default: + panic(fmt.Sprintf("unknown node type: %T", n)) } - return db } -// Reader returns a reader for accessing all trie nodes with provided state root. -// An error will be returned if the requested state is not available. -func (db *Database) Reader(blockRoot common.Hash) (Reader, error) { - switch b := db.backend.(type) { - case *hashdb.Database: - return b.Reader(blockRoot) - case *pathdb.Database: - return b.Reader(blockRoot) +// expandNode traverses the node hierarchy of a collapsed storage node and converts +// all fields and keys into expanded memory form. +func expandNode(hash hashNode, n node) node { + switch n := n.(type) { + case *rawShortNode: + // Short nodes need key and child expansion + return &shortNode{ + Key: compactToHex(n.Key), + Val: expandNode(nil, n.Val), + flags: nodeFlag{ + hash: hash, + }, + } + + case rawFullNode: + // Full nodes need child expansion + node := &fullNode{ + flags: nodeFlag{ + hash: hash, + }, + } + for i := 0; i < len(node.Children); i++ { + if n[i] != nil { + node.Children[i] = expandNode(nil, n[i]) + } + } + return node + + case valueNode, hashNode: + return n + + default: + panic(fmt.Sprintf("unknown node type: %T", n)) } - return nil, errors.New("unknown backend") } -// Update performs a state transition by committing dirty nodes contained in the -// given set in order to update state from the specified parent to the specified -// root. 
The held pre-images accumulated up to this point will be flushed in case -// the size exceeds the threshold. -// -// The passed in maps(nodes, states) will be retained to avoid copying everything. -// Therefore, these maps must not be changed afterwards. -func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { - if db.preimages != nil { - db.preimages.commit(false) +// Config defines all necessary options for database. +type Config struct { + Cache int // Memory allowance (MB) to use for caching trie nodes in memory + Preimages bool // Flag whether the preimage of trie key is recorded +} + +// NewDatabase creates a new trie database to store ephemeral trie content before +// it's written out to disk or garbage collected. No read cache is created, so all +// data retrievals will hit the underlying disk database. +func NewDatabase(diskdb ethdb.KeyValueStore) *Database { + return NewDatabaseWithConfig(diskdb, nil) +} + +// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content +// before it's written out to disk or garbage collected. It also acts as a read cache +// for nodes loaded from disk. +func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { + var cleans *fastcache.Cache + if config != nil && config.Cache > 0 { + cleans = fastcache.New(config.Cache * 1024 * 1024) + } + var preimage *preimageStore + if config != nil && config.Preimages { + preimage = newPreimageStore(diskdb) + } + db := &Database{ + diskdb: diskdb, + cleans: cleans, + dirties: map[common.Hash]*cachedNode{{}: { + children: make(map[common.Hash]uint16), + }}, + preimages: preimage, } - return db.backend.Update(root, parent, block, nodes, states) + return db } -func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { - if db.preimages != nil { - db.preimages.commit(false) +// DiskDB retrieves the persistent storage backing the trie database. +func (db *Database) DiskDB() ethdb.KeyValueStore { + return db.diskdb +} + +// insert inserts a simplified trie node into the memory database. +// All nodes inserted by this function will be reference tracked +// and in theory should only be used for **trie node** insertion. +func (db *Database) insert(hash common.Hash, size int, node node) { + // If the node's already cached, skip + if _, ok := db.dirties[hash]; ok { + return } - hdb, ok := db.backend.(*hashdb.Database) - if ok { - return hdb.UpdateAndReferenceRoot(root, parent, block, nodes, states) + memcacheDirtyWriteMeter.Mark(int64(size)) + + // Create the cached entry for this node + entry := &cachedNode{ + node: node, + size: uint16(size), + flushPrev: db.newest, + } + entry.forChilds(func(child common.Hash) { + if c := db.dirties[child]; c != nil { + c.parents++ + } + }) + db.dirties[hash] = entry + + // Update the flush-list endpoints + if db.oldest == (common.Hash{}) { + db.oldest, db.newest = hash, hash + } else { + db.dirties[db.newest].flushNext, db.newest = hash, hash } - return db.backend.Update(root, parent, block, nodes, states) + db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) } -// Commit iterates over all the children of a particular node, writes them out -// to disk. As a side effect, all pre-images accumulated up to this point are -// also written.
-func (db *Database) Commit(root common.Hash, report bool) error { - if db.preimages != nil { - db.preimages.commit(true) +// RawNode retrieves an encoded cached trie node from memory. If it cannot be found +// cached, the method queries the persistent database for the content. This function +// will not return the metaroot. +func (db *Database) RawNode(h common.Hash) ([]byte, error) { + if h == (common.Hash{}) { + return nil, errors.New("not found") + } + enc, cn, err := db.node(h) + if err != nil { + return nil, err + } + if len(enc) > 0 { + return enc, nil } - return db.backend.Commit(root, report) + return cn.rlp(), nil } -// Size returns the storage size of diff layer nodes above the persistent disk -// layer, the dirty nodes buffered within the disk layer, and the size of cached -// preimages. -func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) { - var ( - diffs, nodes common.StorageSize - preimages common.StorageSize - ) - diffs, nodes = db.backend.Size() - if db.preimages != nil { - preimages = db.preimages.size() +// EncodedNode returns a formatted [node] when given a node hash. If no node +// exists, nil is returned. This function will return the metaroot. +func (db *Database) EncodedNode(h common.Hash) node { + enc, cn, err := db.node(h) + if err != nil { + return nil } - return diffs, nodes, preimages + if len(enc) > 0 { + return mustDecodeNode(h[:], enc) + } + return cn.obj(h) } -// Initialized returns an indicator if the state data is already initialized -// according to the state scheme. -func (db *Database) Initialized(genesisRoot common.Hash) bool { - return db.backend.Initialized(genesisRoot) +// node retrieves an encoded cached trie node from memory. If it cannot be found +// cached, the method queries the persistent database for the content. +// +// We do not return a single node representation to avoid useless +// encoding/decoding depending on the caller. +func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) { + // Retrieve the node from the clean cache if available + if db.cleans != nil { + if enc := db.cleans.Get(nil, hash[:]); enc != nil { + memcacheCleanHitMeter.Mark(1) + memcacheCleanReadMeter.Mark(int64(len(enc))) + return enc, nil, nil + } + } + // Retrieve the node from the dirty cache if available + db.lock.RLock() + dirty := db.dirties[hash] + db.lock.RUnlock() + + if dirty != nil { + memcacheDirtyHitMeter.Mark(1) + memcacheDirtyReadMeter.Mark(int64(dirty.size)) + return nil, dirty, nil + } + memcacheDirtyMissMeter.Mark(1) + + // Content unavailable in memory, attempt to retrieve from disk + enc := rawdb.ReadTrieNode(db.diskdb, hash) + if len(enc) != 0 { + if db.cleans != nil { + db.cleans.Set(hash[:], enc) + memcacheCleanMissMeter.Mark(1) + memcacheCleanWriteMeter.Mark(int64(len(enc))) + } + return enc, nil, nil + } + return nil, nil, errors.New("not found") } -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return db.backend.Scheme() +// Nodes retrieves the hashes of all the nodes cached within the memory database. +// This method is extremely expensive and should only be used to validate internal +// states in test code. 
+func (db *Database) Nodes() []common.Hash {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	var hashes = make([]common.Hash, 0, len(db.dirties))
+	for hash := range db.dirties {
+		if hash != (common.Hash{}) { // Special case for "root" references/nodes
+			hashes = append(hashes, hash)
+		}
+	}
+	return hashes
 }

-// Close flushes the dangling preimages to disk and closes the trie database.
-// It is meant to be called when closing the blockchain object, so that all
-// resources held can be released correctly.
-func (db *Database) Close() error {
-	db.WritePreimages()
-	return db.backend.Close()
+// Reference adds a new reference from a parent node to a child node. This
+// function is used to add a reference between an internal trie node and an
+// external node (e.g. a storage trie root); all internal trie nodes are
+// referenced together by the database itself.
+func (db *Database) Reference(child common.Hash, parent common.Hash) {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	db.reference(child, parent)
 }

-// WritePreimages flushes all accumulated preimages to disk forcibly.
-func (db *Database) WritePreimages() {
-	if db.preimages != nil {
-		db.preimages.commit(true)
+func (db *Database) reference(child common.Hash, parent common.Hash) {
+	// If the node does not exist, it's a node pulled from disk, skip
+	node, ok := db.dirties[child]
+	if !ok {
+		return
+	}
+	// If the reference already exists, only duplicate it for roots
+	if db.dirties[parent].children == nil {
+		db.dirties[parent].children = make(map[common.Hash]uint16)
+		db.childrenSize += cachedNodeChildrenSize
+	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
+		return
+	}
+	node.parents++
+	db.dirties[parent].children[child]++
+	if db.dirties[parent].children[child] == 1 {
+		db.childrenSize += common.HashLength + 2 // uint16 counter
 	}
 }

-// Preimage retrieves a cached trie node pre-image from memory. If it cannot be
-// found cached, the method queries the persistent database for the content.
-func (db *Database) Preimage(hash common.Hash) []byte {
-	if db.preimages == nil {
-		return nil
+// Dereference removes an existing reference from a root node.
+func (db *Database) Dereference(root common.Hash) {
+	// Sanity check to ensure that the meta-root is not removed
+	if root == (common.Hash{}) {
+		log.Error("Attempted to dereference the trie cache meta root")
+		return
 	}
-	return db.preimages.preimage(hash)
+
+	db.lock.Lock()
+	defer db.lock.Unlock()
+	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
+	db.dereference(root, common.Hash{})
+
+	db.gcnodes += uint64(nodes - len(db.dirties))
+	db.gcsize += storage - db.dirtiesSize
+	db.gctime += time.Since(start)
+
+	memcacheDirtySizeGauge.Update(float64(db.dirtiesSize))
+	memcacheDirtyChildSizeGauge.Update(float64(db.childrenSize))
+	memcacheDirtyNodesGauge.Update(int64(len(db.dirties)))
+
+	memcacheGCTimeTimer.Update(time.Since(start))
+	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
+	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
+
+	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
+		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
 }

-// Cap iteratively flushes old but still referenced trie nodes until the total
-// memory usage goes below the given threshold.
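The `Reference`/`reference`/`Dereference` trio above is plain parent-count garbage collection over the dirty cache. A self-contained toy mirroring the cascade, with hypothetical names (`refNode`, `deref`) that are not part of this package:

```go
package main

import "fmt"

// refNode mirrors the parents counter and child links of a cached trie node.
type refNode struct {
	parents  int
	children []string
}

// deref drops one parent reference; when the count hits zero the node is
// deleted and the drop cascades into its children, as in dereference above.
func deref(nodes map[string]*refNode, name string) {
	n, ok := nodes[name]
	if !ok {
		return // previously committed, no longer in the dirty cache
	}
	if n.parents > 0 {
		n.parents--
	}
	if n.parents > 0 {
		return // still referenced elsewhere
	}
	for _, child := range n.children {
		deref(nodes, child)
	}
	delete(nodes, name)
}

func main() {
	nodes := map[string]*refNode{
		"root":   {parents: 1, children: []string{"leafA", "shared"}},
		"leafA":  {parents: 1},
		"shared": {parents: 2}, // also referenced by a second trie
	}
	deref(nodes, "root")
	fmt.Println(len(nodes)) // 1: only "shared" survives
}
```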
The held pre-images accumulated -// up to this point will be flushed in case the size exceeds the threshold. -// -// It's only supported by hash-based database and will return an error for others. -func (db *Database) Cap(limit common.StorageSize) error { - hdb, ok := db.backend.(*hashdb.Database) +// dereference is the private locked version of Dereference. +func (db *Database) dereference(child common.Hash, parent common.Hash) { + // Dereference the parent-child + node := db.dirties[parent] + + if node.children != nil && node.children[child] > 0 { + node.children[child]-- + if node.children[child] == 0 { + delete(node.children, child) + db.childrenSize -= (common.HashLength + 2) // uint16 counter + } + } + // If the child does not exist, it's a previously committed node. + node, ok := db.dirties[child] if !ok { - return errors.New("not supported") + return } - if db.preimages != nil { - db.preimages.commit(false) + // If there are no more references to the child, delete it and cascade + if node.parents > 0 { + // This is a special cornercase where a node loaded from disk (i.e. not in the + // memcache any more) gets reinjected as a new node (short node split into full, + // then reverted into short), causing a cached node to have no parents. That is + // no problem in itself, but don't make maxint parents out of it. + node.parents-- + } + if node.parents == 0 { + // Remove the node from the flush-list + switch child { + case db.oldest: + db.oldest = node.flushNext + db.dirties[node.flushNext].flushPrev = common.Hash{} + case db.newest: + db.newest = node.flushPrev + db.dirties[node.flushPrev].flushNext = common.Hash{} + default: + db.dirties[node.flushPrev].flushNext = node.flushNext + db.dirties[node.flushNext].flushPrev = node.flushPrev + } + // Dereference all children and delete the node + node.forChilds(func(hash common.Hash) { + db.dereference(hash, child) + }) + delete(db.dirties, child) + db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) + if node.children != nil { + db.childrenSize -= cachedNodeChildrenSize + } } - return hdb.Cap(limit) } -// Reference adds a new reference from a parent node to a child node. This function -// is used to add reference between internal trie node and external node(e.g. storage -// trie root), all internal trie nodes are referenced together by database itself. -// -// It's only supported by hash-based database and will return an error for others. -func (db *Database) Reference(root common.Hash, parent common.Hash) error { - hdb, ok := db.backend.(*hashdb.Database) - if !ok { - return errors.New("not supported") +// flushItem is used to track all [cachedNode]s that must be written to disk +type flushItem struct { + hash common.Hash + node *cachedNode + rlp []byte +} + +// writeFlushItems writes all items in [toFlush] to disk in batches of +// [ethdb.IdealBatchSize]. This function does not access any variables inside +// of [Database] and does not need to be synchronized. 
+func (db *Database) writeFlushItems(toFlush []flushItem) error {
+	batch := db.diskdb.NewBatch()
+	for _, item := range toFlush {
+		rlp := item.node.rlp()
+		item.rlp = rlp
+		rawdb.WriteTrieNode(batch, item.hash, rlp)
+
+		// If we exceeded the ideal batch size, commit and reset
+		if batch.ValueSize() >= ethdb.IdealBatchSize {
+			if err := batch.Write(); err != nil {
+				log.Error("Failed to write flush list to disk", "err", err)
+				return err
+			}
+			batch.Reset()
+		}
+	}
+
+	// Flush out any remainder data from the last batch
+	if err := batch.Write(); err != nil {
+		log.Error("Failed to write flush list to disk", "err", err)
+		return err
 	}
-	hdb.Reference(root, parent)
+	return nil
 }

-// Dereference removes an existing reference from a root node. It's only
-// supported by hash-based database and will return an error for others.
-func (db *Database) Dereference(root common.Hash) error {
-	hdb, ok := db.backend.(*hashdb.Database)
-	if !ok {
-		return errors.New("not supported")
+// Cap iteratively flushes old but still referenced trie nodes until the total
+// memory usage goes below the given threshold.
+func (db *Database) Cap(limit common.StorageSize) error {
+	start := time.Now()
+	// If the preimage cache got large enough, push to disk. If it's still
+	// small, leave it for later to deduplicate writes.
+	if db.preimages != nil {
+		if err := db.preimages.commit(false); err != nil {
+			return err
+		}
+	}
+
+	// It is important that outside code doesn't see an inconsistent state
+	// (referenced data removed from memory cache during commit but not yet
+	// in persistent storage). This is ensured by only uncaching existing
+	// data when the database write finalizes.
+	db.lock.RLock()
+	lockStart := time.Now()
+	nodes, storage := len(db.dirties), db.dirtiesSize
+
+	// db.dirtiesSize only contains the useful data in the cache, but when reporting
+	// the total memory consumption, the maintenance metadata must also be counted.
+	pendingSize := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
+	pendingSize += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
+	if pendingSize <= limit {
+		db.lock.RUnlock()
+		return nil
+	}
+
+	// Keep removing nodes from the flush-list until we're below allowance
+	toFlush := make([]flushItem, 0, 128)
+	oldest := db.oldest
+	for pendingSize > limit && oldest != (common.Hash{}) {
+		// Fetch the oldest referenced node and push into the batch
+		node := db.dirties[oldest]
+		toFlush = append(toFlush, flushItem{oldest, node, nil})
+
+		// Iterate to the next flush item, or abort if the size cap was achieved. Size
+		// is the total size, including the useful cached data (hash -> blob), the
+		// cache item metadata, as well as external children mappings.
+		pendingSize -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
+		if node.children != nil {
+			pendingSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+		}
+		oldest = node.flushNext
 	}
-	hdb.Dereference(root)
+	db.lock.RUnlock()
+	lockTime := time.Since(lockStart)
+
+	// Write nodes to disk
+	if err := db.writeFlushItems(toFlush); err != nil {
+		return err
+	}
+
+	// Flush all written items from dirties
+	//
+	// NOTE: The order of the flushlist may have changed while the lock was not
+	// held, so we cannot just iterate to [oldest].
+ db.lock.Lock() + defer db.lock.Unlock() + lockStart = time.Now() + for _, item := range toFlush { + // [item.rlp] is populated in [writeFlushItems] + db.removeFromDirties(item.hash, item.rlp) + } + db.flushnodes += uint64(nodes - len(db.dirties)) + db.flushsize += storage - db.dirtiesSize + db.flushtime += time.Since(start) + + memcacheDirtySizeGauge.Update(float64(db.dirtiesSize)) + memcacheDirtyChildSizeGauge.Update(float64(db.childrenSize)) + memcacheDirtyNodesGauge.Update(int64(len(db.dirties))) + + memcacheFlushMeter.Mark(1) + memcacheFlushTimeTimer.Update(time.Since(start)) + memcacheFlushLockTimeTimer.Update(lockTime + time.Since(lockStart)) + memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) + + log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), + "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) return nil } -// Recover rollbacks the database to a specified historical point. The state is -// supported as the rollback destination only if it's canonical state and the -// corresponding trie histories are existent. It's only supported by path-based -// database and will return an error for others. -func (db *Database) Recover(target common.Hash) error { - pdb, ok := db.backend.(*pathdb.Database) - if !ok { - return errors.New("not supported") +// Commit iterates over all the children of a particular node, writes them out +// to disk, forcefully tearing down all references in both directions. As a side +// effect, all pre-images accumulated up to this point are also written. +func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error { + start := time.Now() + if db.preimages != nil { + if err := db.preimages.commit(true); err != nil { + return err + } + } + + // It is important that outside code doesn't see an inconsistent state (referenced + // data removed from memory cache during commit but not yet in persistent storage). + // This is ensured by only uncaching existing data when the database write finalizes. 
+	db.lock.RLock()
+	lockStart := time.Now()
+	nodes, storage := len(db.dirties), db.dirtiesSize
+	toFlush, err := db.commit(node, make([]flushItem, 0, 128), callback)
+	if err != nil {
+		log.Error("Failed to commit trie from trie database", "err", err)
+		return err
+	}
+	db.lock.RUnlock()
+	lockTime := time.Since(lockStart)
+
+	// Write nodes to disk
+	if err := db.writeFlushItems(toFlush); err != nil {
+		return err
+	}
+
+	// Flush all written items from dirties
+	db.lock.Lock()
+	defer db.lock.Unlock()
+	lockStart = time.Now()
+	for _, item := range toFlush {
+		// [item.rlp] is populated in [writeFlushItems]
+		db.removeFromDirties(item.hash, item.rlp)
+	}
+
+	memcacheDirtySizeGauge.Update(float64(db.dirtiesSize))
+	memcacheDirtyChildSizeGauge.Update(float64(db.childrenSize))
+	memcacheDirtyNodesGauge.Update(int64(len(db.dirties)))
+
+	memcacheCommitMeter.Mark(1)
+	memcacheCommitTimeTimer.Update(time.Since(start))
+	memcacheCommitLockTimeTimer.Update(lockTime + time.Since(lockStart))
+	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
+	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
+
+	logger := log.Info
+	if !report {
+		logger = log.Debug
 	}
-	return pdb.Recover(target, &trieLoader{db: db})
+	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
+		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
+
+	// Reset the garbage collection statistics
+	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
+	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0
+	return nil
 }

-// Recoverable returns the indicator if the specified state is enabled to be
-// recovered. It's only supported by path-based database and will return an
-// error for others.
-func (db *Database) Recoverable(root common.Hash) (bool, error) {
-	pdb, ok := db.backend.(*pathdb.Database)
+// commit is the private locked version of Commit. This function does not
+// mutate any data; rather, it collects all data that should be committed.
+//
+// [callback] will be invoked as soon as it is determined a trie node will be
+// flushed to disk (before it is actually written).
+func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func(common.Hash)) ([]flushItem, error) {
+	// If the node does not exist, it's a previously committed node
+	node, ok := db.dirties[hash]
 	if !ok {
-		return false, errors.New("not supported")
+		return toFlush, nil
+	}
+	var err error
+	node.forChilds(func(child common.Hash) {
+		if err == nil {
+			toFlush, err = db.commit(child, toFlush, callback)
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+	// By processing the children of each node before the node itself, we ensure
+	// that children are committed before their parents (an invariant of this
+	// package).
+	toFlush = append(toFlush, flushItem{hash, node, nil})
+	if callback != nil {
+		callback(hash)
 	}
-	return pdb.Recoverable(root), nil
+	return toFlush, nil
 }

-// Disable deactivates the database and invalidates all available state layers
-// as stale to prevent access to the persistent state, which is in the syncing
-// stage.
+// removeFromDirties is invoked after database writes and implements dirty data uncaching.
 //
-// It's only supported by path-based database and will return an error for others.
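The recursive `commit` above exists to guarantee a post-order flush: children always land in the flush list before the parent that references them, so a crash mid-batch never leaves a persisted parent pointing at memory-only children. A standalone sketch of that ordering, using toy types rather than this package's:

```go
package main

import "fmt"

type toyNode struct {
	name     string
	children []*toyNode
}

// collect appends every child before the node itself, matching the
// children-before-parents invariant documented in commit above.
func collect(n *toyNode, out []string) []string {
	for _, c := range n.children {
		out = collect(c, out)
	}
	return append(out, n.name)
}

func main() {
	root := &toyNode{"root", []*toyNode{
		{"a", nil},
		{"b", []*toyNode{{"b1", nil}}},
	}}
	fmt.Println(collect(root, nil)) // [a b1 b root]
}
```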
-func (db *Database) Disable() error { - pdb, ok := db.backend.(*pathdb.Database) +// This is the post-processing step of a commit operation where the already persisted trie is +// removed from the dirty cache and moved into the clean cache. The reason behind +// the two-phase commit is to ensure data availability while moving from memory +// to disk. +// +// It is assumed the caller holds the [dirtiesLock] when this function is +// called. +func (db *Database) removeFromDirties(hash common.Hash, rlp []byte) { + // If the node does not exist, we're done on this path. This could happen if + // nodes are capped to disk while another thread is committing those same + // nodes. + node, ok := db.dirties[hash] if !ok { - return errors.New("not supported") + return + } + // Node still exists, remove it from the flush-list + switch hash { + case db.oldest: + db.oldest = node.flushNext + db.dirties[node.flushNext].flushPrev = common.Hash{} + case db.newest: + db.newest = node.flushPrev + db.dirties[node.flushPrev].flushNext = common.Hash{} + default: + db.dirties[node.flushPrev].flushNext = node.flushNext + db.dirties[node.flushNext].flushPrev = node.flushPrev + } + // Remove the node from the dirty cache + delete(db.dirties, hash) + db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) + if node.children != nil { + db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) + } + // Move the flushed node into the clean cache to prevent insta-reloads + if db.cleans != nil { + db.cleans.Set(hash[:], rlp) + memcacheCleanWriteMeter.Mark(int64(len(rlp))) } - return pdb.Disable() } -// Enable activates database and resets the state tree with the provided persistent -// state root once the state sync is finished. -func (db *Database) Enable(root common.Hash) error { - pdb, ok := db.backend.(*pathdb.Database) - if !ok { - return errors.New("not supported") +// Update inserts the dirty nodes in provided nodeset into database and +// links the account trie with multiple storage tries if necessary. +func (db *Database) Update(nodes *MergedNodeSet) error { + db.lock.Lock() + defer db.lock.Unlock() + + return db.update(nodes) +} + +// UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into +// database and links the account trie with multiple storage tries if necessary, +// then adds a reference [from] root to the metaroot while holding the db's lock. +func (db *Database) UpdateAndReferenceRoot(nodes *MergedNodeSet, root common.Hash) error { + db.lock.Lock() + defer db.lock.Unlock() + + if err := db.update(nodes); err != nil { + return err } - return pdb.Enable(root) + db.reference(root, common.Hash{}) + return nil } -// Journal commits an entire diff hierarchy to disk into a single journal entry. -// This is meant to be used during shutdown to persist the snapshot without -// flattening everything down (bad for reorgs). It's only supported by path-based -// database and will return an error for others. -func (db *Database) Journal(root common.Hash) error { - pdb, ok := db.backend.(*pathdb.Database) - if !ok { - return errors.New("not supported") +func (db *Database) update(nodes *MergedNodeSet) error { + // Insert dirty nodes into the database. In the same tree, it must be + // ensured that children are inserted first, then parent so that children + // can be linked with their parent correctly. 
+	//
+	// Note, the storage tries must be flushed before the account trie to
+	// retain the invariant that children go into the dirty cache first.
+	var order []common.Hash
+	for owner := range nodes.sets {
+		if owner == (common.Hash{}) {
+			continue
+		}
+		order = append(order, owner)
+	}
+	if _, ok := nodes.sets[common.Hash{}]; ok {
+		order = append(order, common.Hash{})
 	}
-	return pdb.Journal(root)
+	for _, owner := range order {
+		subset := nodes.sets[owner]
+		for _, path := range subset.paths {
+			n, ok := subset.nodes[path]
+			if !ok {
+				return fmt.Errorf("missing node %x %v", owner, path)
+			}
+			db.insert(n.hash, int(n.size), n.node)
+		}
+	}
+	// Link up the account trie and storage trie if the node points
+	// to an account trie leaf.
+	if set, present := nodes.sets[common.Hash{}]; present {
+		for _, n := range set.leaves {
+			var account types.StateAccount
+			if err := rlp.DecodeBytes(n.blob, &account); err != nil {
+				return err
+			}
+			if account.Root != emptyRoot {
+				db.reference(account.Root, n.parent)
+			}
+		}
+	}
+	return nil
 }

-// SetBufferSize sets the node buffer size to the provided value(in bytes).
-// It's only supported by path-based database and will return an error for
-// others.
-func (db *Database) SetBufferSize(size int) error {
-	pdb, ok := db.backend.(*pathdb.Database)
-	if !ok {
-		return errors.New("not supported")
+// Size returns the current storage size of the memory cache in front of the
+// persistent database layer.
+func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+	// db.dirtiesSize only contains the useful data in the cache, but when reporting
+	// the total memory consumption, the maintenance metadata must also be counted.
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
+	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
+	var preimageSize common.StorageSize
+	if db.preimages != nil {
+		preimageSize = db.preimages.size()
 	}
-	return pdb.SetBufferSize(size)
+	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
 }

-// IsVerkle returns the indicator if the database is holding a verkle tree.
-func (db *Database) IsVerkle() bool {
-	return db.config.IsVerkle
+// CommitPreimages flushes the dangling preimages to disk. It is meant to be
+// called when closing the blockchain object, so that preimages are persisted
+// to the database.
+func (db *Database) CommitPreimages() error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	if db.preimages == nil {
+		return nil
+	}
+	return db.preimages.commit(true)
 }
diff --git a/trie/database_test.go b/trie/database_test.go
index 156e04d940..8a0354f90f 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -27,24 +27,17 @@
 package trie

 import (
-	"github.com/ava-labs/coreth/core/rawdb"
-	"github.com/ava-labs/coreth/trie/triedb/hashdb"
-	"github.com/ava-labs/coreth/trie/triedb/pathdb"
-	"github.com/ethereum/go-ethereum/ethdb"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/tenderly/coreth/ethdb/memorydb"
 )

-// newTestDatabase initializes the trie database with specified scheme.
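The owner ordering inside `update` above can be isolated as follows; `sets` is a hypothetical stand-in for `nodes.sets`. Storage-trie owners go first and the zero-owner account trie last, so by the time an account node referencing a storage root is inserted, that root already sits in the dirty cache:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// flushOrder returns every storage-trie owner first and the account trie
// (zero owner) last, mirroring the loop in update.
func flushOrder(sets map[common.Hash]bool) []common.Hash {
	var order []common.Hash
	for owner := range sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if sets[common.Hash{}] {
		order = append(order, common.Hash{})
	}
	return order
}

func main() {
	sets := map[common.Hash]bool{
		common.Hash{}:            true, // account trie
		common.HexToHash("0x01"): true, // a storage trie
	}
	order := flushOrder(sets)
	fmt.Println(order[len(order)-1] == (common.Hash{})) // true: account trie last
}
```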
-func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
-	config := &Config{Preimages: false}
-	if scheme == rawdb.HashScheme {
-		config.HashDB = &hashdb.Config{
-			CleanCacheSize: 0,
-		} // disable clean cache
-	} else {
-		config.PathDB = &pathdb.Config{
-			CleanCacheSize: 0,
-			DirtyCacheSize: 0,
-		} // disable clean/dirty cache
+// Tests that the trie database returns a missing trie node error if attempting
+// to retrieve the meta root.
+func TestDatabaseMetarootFetch(t *testing.T) {
+	db := NewDatabase(memorydb.New())
+	if _, err := db.RawNode(common.Hash{}); err == nil {
+		t.Fatalf("metaroot retrieval succeeded")
 	}
-	return NewDatabase(diskdb, config)
 }
diff --git a/trie/nodeset.go b/trie/nodeset.go
new file mode 100644
index 0000000000..421ad13435
--- /dev/null
+++ b/trie/nodeset.go
@@ -0,0 +1,104 @@
+// (c) 2022, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

+package trie
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// memoryNode is all the information we know about a single cached trie node
+// in the memory.
+type memoryNode struct {
+	hash common.Hash // Node hash, computed by hashing rlp value
+	size uint16      // Byte size of the useful cached data
+	node node        // Cached collapsed trie node, or raw rlp data
+}
+
+// NodeSet contains all dirty nodes collected during the commit operation.
+// Each node is keyed by path. It's not thread-safe to use.
+type NodeSet struct {
+	owner  common.Hash            // the identifier of the trie
+	paths  []string               // the paths of dirty nodes, sorted by insertion order
+	nodes  map[string]*memoryNode // the map of dirty nodes, keyed by node path
+	leaves []*leaf                // the list of dirty leaves
+}
+
+// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
+// from a specific account or storage trie. The owner is zero for the account
+// trie and the owning account address hash for storage tries.
+func NewNodeSet(owner common.Hash) *NodeSet {
+	return &NodeSet{
+		owner: owner,
+		nodes: make(map[string]*memoryNode),
+	}
+}
+
+// add caches node with provided path and node object.
+func (set *NodeSet) add(path string, node *memoryNode) {
+	set.paths = append(set.paths, path)
+	set.nodes[path] = node
+}
+
+// addLeaf caches the provided leaf node.
+func (set *NodeSet) addLeaf(node *leaf) {
+	set.leaves = append(set.leaves, node)
+}
+
+// Len returns the number of dirty nodes contained in the set.
+func (set *NodeSet) Len() int { + return len(set.nodes) +} + +// MergedNodeSet represents a merged dirty node set for a group of tries. +type MergedNodeSet struct { + sets map[common.Hash]*NodeSet +} + +// NewMergedNodeSet initializes an empty merged set. +func NewMergedNodeSet() *MergedNodeSet { + return &MergedNodeSet{sets: make(map[common.Hash]*NodeSet)} +} + +// NewWithNodeSet constructs a merged nodeset with the provided single set. +func NewWithNodeSet(set *NodeSet) *MergedNodeSet { + merged := NewMergedNodeSet() + merged.Merge(set) + return merged +} + +// Merge merges the provided dirty nodes of a trie into the set. The assumption +// is held that no duplicated set belonging to the same trie will be merged twice. +func (set *MergedNodeSet) Merge(other *NodeSet) error { + _, present := set.sets[other.owner] + if present { + return fmt.Errorf("duplicate trie for owner %#x", other.owner) + } + set.sets[other.owner] = other + return nil +} diff --git a/trie/preimages.go b/trie/preimages.go index 1502372f7c..da6c53baf5 100644 --- a/trie/preimages.go +++ b/trie/preimages.go @@ -29,9 +29,9 @@ package trie import ( "sync" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" ) const defaultPreimagesLimit = 4 * 1024 * 1024 // 4 MB diff --git a/trie/proof.go b/trie/proof.go index df78f1926e..b360aebb32 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -32,8 +32,9 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb" ) // Prove constructs a merkle proof for key. The result contains all encoded nodes @@ -43,11 +44,7 @@ import ( // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. -func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return ErrCommitted - } +func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { // Collect all nodes on the path to key. var ( prefix []byte @@ -73,20 +70,12 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { key = key[1:] nodes = append(nodes, n) case hashNode: - // Retrieve the specified node from the underlying node reader. - // trie.resolveAndTrack is not used since in that function the - // loaded blob will be tracked, while it's not required here since - // all loaded nodes won't be linked to trie at all and track nodes - // may lead to out-of-memory issue. - blob, err := t.reader.node(prefix, common.BytesToHash(n)) + var err error + tn, err = t.resolveHash(n, prefix) if err != nil { - log.Error("Unhandled trie error in Trie.Prove", "err", err) + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) return err } - // The raw-blob format nodes are loaded either from the - // clean cache or the database, they are all in their own - // copy and safe to use unsafe decoder. 
-	tn = mustDecodeNodeUnsafe(n, blob)
 		default:
 			panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
 		}
@@ -95,6 +84,10 @@
 	defer returnHasherToPool(hasher)

 	for i, n := range nodes {
+		if fromLevel > 0 {
+			fromLevel--
+			continue
+		}
 		var hn node
 		n, hn = hasher.proofHash(n)
 		if hash, ok := hn.(hashNode); ok || i == 0 {
@@ -117,8 +110,8 @@
 // If the trie does not contain a value for key, the returned proof contains all
 // nodes of the longest existing prefix of the key (at least the root node), ending
 // with the node that proves the absence of the key.
-func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
-	return t.trie.Prove(key, proofDb)
+func (t *StateTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
+	return t.trie.Prove(key, fromLevel, proofDb)
 }

 // VerifyProof checks merkle proofs. The given proof must contain the value for
@@ -356,9 +349,9 @@
 // unset removes all internal node references either the left most or right most.
 // It can meet these scenarios:
 //
-//   - The given path is existent in the trie, unset the associated nodes with the
-//     specific direction
-//   - The given path is non-existent in the trie
+// - The given path is existent in the trie, unset the associated nodes with the
+//   specific direction
+// - The given path is non-existent in the trie
 //   - the fork point is a fullnode, the corresponding child pointed by path
 //     is nil, return
 //   - the fork point is a shortnode, the shortnode is included in the range,
@@ -386,7 +379,7 @@
 	if removeLeft {
 		if bytes.Compare(cld.Key, key[pos:]) < 0 {
 			// The key of fork shortnode is less than the path
 			// (it belongs to the range), unset the entire
 			// branch. The parent must be a fullnode.
 			fn := parent.(*fullNode)
 			fn.Children[key[pos-1]] = nil
@@ -475,15 +468,15 @@
 // Expect the normal case, this function can also be used to verify the following
 // range proofs:
 //
-//   - All elements proof. In this case the proof can be nil, but the range should
-//     be all the leaves in the trie.
+// - All elements proof. In this case the proof can be nil, but the range should
+//   be all the leaves in the trie.
 //
-//   - One element proof. In this case no matter the edge proof is a non-existent
-//     proof or not, we can always verify the correctness of the proof.
+// - One element proof. In this case no matter the edge proof is a non-existent
+//   proof or not, we can always verify the correctness of the proof.
 //
-//   - Zero element proof. In this case a single non-existent proof is enough to prove.
-//     Besides, if there are still some other leaves available on the right side, then
-//     an error will be returned.
+// - Zero element proof. In this case a single non-existent proof is enough to prove.
+//   Besides, if there are still some other leaves available on the right side, then
+//   an error will be returned.
 //
 // Except returning the error to indicate the proof is valid or not, the function will
 // also return a flag to indicate whether there exists more accounts/slots in the trie.
 //
 // Note: This method does not verify that the proof is of minimal form.
If the input // proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful' // data, then the proof will still be accepted. -func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) { +func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) { if len(keys) != len(values) { return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values)) } @@ -511,7 +504,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu if proof == nil { tr := NewStackTrie(nil) for index, key := range keys { - tr.Update(key, values[index]) + tr.TryUpdate(key, values[index]) } if have, want := tr.Hash(), rootHash; have != want { return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have) @@ -530,7 +523,6 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu } return false, nil } - var lastKey = keys[len(keys)-1] // Special case, there is only one element and two edge keys are same. // In this case, we can't construct two edge paths. So handle it here. if len(keys) == 1 && bytes.Equal(firstKey, lastKey) { @@ -577,12 +569,12 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu } // Rebuild the trie with the leaf stream, the shape of trie // should be same with the original one. - tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()} + tr := &Trie{root: root, db: NewDatabase(rawdb.NewMemoryDatabase())} if empty { tr.root = nil } for index, key := range keys { - tr.Update(key, values[index]) + tr.TryUpdate(key, values[index]) } if tr.Hash() != rootHash { return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash()) diff --git a/trie/proof_test.go b/trie/proof_test.go index aae0a11b3a..a53007e654 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -30,33 +30,19 @@ import ( "bytes" crand "crypto/rand" "encoding/binary" - "fmt" mrand "math/rand" + "sort" "testing" + "time" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "golang.org/x/exp/slices" + "github.com/tenderly/coreth/core/rawdb" + "github.com/tenderly/coreth/ethdb/memorydb" ) -// Prng is a pseudo random number generator seeded by strong randomness. -// The randomness is printed on startup in order to make failures reproducible. 
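Since both `Prove` (new `fromLevel` parameter) and `VerifyRangeProof` (explicit last key) change shape in this diff, here is a hedged end-to-end sketch of the updated call pattern. The keys and values are illustrative, the import paths follow this diff, and the exported helpers (`NewEmpty`, `NewDatabase`) are assumed to match the usage in the tests below:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/ethdb/memorydb"
	"github.com/tenderly/coreth/trie"
)

func main() {
	tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
	keys := [][]byte{
		common.LeftPadBytes([]byte{1}, 32),
		common.LeftPadBytes([]byte{2}, 32),
	}
	vals := [][]byte{{0xa}, {0xb}}
	for i := range keys {
		tr.Update(keys[i], vals[i])
	}

	// Edge proofs for both range boundaries; fromLevel 0 proves from the root.
	proof := memorydb.New()
	if err := tr.Prove(keys[0], 0, proof); err != nil {
		panic(err)
	}
	if err := tr.Prove(keys[len(keys)-1], 0, proof); err != nil {
		panic(err)
	}

	// The verifier now takes the last key explicitly instead of deriving it.
	more, err := trie.VerifyRangeProof(tr.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
	fmt.Println(more, err)
}
```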
-var prng = initRnd() - -func initRnd() *mrand.Rand { - var seed [8]byte - crand.Read(seed[:]) - rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:])))) - fmt.Printf("Seed: %x\n", seed) - return rnd -} - -func randBytes(n int) []byte { - r := make([]byte, n) - prng.Read(r) - return r +func init() { + mrand.Seed(time.Now().Unix()) } // makeProvers creates Merkle trie provers based on different implementations to @@ -67,13 +53,13 @@ func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database { // Create a direct trie based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { proof := memorydb.New() - trie.Prove(key, proof) + trie.Prove(key, 0, proof) return proof }) // Create a leaf iterator based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { proof := memorydb.New() - if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) { + if it := NewIterator(trie.NodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) { for _, p := range it.Prove() { proof.Put(crypto.Keccak256(p), p) } @@ -104,7 +90,7 @@ func TestProof(t *testing.T) { } func TestOneElementProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) updateString(trie, "k", "v") for i, prover := range makeProvers(trie) { proof := prover([]byte("k")) @@ -155,12 +141,12 @@ func TestBadProof(t *testing.T) { // Tests that missing keys can also be proven. The test explicitly uses a single // entry trie and checks for missing keys both before and after the single entry. func TestMissingKeyProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) updateString(trie, "k", "v") for i, key := range []string{"a", "j", "l", "z"} { proof := memorydb.New() - trie.Prove([]byte(key), proof) + trie.Prove([]byte(key), 0, proof) if proof.Len() != 1 { t.Errorf("test %d: proof should have one element", i) @@ -175,24 +161,30 @@ func TestMissingKeyProof(t *testing.T) { } } +type entrySlice []*kv + +func (p entrySlice) Len() int { return len(p) } +func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } +func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + // TestRangeProof tests normal range proof with both edge proofs // as the existent proof. The test cases are generated randomly. 
 func TestRangeProof(t *testing.T) {
 	trie, vals := randomTrie(4096)
-	var entries []*kv
+	var entries entrySlice
 	for _, kv := range vals {
 		entries = append(entries, kv)
 	}
-	slices.SortFunc(entries, (*kv).cmp)
+	sort.Sort(entries)
 	for i := 0; i < 500; i++ {
 		start := mrand.Intn(len(entries))
 		end := mrand.Intn(len(entries)-start) + start + 1

 		proof := memorydb.New()
-		if err := trie.Prove(entries[start].k, proof); err != nil {
+		if err := trie.Prove(entries[start].k, 0, proof); err != nil {
 			t.Fatalf("Failed to prove the first node %v", err)
 		}
-		if err := trie.Prove(entries[end-1].k, proof); err != nil {
+		if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
 			t.Fatalf("Failed to prove the last node %v", err)
 		}
 		var keys [][]byte
@@ -201,7 +193,7 @@
 			keys = append(keys, entries[i].k)
 			vals = append(vals, entries[i].v)
 		}
-		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
+		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
 		if err != nil {
 			t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
 		}
@@ -212,11 +204,11 @@
 // The test cases are generated randomly.
 func TestRangeProofWithNonExistentProof(t *testing.T) {
 	trie, vals := randomTrie(4096)
-	var entries []*kv
+	var entries entrySlice
 	for _, kv := range vals {
 		entries = append(entries, kv)
 	}
-	slices.SortFunc(entries, (*kv).cmp)
+	sort.Sort(entries)
 	for i := 0; i < 500; i++ {
 		start := mrand.Intn(len(entries))
 		end := mrand.Intn(len(entries)-start) + start + 1
@@ -231,10 +223,19 @@
 		if bytes.Compare(first, entries[start].k) > 0 {
 			continue
 		}
-		if err := trie.Prove(first, proof); err != nil {
+		// Short circuit if the increased key is the same as the next key
+		last := increaseKey(common.CopyBytes(entries[end-1].k))
+		if end != len(entries) && bytes.Equal(last, entries[end].k) {
+			continue
+		}
+		// Short circuit if the increased key has overflowed
+		if bytes.Compare(last, entries[end-1].k) < 0 {
+			continue
+		}
+		if err := trie.Prove(first, 0, proof); err != nil {
 			t.Fatalf("Failed to prove the first node %v", err)
 		}
-		if err := trie.Prove(entries[end-1].k, proof); err != nil {
+		if err := trie.Prove(last, 0, proof); err != nil {
 			t.Fatalf("Failed to prove the last node %v", err)
 		}
 		var keys [][]byte
@@ -243,32 +244,53 @@
 			keys = append(keys, entries[i].k)
 			vals = append(vals, entries[i].v)
 		}
-		_, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
+		_, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof)
 		if err != nil {
 			t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
 		}
 	}
+	// Special case, two edge proofs for two edge keys.
+	proof := memorydb.New()
+	first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
+	last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
+	if err := trie.Prove(first, 0, proof); err != nil {
+		t.Fatalf("Failed to prove the first node %v", err)
+	}
+	if err := trie.Prove(last, 0, proof); err != nil {
+		t.Fatalf("Failed to prove the last node %v", err)
+	}
+	var k [][]byte
+	var v [][]byte
+	for i := 0; i < len(entries); i++ {
+		k = append(k, entries[i].k)
+		v = append(v, entries[i].v)
+	}
+	_, err := VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
+	if err != nil {
+		t.Fatal("Failed to verify whole range with non-existent edges")
+	}
 }

 // TestRangeProofWithInvalidNonExistentProof tests such scenarios:
 // - There exists a gap between the first element and the left edge proof
+// - There exists a gap between the last element and the right edge proof
 func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
 	trie, vals := randomTrie(4096)
-	var entries []*kv
+	var entries entrySlice
 	for _, kv := range vals {
 		entries = append(entries, kv)
 	}
-	slices.SortFunc(entries, (*kv).cmp)
+	sort.Sort(entries)

 	// Case 1
 	start, end := 100, 200
 	first := decreaseKey(common.CopyBytes(entries[start].k))

 	proof := memorydb.New()
-	if err := trie.Prove(first, proof); err != nil {
+	if err := trie.Prove(first, 0, proof); err != nil {
 		t.Fatalf("Failed to prove the first node %v", err)
 	}
-	if err := trie.Prove(entries[end-1].k, proof); err != nil {
+	if err := trie.Prove(entries[end-1].k, 0, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
 	start = 105 // Gap created
@@ -278,7 +300,29 @@
 		k = append(k, entries[i].k)
 		v = append(v, entries[i].v)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), first, k, v, proof)
+	_, err := VerifyRangeProof(trie.Hash(), first, k[len(k)-1], k, v, proof)
 	if err == nil {
 		t.Fatalf("Expected to detect the error, got nil")
 	}
+
+	// Case 2
+	start, end = 100, 200
+	last := increaseKey(common.CopyBytes(entries[end-1].k))
+	proof = memorydb.New()
+	if err := trie.Prove(entries[start].k, 0, proof); err != nil {
+		t.Fatalf("Failed to prove the first node %v", err)
+	}
+	if err := trie.Prove(last, 0, proof); err != nil {
+		t.Fatalf("Failed to prove the last node %v", err)
+	}
+	end = 195 // Capped slice
+	k = make([][]byte, 0)
+	v = make([][]byte, 0)
+	for i := start; i < end; i++ {
+		k = append(k, entries[i].k)
+		v = append(v, entries[i].v)
+	}
+	_, err = VerifyRangeProof(trie.Hash(), k[0], last, k, v, proof)
 	if err == nil {
 		t.Fatalf("Expected to detect the error, got nil")
 	}
@@ -289,20 +333,20 @@
 // non-existent one.
 func TestOneElementRangeProof(t *testing.T) {
 	trie, vals := randomTrie(4096)
-	var entries []*kv
+	var entries entrySlice
 	for _, kv := range vals {
 		entries = append(entries, kv)
 	}
-	slices.SortFunc(entries, (*kv).cmp)
+	sort.Sort(entries)

 	// One element with existent edge proof, both edge proofs
 	// point to the SAME key.
start := 1000 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - _, err := VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -311,13 +355,13 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 first := decreaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -326,13 +370,13 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 last := increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -341,32 +385,32 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } // Test the mini trie with only a single element. 
- tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) entry := &kv{randBytes(32), randBytes(20), false} - tinyTrie.MustUpdate(entry.k, entry.v) + tinyTrie.Update(entry.k, entry.v) first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last = entry.k proof = memorydb.New() - if err := tinyTrie.Prove(first, proof); err != nil { + if err := tinyTrie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := tinyTrie.Prove(last, proof); err != nil { + if err := tinyTrie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(tinyTrie.Hash(), first, [][]byte{entry.k}, [][]byte{entry.v}, proof) + _, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -376,11 +420,11 @@ func TestOneElementRangeProof(t *testing.T) { // The edge proofs can be nil. func TestAllElementsProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var k [][]byte var v [][]byte @@ -388,20 +432,20 @@ func TestAllElementsProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - _, err := VerifyRangeProof(trie.Hash(), nil, k, v, nil) + _, err := VerifyRangeProof(trie.Hash(), nil, nil, k, v, nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } // With edge proofs, it should still work. proof := memorydb.New() - if err := trie.Prove(entries[0].k, proof); err != nil { + if err := trie.Prove(entries[0].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil { + if err := trie.Prove(entries[len(entries)-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(trie.Hash(), k[0], k, v, proof) + _, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -409,13 +453,14 @@ func TestAllElementsProof(t *testing.T) { // Even with non-existent edge proofs, it should still work. proof = memorydb.New() first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() - if err := trie.Prove(first, proof); err != nil { + last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes() + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(trie.Hash(), first, k, v, proof) + _, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -424,22 +469,22 @@ func TestAllElementsProof(t *testing.T) { // TestSingleSideRangeProof tests the range starts from zero. 
func TestSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) - var entries []*kv + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} - trie.MustUpdate(value.k, value.v) + trie.Update(value.k, value.v) entries = append(entries, value) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1} for _, pos := range cases { proof := memorydb.New() - if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil { + if err := trie.Prove(common.Hash{}.Bytes(), 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[pos].k, proof); err != nil { + if err := trie.Prove(entries[pos].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } k := make([][]byte, 0) @@ -448,7 +493,43 @@ func TestSingleSideRangeProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k, v, proof) + _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k[len(k)-1], k, v, proof) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + } + } +} + +// TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff. +func TestReverseSingleSideRangeProof(t *testing.T) { + for i := 0; i < 64; i++ { + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + var entries entrySlice + for i := 0; i < 4096; i++ { + value := &kv{randBytes(32), randBytes(20), false} + trie.Update(value.k, value.v) + entries = append(entries, value) + } + sort.Sort(entries) + + var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1} + for _, pos := range cases { + proof := memorydb.New() + if err := trie.Prove(entries[pos].k, 0, proof); err != nil { + t.Fatalf("Failed to prove the first node %v", err) + } + last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + if err := trie.Prove(last.Bytes(), 0, proof); err != nil { + t.Fatalf("Failed to prove the last node %v", err) + } + k := make([][]byte, 0) + v := make([][]byte, 0) + for i := pos; i < len(entries); i++ { + k = append(k, entries[i].k) + v = append(v, entries[i].v) + } + _, err := VerifyRangeProof(trie.Hash(), k[0], last.Bytes(), k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -460,20 +541,20 @@ func TestSingleSideRangeProof(t *testing.T) { // The prover is expected to detect the error. 
func TestBadRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -482,7 +563,7 @@ func TestBadRangeProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - var first = keys[0] + var first, last = keys[0], keys[len(keys)-1] testcase := mrand.Intn(6) var index int switch testcase { @@ -497,7 +578,7 @@ func TestBadRangeProof(t *testing.T) { case 2: // Gapped entry slice index = mrand.Intn(end - start) - if (index == 0 && start < 100) || (index == end-start-1) { + if (index == 0 && start < 100) || (index == end-start-1 && end <= 100) { continue } keys = append(keys[:index], keys[index+1:]...) @@ -520,7 +601,7 @@ func TestBadRangeProof(t *testing.T) { index = mrand.Intn(end - start) vals[index] = nil } - _, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof) + _, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof) if err == nil { t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1) } @@ -530,19 +611,19 @@ func TestBadRangeProof(t *testing.T) { // TestGappedRangeProof focuses on the small trie with embedded nodes. // If the gapped node is embedded in the trie, it should be detected too. 
func TestGappedRangeProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) var entries []*kv // Sorted entries for i := byte(0); i < 10; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} - trie.MustUpdate(value.k, value.v) + trie.Update(value.k, value.v) entries = append(entries, value) } first, last := 2, 8 proof := memorydb.New() - if err := trie.Prove(entries[first].k, proof); err != nil { + if err := trie.Prove(entries[first].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[last-1].k, proof); err != nil { + if err := trie.Prove(entries[last-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -554,7 +635,7 @@ func TestGappedRangeProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof) + _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) if err == nil { t.Fatal("expect error, got nil") } @@ -563,53 +644,55 @@ func TestGappedRangeProof(t *testing.T) { // TestSameSideProofs tests the element is not in the range covered by proofs func TestSameSideProofs(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) pos := 1000 - first := common.CopyBytes(entries[0].k) + first := decreaseKey(common.CopyBytes(entries[pos].k)) + first = decreaseKey(first) + last := decreaseKey(common.CopyBytes(entries[pos].k)) proof := memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[2000].k, proof); err != nil { - t.Fatalf("Failed to prove the first node %v", err) + if err := trie.Prove(last, 0, proof); err != nil { + t.Fatalf("Failed to prove the last node %v", err) } - _, err := VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) + _, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) if err == nil { t.Fatalf("Expected error, got nil") } first = increaseKey(common.CopyBytes(entries[pos].k)) - last := increaseKey(common.CopyBytes(entries[pos].k)) + last = increaseKey(common.CopyBytes(entries[pos].k)) last = increaseKey(last) proof = memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - _, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) + _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) if err == nil { t.Fatalf("Expected error, got nil") } } func TestHasRightElement(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) - var entries []*kv + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} - trie.MustUpdate(value.k, value.v) + 
trie.Update(value.k, value.v)
 		entries = append(entries, value)
 	}
-	slices.SortFunc(entries, (*kv).cmp)
+	sort.Sort(entries)

 	var cases = []struct {
 		start int
@@ -622,29 +705,40 @@ func TestHasRightElement(t *testing.T) {
 		{50, 100, true},
 		{50, len(entries), false},               // No more element expected
 		{len(entries) - 1, len(entries), false}, // Single last element with two existent proofs(point to same key)
+		{len(entries) - 1, -1, false},           // Single last element with non-existent right proof
 		{0, len(entries), false},                // The whole set with existent left proof
 		{-1, len(entries), false},               // The whole set with non-existent left proof
+		{-1, -1, false},                         // The whole set with non-existent left/right proof
 	}
 	for _, c := range cases {
 		var (
 			firstKey []byte
+			lastKey  []byte
 			start    = c.start
 			end      = c.end
 			proof    = memorydb.New()
 		)
 		if c.start == -1 {
 			firstKey, start = common.Hash{}.Bytes(), 0
-			if err := trie.Prove(firstKey, proof); err != nil {
+			if err := trie.Prove(firstKey, 0, proof); err != nil {
 				t.Fatalf("Failed to prove the first node %v", err)
 			}
 		} else {
 			firstKey = entries[c.start].k
-			if err := trie.Prove(entries[c.start].k, proof); err != nil {
+			if err := trie.Prove(entries[c.start].k, 0, proof); err != nil {
 				t.Fatalf("Failed to prove the first node %v", err)
 			}
 		}
-		if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
-			t.Fatalf("Failed to prove the last node %v", err)
+		if c.end == -1 {
+			lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
+			if err := trie.Prove(lastKey, 0, proof); err != nil {
+				t.Fatalf("Failed to prove the last node %v", err)
+			}
+		} else {
+			lastKey = entries[c.end-1].k
+			if err := trie.Prove(entries[c.end-1].k, 0, proof); err != nil {
+				t.Fatalf("Failed to prove the last node %v", err)
+			}
 		}
 		k := make([][]byte, 0)
 		v := make([][]byte, 0)
@@ -652,7 +746,7 @@ func TestHasRightElement(t *testing.T) {
 			k = append(k, entries[i].k)
 			v = append(v, entries[i].v)
 		}
-		hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, k, v, proof)
+		hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, lastKey, k, v, proof)
 		if err != nil {
 			t.Fatalf("Expected no error, got %v", err)
 		}
@@ -666,11 +760,11 @@
 // The first edge proof must be a non-existent proof.
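// The -1 cases above stand for edges proved with keys that are not in the
// trie: 0x00..00 on the left and 0xff..ff on the right. A sketch of that
// whole-set convention under the same signatures (helper name hypothetical):
func wholeSetProofSketch(t *testing.T, trie *Trie, entries entrySlice) {
	proof := memorydb.New()
	first := common.Hash{}.Bytes() // sorts before every 32-byte key
	last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
	if err := trie.Prove(first, 0, proof); err != nil {
		t.Fatal(err)
	}
	if err := trie.Prove(last, 0, proof); err != nil {
		t.Fatal(err)
	}
	var keys, vals [][]byte
	for _, e := range entries {
		keys = append(keys, e.k)
		vals = append(vals, e.v)
	}
	// With both edges outside the key space, hasMore must come back false.
	hasMore, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof)
	if err != nil || hasMore {
		t.Fatalf("whole-set proof failed: hasMore=%v err=%v", hasMore, err)
	}
}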
func TestEmptyRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var cases = []struct { pos int @@ -682,10 +776,10 @@ func TestEmptyRangeProof(t *testing.T) { for _, c := range cases { proof := memorydb.New() first := increaseKey(common.CopyBytes(entries[c.pos].k)) - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, proof) + _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof) if c.err && err == nil { t.Fatalf("Expected error, got nil") } @@ -701,11 +795,11 @@ func TestEmptyRangeProof(t *testing.T) { func TestBloatedProof(t *testing.T) { // Use a small trie trie, kvs := nonRandomTrie(100) - var entries []*kv + var entries entrySlice for _, kv := range kvs { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var keys [][]byte var vals [][]byte @@ -713,7 +807,7 @@ func TestBloatedProof(t *testing.T) { // In the 'malicious' case, we add proofs for every single item // (but only one key/value pair used as leaf) for i, entry := range entries { - trie.Prove(entry.k, proof) + trie.Prove(entry.k, 0, proof) if i == 50 { keys = append(keys, entry.k) vals = append(vals, entry.v) @@ -722,10 +816,10 @@ func TestBloatedProof(t *testing.T) { // For reference, we use the same function, but _only_ prove the first // and last element want := memorydb.New() - trie.Prove(keys[0], want) - trie.Prove(keys[len(keys)-1], want) + trie.Prove(keys[0], 0, want) + trie.Prove(keys[len(keys)-1], 0, want) - if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof); err != nil { + if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil { t.Fatalf("expected bloated proof to succeed, got %v", err) } } @@ -735,11 +829,11 @@ func TestBloatedProof(t *testing.T) { // noop technically, but practically should be rejected. func TestEmptyValueRangeProof(t *testing.T) { trie, values := randomTrie(512) - var entries []*kv + var entries entrySlice for _, kv := range values { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) // Create a new entry with a slightly modified key mid := len(entries) / 2 @@ -756,10 +850,10 @@ func TestEmptyValueRangeProof(t *testing.T) { start, end := 1, len(entries)-1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -768,7 +862,7 @@ func TestEmptyValueRangeProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof) + _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) if err == nil { t.Fatalf("Expected failure on noop entry") } @@ -779,11 +873,11 @@ func TestEmptyValueRangeProof(t *testing.T) { // practically should be rejected. 
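// TestEmptyRangeProof above covers the degenerate range: no key/value pairs
// at all, just one edge proof showing that nothing follows the given key.
// A sketch under the same six-argument signature, with nil last/keys/vals;
// the helper and its parameter are hypothetical and assume the key sorts
// after every entry in the trie:
func emptyRangeSketch(t *testing.T, trie *Trie, keyPastLastEntry []byte) {
	proof := memorydb.New()
	if err := trie.Prove(keyPastLastEntry, 0, proof); err != nil {
		t.Fatal(err)
	}
	// The proof alone must demonstrate the absence of entries at or after
	// the key; any hidden entry makes verification fail.
	if _, err := VerifyRangeProof(trie.Hash(), keyPastLastEntry, nil, nil, nil, proof); err != nil {
		t.Fatalf("empty range proof rejected: %v", err)
	}
}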
func TestAllElementsEmptyValueRangeProof(t *testing.T) { trie, values := randomTrie(512) - var entries []*kv + var entries entrySlice for _, kv := range values { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) // Create a new entry with a slightly modified key mid := len(entries) / 2 @@ -803,7 +897,7 @@ func TestAllElementsEmptyValueRangeProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - _, err := VerifyRangeProof(trie.Hash(), nil, keys, vals, nil) + _, err := VerifyRangeProof(trie.Hash(), nil, nil, keys, vals, nil) if err == nil { t.Fatalf("Expected failure on noop entry") } @@ -851,7 +945,7 @@ func BenchmarkProve(b *testing.B) { for i := 0; i < b.N; i++ { kv := vals[keys[i%len(keys)]] proofs := memorydb.New() - if trie.Prove(kv.k, proofs); proofs.Len() == 0 { + if trie.Prove(kv.k, 0, proofs); proofs.Len() == 0 { b.Fatalf("zero length proof for %x", kv.k) } } @@ -865,7 +959,7 @@ func BenchmarkVerifyProof(b *testing.B) { for k := range vals { keys = append(keys, k) proof := memorydb.New() - trie.Prove([]byte(k), proof) + trie.Prove([]byte(k), 0, proof) proofs = append(proofs, proof) } @@ -885,19 +979,19 @@ func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b, func benchmarkVerifyRangeProof(b *testing.B, size int) { trie, vals := randomTrie(8192) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) start := 2 end := start + size proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { b.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { b.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -909,7 +1003,7 @@ func benchmarkVerifyRangeProof(b *testing.B, size int) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, proof) + _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, proof) if err != nil { b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err) } @@ -922,11 +1016,11 @@ func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof func benchmarkVerifyRangeNoProof(b *testing.B, size int) { trie, vals := randomTrie(size) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var keys [][]byte var values [][]byte @@ -936,7 +1030,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) { } b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, nil) + _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, nil) if err != nil { b.Fatalf("Expected no error, got %v", err) } @@ -944,26 +1038,32 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) { } func randomTrie(n int) (*Trie, map[string]*kv) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) vals := make(map[string]*kv) for i := byte(0); i < 100; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false} - 
trie.MustUpdate(value.k, value.v) - trie.MustUpdate(value2.k, value2.v) + trie.Update(value.k, value.v) + trie.Update(value2.k, value2.v) vals[string(value.k)] = value vals[string(value2.k)] = value2 } for i := 0; i < n; i++ { value := &kv{randBytes(32), randBytes(20), false} - trie.MustUpdate(value.k, value.v) + trie.Update(value.k, value.v) vals[string(value.k)] = value } return trie, vals } +func randBytes(n int) []byte { + r := make([]byte, n) + crand.Read(r) + return r +} + func nonRandomTrie(n int) (*Trie, map[string]*kv) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) vals := make(map[string]*kv) max := uint64(0xffffffffffffffff) for i := uint64(0); i < uint64(n); i++ { @@ -973,7 +1073,7 @@ func nonRandomTrie(n int) (*Trie, map[string]*kv) { binary.LittleEndian.PutUint64(value, i-max) //value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} elem := &kv{key, value, false} - trie.MustUpdate(elem.k, elem.v) + trie.Update(elem.k, elem.v) vals[string(elem.k)] = elem } return trie, vals @@ -988,21 +1088,22 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) { common.Hex2Bytes("02"), common.Hex2Bytes("03"), } - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i, key := range keys { - trie.MustUpdate(key, vals[i]) + trie.Update(key, vals[i]) } root := trie.Hash() proof := memorydb.New() start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000") - if err := trie.Prove(start, proof); err != nil { + end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + if err := trie.Prove(start, 0, proof); err != nil { t.Fatalf("failed to prove start: %v", err) } - if err := trie.Prove(keys[len(keys)-1], proof); err != nil { + if err := trie.Prove(end, 0, proof); err != nil { t.Fatalf("failed to prove end: %v", err) } - more, err := VerifyRangeProof(root, start, keys, vals, proof) + more, err := VerifyRangeProof(root, start, end, keys, vals, proof) if err != nil { t.Fatalf("failed to verify range proof: %v", err) } diff --git a/trie/secure_trie.go b/trie/secure_trie.go index bb81f85819..1d2dbab929 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -27,10 +27,12 @@ package trie import ( - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie/trienode" + "fmt" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/log" + "github.com/tenderly/coreth/core/types" + "github.com/tenderly/coreth/rlp" ) // SecureTrie is the old name of StateTrie. @@ -39,23 +41,18 @@ type SecureTrie = StateTrie // NewSecure creates a new StateTrie. // Deprecated: use NewStateTrie. -func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) { - id := &ID{ - StateRoot: stateRoot, - Owner: owner, - Root: root, - } - return NewStateTrie(id, db) +func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) { + return NewStateTrie(owner, root, db) } -// StateTrie wraps a trie with key hashing. In a stateTrie trie, all +// StateTrie wraps a trie with key hashing. In a secure trie, all // access operations hash the key using keccak256. This prevents // calling code from creating long chains of nodes that // increase the access time. 
//
 // Contrary to a regular trie, a StateTrie can only be created with
 // New and must have an attached database. The database also stores
-// the preimage of each key if preimage recording is enabled.
+// the preimage of each key.
 //
 // StateTrie is not safe for concurrent use.
 type StateTrie struct {
@@ -66,105 +63,120 @@ type StateTrie struct {
 	secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch
 }

-// NewStateTrie creates a trie with an existing root node from a backing database.
+// NewStateTrie creates a trie with an existing root node from a backing database
+// and optional intermediate in-memory node pool.
 //
 // If root is the zero hash or the sha3 hash of an empty string, the
 // trie is initially empty. Otherwise, New will panic if db is nil
 // and returns MissingNodeError if the root node cannot be found.
-func NewStateTrie(id *ID, db *Database) (*StateTrie, error) {
+//
+// Accessing the trie loads nodes from the database or node pool on demand.
+func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie, error) {
 	if db == nil {
 		panic("trie.NewStateTrie called without a database")
 	}
-	trie, err := New(id, db)
+	trie, err := New(owner, root, db)
 	if err != nil {
 		return nil, err
 	}
 	return &StateTrie{trie: *trie, preimages: db.preimages}, nil
 }

-// MustGet returns the value for key stored in the trie.
+// Get returns the value for key stored in the trie.
 // The value bytes must not be modified by the caller.
-//
-// This function will omit any encountered error but just
-// print out an error message.
-func (t *StateTrie) MustGet(key []byte) []byte {
-	return t.trie.MustGet(t.hashKey(key))
+func (t *StateTrie) Get(key []byte) []byte {
	res, err := t.TryGet(key)
	if err != nil {
		log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
	}
	return res
 }

-// GetStorage attempts to retrieve a storage slot with provided account address
-// and slot key. The value bytes must not be modified by the caller.
-// If the specified storage slot is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
-	enc, err := t.trie.Get(t.hashKey(key))
-	if err != nil || len(enc) == 0 {
-		return nil, err
-	}
-	_, content, _, err := rlp.Split(enc)
-	return content, err
+// TryGet returns the value for key stored in the trie.
+// The value bytes must not be modified by the caller.
+// If a node was not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) TryGet(key []byte) ([]byte, error) {
+	return t.trie.TryGet(t.hashKey(key))
 }

-// GetAccount attempts to retrieve an account with provided account address.
-// If the specified account is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) { - res, err := t.trie.Get(t.hashKey(address.Bytes())) - if res == nil || err != nil { - return nil, err +func (t *StateTrie) TryGetAccount(key []byte) (*types.StateAccount, error) { + var ret types.StateAccount + res, err := t.trie.TryGet(t.hashKey(key)) + if err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + return &ret, err } - ret := new(types.StateAccount) - err = rlp.DecodeBytes(res, ret) - return ret, err + if res == nil { + return nil, nil + } + err = rlp.DecodeBytes(res, &ret) + return &ret, err } -// GetAccountByHash does the same thing as GetAccount, however it expects an -// account hash that is the hash of address. This constitutes an abstraction -// leak, since the client code needs to know the key format. -func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) { - res, err := t.trie.Get(addrHash.Bytes()) - if res == nil || err != nil { - return nil, err +// TryGetAccountWithPreHashedKey does the same thing as TryGetAccount, however +// it expects a key that is already hashed. This constitutes an abstraction leak, +// since the client code needs to know the key format. +func (t *StateTrie) TryGetAccountWithPreHashedKey(key []byte) (*types.StateAccount, error) { + var ret types.StateAccount + res, err := t.trie.TryGet(key) + if err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + return &ret, err } - ret := new(types.StateAccount) - err = rlp.DecodeBytes(res, ret) - return ret, err + if res == nil { + return nil, nil + } + err = rlp.DecodeBytes(res, &ret) + return &ret, err } -// GetNode attempts to retrieve a trie node by compact-encoded path. It is not +// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not // possible to use keybyte-encoding as the path might contain odd nibbles. -// If the specified trie node is not in the trie, nil will be returned. -// If a trie node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) { - return t.trie.GetNode(path) +func (t *StateTrie) TryGetNode(path []byte) ([]byte, int, error) { + return t.trie.TryGetNode(path) +} + +// TryUpdateAccount account will abstract the write of an account to the +// secure trie. +func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { + hk := t.hashKey(key) + data, err := rlp.EncodeToBytes(acc) + if err != nil { + return err + } + if err := t.trie.TryUpdate(hk, data); err != nil { + return err + } + t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) + return nil } -// MustUpdate associates key with value in the trie. Subsequent calls to +// Update associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. // // The value bytes must not be modified by the caller while they are // stored in the trie. -// -// This function will omit any encountered error but just print out an -// error message. -func (t *StateTrie) MustUpdate(key, value []byte) { - hk := t.hashKey(key) - t.trie.MustUpdate(hk, value) - t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) +func (t *StateTrie) Update(key, value []byte) { + if err := t.TryUpdate(key, value); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + } } -// UpdateStorage associates key with value in the trie. 
Subsequent calls to +// TryUpdate associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. // // The value bytes must not be modified by the caller while they are // stored in the trie. // -// If a node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { +// If a node was not found in the database, a MissingNodeError is returned. +func (t *StateTrie) TryUpdate(key, value []byte) error { hk := t.hashKey(key) - v, _ := rlp.EncodeToBytes(value) - err := t.trie.Update(hk, v) + err := t.trie.TryUpdate(hk, value) if err != nil { return err } @@ -172,46 +184,26 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { return nil } -// UpdateAccount will abstract the write of an account to the secure trie. -func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { - hk := t.hashKey(address.Bytes()) - data, err := rlp.EncodeToBytes(acc) - if err != nil { - return err +// Delete removes any existing value for key from the trie. +func (t *StateTrie) Delete(key []byte) { + if err := t.TryDelete(key); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) } - if err := t.trie.Update(hk, data); err != nil { - return err - } - t.getSecKeyCache()[string(hk)] = address.Bytes() - return nil -} - -func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { - return nil } -// MustDelete removes any existing value for key from the trie. This function -// will omit any encountered error but just print out an error message. -func (t *StateTrie) MustDelete(key []byte) { +// TryDelete removes any existing value for key from the trie. +// If a node was not found in the database, a MissingNodeError is returned. +func (t *StateTrie) TryDelete(key []byte) error { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) - t.trie.MustDelete(hk) + return t.trie.TryDelete(hk) } -// DeleteStorage removes any existing storage slot from the trie. -// If the specified trie node is not in the trie, nothing will be changed. -// If a node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error { +// TryDeleteAccount abstracts an account deletion from the trie. +func (t *StateTrie) TryDeleteAccount(key []byte) error { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) - return t.trie.Delete(hk) -} - -// DeleteAccount abstracts an account deletion from the trie. -func (t *StateTrie) DeleteAccount(address common.Address) error { - hk := t.hashKey(address.Bytes()) - delete(t.getSecKeyCache(), string(hk)) - return t.trie.Delete(hk) + return t.trie.TryDelete(hk) } // GetKey returns the sha3 preimage of a hashed key that was @@ -226,14 +218,14 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { return t.preimages.preimage(common.BytesToHash(shaKey)) } -// Commit collects all dirty nodes in the trie and replaces them with the -// corresponding node hash. All collected nodes (including dirty leaves if +// Commit collects all dirty nodes in the trie and replace them with the +// corresponding node hash. All collected nodes(including dirty leaves if // collectLeaf is true) will be encapsulated into a nodeset for return. -// The returned nodeset can be nil if the trie is clean (nothing to commit). 
+// The returned nodeset can be nil if the trie is clean(nothing to commit). // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage -func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { +func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { // Write all the pre-images to the actual disk database if len(t.getSecKeyCache()) > 0 { if t.preimages != nil { @@ -245,17 +237,17 @@ func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, er } t.secKeyCache = make(map[string][]byte) } - // Commit the trie and return its modified nodeset. + // Commit the trie to its intermediate node database return t.trie.Commit(collectLeaf) } -// Hash returns the root hash of StateTrie. It does not write to the +// Hash returns the root hash of SecureTrie. It does not write to the // database and can be used even if the trie doesn't have one. func (t *StateTrie) Hash() common.Hash { return t.trie.Hash() } -// Copy returns a copy of StateTrie. +// Copy returns a copy of SecureTrie. func (t *StateTrie) Copy() *StateTrie { return &StateTrie{ trie: *t.trie.Copy(), @@ -264,18 +256,12 @@ func (t *StateTrie) Copy() *StateTrie { } } -// NodeIterator returns an iterator that returns nodes of the underlying trie. -// Iteration starts at the key after the given start key. -func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) { +// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration +// starts at the key after the given start key. +func (t *StateTrie) NodeIterator(start []byte) NodeIterator { return t.trie.NodeIterator(start) } -// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered -// error but just print out an error message. -func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator { - return t.trie.MustNodeIterator(start) -} - // hashKey returns the hash of key as an ephemeral buffer. // The caller must not hold onto the return value because it will become // invalid on the next call to hashKey or secKey. diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index 8081f9b500..af979a9e24 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -33,23 +33,21 @@ import ( "sync" "testing" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/tenderly/coreth/ethdb/memorydb" ) func newEmptySecure() *StateTrie { - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil)) + trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, NewDatabase(memorydb.New())) return trie } // makeTestStateTrie creates a large enough secure trie for testing. 
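// The secure-trie surface restored above pairs each accessor with a Try*
// variant that surfaces errors. A sketch of the account round-trip it is
// built for, under the signatures in this diff; the address and account
// fields below are illustrative only (a real account also sets Root and
// CodeHash):
func accountRoundTripSketch() error {
	triedb := NewDatabase(memorydb.New())
	tr, err := NewStateTrie(common.Hash{}, common.Hash{}, triedb)
	if err != nil {
		return err
	}
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	acc := &types.StateAccount{Nonce: 1, Balance: big.NewInt(100)}
	// Keys are keccak256-hashed internally, so callers pass raw addresses.
	if err := tr.TryUpdateAccount(addr.Bytes(), acc); err != nil {
		return err
	}
	got, err := tr.TryGetAccount(addr.Bytes())
	if err != nil || got == nil || got.Nonce != 1 {
		return fmt.Errorf("unexpected account %+v: %v", got, err)
	}
	return nil
}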
func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) + triedb := NewDatabase(memorydb.New()) + trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -57,25 +55,28 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i} content[string(key)] = val - trie.MustUpdate(key, val) + trie.Update(key, val) key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i} content[string(key)] = val - trie.MustUpdate(key, val) + trie.Update(key, val) // Add some other data to inflate the trie for j := byte(3); j < 13; j++ { key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i} content[string(key)] = val - trie.MustUpdate(key, val) + trie.Update(key, val) } } - root, nodes, _ := trie.Commit(false) - if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + root, nodes, err := trie.Commit(false) + if err != nil { + panic(fmt.Errorf("failed to commit trie %v", err)) + } + if err := triedb.Update(NewWithNodeSet(nodes)); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } // Re-create the trie based on the new state - trie, _ = NewStateTrie(TrieID(root), triedb) + trie, _ = NewSecure(common.Hash{}, root, triedb) return triedb, trie, content } @@ -93,9 +94,9 @@ func TestSecureDelete(t *testing.T) { } for _, val := range vals { if val.v != "" { - trie.MustUpdate([]byte(val.k), []byte(val.v)) + trie.Update([]byte(val.k), []byte(val.v)) } else { - trie.MustDelete([]byte(val.k)) + trie.Delete([]byte(val.k)) } } hash := trie.Hash() @@ -107,13 +108,13 @@ func TestSecureDelete(t *testing.T) { func TestSecureGetKey(t *testing.T) { trie := newEmptySecure() - trie.MustUpdate([]byte("foo"), []byte("bar")) + trie.Update([]byte("foo"), []byte("bar")) key := []byte("foo") value := []byte("bar") seckey := crypto.Keccak256(key) - if !bytes.Equal(trie.MustGet(key), value) { + if !bytes.Equal(trie.Get(key), value) { t.Errorf("Get did not return bar") } if k := trie.GetKey(seckey); !bytes.Equal(k, key) { @@ -130,7 +131,7 @@ func TestStateTrieConcurrency(t *testing.T) { for i := 0; i < threads; i++ { tries[i] = trie.Copy() } - // Start a batch of goroutines interacting with the trie + // Start a batch of goroutines interactng with the trie pend := new(sync.WaitGroup) pend.Add(threads) for i := 0; i < threads; i++ { @@ -140,15 +141,15 @@ func TestStateTrieConcurrency(t *testing.T) { for j := byte(0); j < 255; j++ { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{byte(index), 1, j}, 32), []byte{j} - tries[index].MustUpdate(key, val) + tries[index].Update(key, val) key, val = common.LeftPadBytes([]byte{byte(index), 2, j}, 32), []byte{j} - tries[index].MustUpdate(key, val) + tries[index].Update(key, val) // Add some other data to inflate the trie for k := byte(3); k < 13; k++ { key, val = common.LeftPadBytes([]byte{byte(index), k, j}, 32), []byte{k, j} - tries[index].MustUpdate(key, val) + tries[index].Update(key, val) } } tries[index].Commit(false) diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index 3a7767d360..ff52f60263 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -29,15 +29,11 @@ package trie import ( "bytes" "math/big" - "math/rand" 
"testing" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/trie/testutil" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/assert" - "golang.org/x/exp/slices" + "github.com/tenderly/coreth/ethdb/memorydb" ) func TestStackTrieInsertAndHash(t *testing.T) { @@ -179,47 +175,16 @@ func TestStackTrieInsertAndHash(t *testing.T) { {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"}, {"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"}, }, - { // branch node with short values - {"01", "a", "b48605025f5f4b129d40a420e721aa7d504487f015fce85b96e52126365ef7dc"}, - {"80", "b", "2dc6b680daf74db067cb7aeaad73265ded93d96fce190fcbf64f498d475672ab"}, - {"ee", "c", "017dc705a54ac5328dd263fa1bae68d655310fb3e3f7b7bc57e9a43ddf99c4bf"}, - {"ff", "d", "bd5a3584d271d459bd4eb95247b2fc88656b3671b60c1125ffe7bc0b689470d0"}, - }, - { // ext node with short branch node, then becoming long - {"a0", "a", "a83e028cb1e4365935661a9fd36a5c65c30b9ab416eaa877424146ca2a69d088"}, - {"a1", "b", "f586a4639b07b01798ca65e05c253b75d51135ebfbf6f8d6e87c0435089e65f0"}, - {"a2", "c", "63e297c295c008e09a8d531e18d57f270b6bc403e23179b915429db948cd62e3"}, - {"a3", "d", "94a7b721535578e9381f1f4e4b6ec29f8bdc5f0458a30320684c562f5d47b4b5"}, - {"a4", "e", "4b7e66d1c81965cdbe8fab8295ef56bc57fefdc5733d4782d2f8baf630f083c6"}, - {"a5", "f", "2997e7b502198ce1783b5277faacf52b25844fb55a99b63e88bdbbafac573106"}, - {"a6", "g", "bee629dd27a40772b2e1a67ec6db270d26acdf8d3b674dfae27866ad6ae1f48b"}, - }, - { // branch node with short values, then long ones - {"a001", "v1", "b9cc982d995392b51e6787f1915f0b88efd4ad8b30f138da0a3e2242f2323e35"}, - {"b002", "v2", "a7b474bc77ef5097096fa0ee6298fdae8928c0bc3724e7311cd0fa9ed1942fc7"}, - {"c003", "v___________________________3", "dceb5bb7c92b0e348df988a8d9fc36b101397e38ebd405df55ba6ee5f14a264a"}, - {"d004", "v___________________________4", "36e60ecb86b9626165e1c6543c42ecbe4d83bca58e8e1124746961511fce362a"}, - }, - { // ext node to branch node with short values, then long ones - {"8002", "v1", "3258fcb3e9e7d7234ecd3b8d4743999e4ab3a21592565e0a5ca64c141e8620d9"}, - {"8004", "v2", "b6cb95b7024a83c17624a3c9bed09b4b5e8ed426f49f54b8ad13c39028b1e75a"}, - {"8008", "v___________________________3", "c769d82963abe6f0900bf69754738eeb2f84559777cfa87a44f54e1aab417871"}, - {"800d", "v___________________________4", "1cad1fdaab1a6fa95d7b780fd680030e423eb76669971368ba04797a8d9cdfc9"}, - }, - { // ext node with a child of size 31 (Y) and branch node with a child of size 31 (X) - {"000001", "ZZZZZZZZZ", "cef154b87c03c563408520ff9b26923c360cbc3ddb590c079bedeeb25a8c9c77"}, - {"000002", "Y", "2130735e600f612f6e657a32bd7be64ddcaec6512c5694844b19de713922895d"}, - {"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"}, - }, } + st := NewStackTrie(nil) for i, test := range tests { // The StackTrie does not allow Insert(), Hash(), Insert(), ... // so we will create new trie for every sequence length of inserts. 
for l := 1; l <= len(test); l++ { - st := NewStackTrie(nil) + st.Reset() for j := 0; j < l; j++ { kv := &test[j] - if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil { + if err := st.TryUpdate(common.FromHex(kv.K), []byte(kv.V)); err != nil { t.Fatal(err) } } @@ -233,13 +198,13 @@ func TestStackTrieInsertAndHash(t *testing.T) { func TestSizeBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + nt := NewEmpty(NewDatabase(memorydb.New())) leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") - nt.Update(leaf, value) - st.Update(leaf, value) + nt.TryUpdate(leaf, value) + st.TryUpdate(leaf, value) if nt.Hash() != st.Hash() { t.Fatalf("error %x != %x", st.Hash(), nt.Hash()) @@ -248,7 +213,7 @@ func TestSizeBug(t *testing.T) { func TestEmptyBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + nt := NewEmpty(NewDatabase(memorydb.New())) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -263,8 +228,8 @@ func TestEmptyBug(t *testing.T) { } for _, kv := range kvs { - nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) - st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { @@ -274,7 +239,7 @@ func TestEmptyBug(t *testing.T) { func TestValLength56(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + nt := NewEmpty(NewDatabase(memorydb.New())) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -286,8 +251,8 @@ func TestValLength56(t *testing.T) { } for _, kv := range kvs { - nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) - st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { @@ -299,7 +264,7 @@ func TestValLength56(t *testing.T) { // which causes a lot of node-within-node. This case was found via fuzzing. 
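// Each of these regression tests checks the same invariant: a StackTrie fed
// keys in ascending order must produce the root hash of a regular trie
// holding identical data. A condensed sketch of that invariant (helper name
// hypothetical):
func stackTrieMatchesTrieSketch() bool {
	st := NewStackTrie(nil)
	nt := NewEmpty(NewDatabase(memorydb.New()))
	for i := byte(1); i <= 16; i++ {
		key := common.LeftPadBytes([]byte{i}, 32) // ascending key order
		st.TryUpdate(key, []byte{i})
		nt.TryUpdate(key, []byte{i})
	}
	return st.Hash() == nt.Hash()
}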
func TestUpdateSmallNodes(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + nt := NewEmpty(NewDatabase(memorydb.New())) kvs := []struct { K string V string @@ -308,8 +273,8 @@ func TestUpdateSmallNodes(t *testing.T) { {"65", "3000"}, // stacktrie.Update } for _, kv := range kvs { - nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) - st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { t.Fatalf("error %x != %x", st.Hash(), nt.Hash()) @@ -327,7 +292,7 @@ func TestUpdateSmallNodes(t *testing.T) { func TestUpdateVariableKeys(t *testing.T) { t.SkipNow() st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + nt := NewEmpty(NewDatabase(memorydb.New())) kvs := []struct { K string V string @@ -336,8 +301,8 @@ func TestUpdateVariableKeys(t *testing.T) { {"0x3330353463653239356131303167617430", "313131"}, } for _, kv := range kvs { - nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) - st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { t.Fatalf("error %x != %x", st.Hash(), nt.Hash()) @@ -354,7 +319,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { value := make([]byte, 1, 100) value[0] = 0x2 want := common.CopyBytes(value) - st.Update([]byte{0x01}, value) + st.TryUpdate([]byte{0x01}, value) st.Hash() if have := value; !bytes.Equal(have, want) { t.Fatalf("tiny trie: have %#x want %#x", have, want) @@ -375,7 +340,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { for i := 0; i < 1000; i++ { key := common.BigToHash(keyB) value := getValue(i) - st.Update(key.Bytes(), value) + st.TryUpdate(key.Bytes(), value) vals = append(vals, value) keyB = keyB.Add(keyB, keyDelta) keyDelta.Add(keyDelta, common.Big1) @@ -391,107 +356,47 @@ func TestStacktrieNotModifyValues(t *testing.T) { } } -func buildPartialTree(entries []*kv, t *testing.T) map[string]common.Hash { - var ( - options = NewStackTrieOptions() - nodes = make(map[string]common.Hash) - ) +// TestStacktrieSerialization tests that the stacktrie works well if we +// serialize/unserialize it a lot +func TestStacktrieSerialization(t *testing.T) { var ( - first int - last = len(entries) - 1 - - noLeft bool - noRight bool + st = NewStackTrie(nil) + nt = NewEmpty(NewDatabase(memorydb.New())) + keyB = big.NewInt(1) + keyDelta = big.NewInt(1) + vals [][]byte + keys [][]byte ) - // Enter split mode if there are at least two elements - if rand.Intn(5) != 0 { - for { - first = rand.Intn(len(entries)) - last = rand.Intn(len(entries)) - if first <= last { - break - } - } - if first != 0 { - noLeft = true - } - if last != len(entries)-1 { - noRight = true + getValue := func(i int) []byte { + if i%2 == 0 { // large + return crypto.Keccak256(big.NewInt(int64(i)).Bytes()) + } else { //small + return big.NewInt(int64(i)).Bytes() } } - options = options.WithSkipBoundary(noLeft, noRight, nil) - options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { - nodes[string(path)] = hash - }) - tr := NewStackTrie(options) - - for i := first; i <= last; i++ { - tr.MustUpdate(entries[i].k, entries[i].v) + for i := 0; i < 10; i++ { + vals = append(vals, getValue(i)) + keys = append(keys, common.BigToHash(keyB).Bytes()) + keyB = keyB.Add(keyB, keyDelta) + 
keyDelta.Add(keyDelta, common.Big1) + } + for i, k := range keys { + nt.TryUpdate(k, common.CopyBytes(vals[i])) } - tr.Commit() - return nodes -} - -func TestPartialStackTrie(t *testing.T) { - for round := 0; round < 100; round++ { - var ( - n = rand.Intn(100) + 1 - entries []*kv - ) - for i := 0; i < n; i++ { - var val []byte - if rand.Intn(3) == 0 { - val = testutil.RandBytes(3) - } else { - val = testutil.RandBytes(32) - } - entries = append(entries, &kv{ - k: testutil.RandBytes(32), - v: val, - }) - } - slices.SortFunc(entries, (*kv).cmp) - - var ( - nodes = make(map[string]common.Hash) - options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) { - nodes[string(path)] = hash - }) - ) - tr := NewStackTrie(options) - for i := 0; i < len(entries); i++ { - tr.MustUpdate(entries[i].k, entries[i].v) + for i, k := range keys { + blob, err := st.MarshalBinary() + if err != nil { + t.Fatal(err) } - tr.Commit() - - for j := 0; j < 100; j++ { - for path, hash := range buildPartialTree(entries, t) { - if nodes[path] != hash { - t.Errorf("%v, want %x, got %x", []byte(path), nodes[path], hash) - } - } + newSt, err := NewFromBinary(blob, nil) + if err != nil { + t.Fatal(err) } + st = newSt + st.TryUpdate(k, common.CopyBytes(vals[i])) } -} - -func TestStackTrieErrors(t *testing.T) { - s := NewStackTrie(nil) - // Deletion - if err := s.Update(nil, nil); err == nil { - t.Fatal("expected error") - } - if err := s.Update(nil, []byte{}); err == nil { - t.Fatal("expected error") - } - if err := s.Update([]byte{0xa}, []byte{}); err == nil { - t.Fatal("expected error") + if have, want := st.Hash(), nt.Hash(); have != want { + t.Fatalf("have %#x want %#x", have, want) } - // Non-ascending keys (going backwards or repeating) - assert.Nil(t, s.Update([]byte{0xaa}, []byte{0xa})) - assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xa}), "repeat insert same key") - assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key") - assert.Nil(t, s.Update([]byte{0xab}, []byte{0xa})) - assert.NotNil(t, s.Update([]byte{0x10}, []byte{0xb}), "out of order insert") - assert.NotNil(t, s.Update([]byte{0xaa}, []byte{0xb}), "repeat insert same key") } diff --git a/trie/sync_test.go b/trie/sync_test.go index 71cf0d5530..ef4159f2c6 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -29,19 +29,15 @@ package trie import ( "fmt" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/tenderly/coreth/ethdb/memorydb" ) // makeTestTrie create a sample test trie to test node-wise reconstruction. 
-func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[string][]byte) { +func makeTestTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - db := rawdb.NewMemoryDatabase() - triedb := newTestDatabase(db, scheme) - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) + triedb := NewDatabase(memorydb.New()) + trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -49,49 +45,27 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i} content[string(key)] = val - trie.MustUpdate(key, val) + trie.Update(key, val) key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i} content[string(key)] = val - trie.MustUpdate(key, val) + trie.Update(key, val) // Add some other data to inflate the trie for j := byte(3); j < 13; j++ { key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i} content[string(key)] = val - trie.MustUpdate(key, val) + trie.Update(key, val) } } - root, nodes, _ := trie.Commit(false) - if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { - panic(fmt.Errorf("failed to commit db %v", err)) + root, nodes, err := trie.Commit(false) + if err != nil { + panic(fmt.Errorf("failed to commit trie %v", err)) } - if err := triedb.Commit(root, false); err != nil { - panic(err) + if err := triedb.Update(NewWithNodeSet(nodes)); err != nil { + panic(fmt.Errorf("failed to commit db %v", err)) } // Re-create the trie based on the new state - trie, _ = NewStateTrie(TrieID(root), triedb) - return db, triedb, trie, content -} - -// checkTrieConsistency checks that all nodes in a trie are indeed present. -func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error { - ndb := newTestDatabase(db, scheme) - var it NodeIterator - if rawTrie { - trie, err := New(TrieID(root), ndb) - if err != nil { - return nil // Consider a non existent state consistent - } - it = trie.MustNodeIterator(nil) - } else { - trie, err := NewStateTrie(TrieID(root), ndb) - if err != nil { - return nil // Consider a non existent state consistent - } - it = trie.MustNodeIterator(nil) - } - for it.Next(true) { - } - return it.Error() + trie, _ = NewSecure(common.Hash{}, root, triedb) + return triedb, trie, content } diff --git a/trie/trie.go b/trie/trie.go index 66c3d60627..c6433bd692 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -32,12 +32,31 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") +) + +// LeafCallback is a callback type invoked when a trie operation reaches a leaf +// node. +// +// The keys is a path tuple identifying a particular trie node either in a single +// trie (account) or a layered trie (account -> storage). Each key in the tuple +// is in the raw format(32 bytes). +// +// The path is a composite hexary path identifying the trie node. All the key +// bytes are converted to the hexary nibbles and composited with the parent path +// if the trie node is in a layered trie. 
+// +// It's used by state sync and commit to allow handling external references +// between account and storage tries. And also it's used in the state healing +// for extracting the raw states(leaf nodes) with corresponding paths. +type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error + // Trie is a Merkle Patricia Trie. Use New to create a trie that sits on // top of a database. Whenever trie performs a commit operation, the generated // nodes will be gathered and returned in a set. Once the trie is committed, @@ -49,17 +68,14 @@ type Trie struct { root node owner common.Hash - // Flag whether the commit operation is already performed. If so the - // trie is not usable(latest states is invisible). - committed bool - // Keep track of the number leaves which have been inserted since the last // hashing operation. This number will not directly map to the number of // actually unhashed nodes. unhashed int - // reader is the handler trie can retrieve nodes from. - reader *trieReader + // db is the handler trie can retrieve nodes from. It's + // only for reading purpose and not available for writing. + db *Database // tracer is the tool to track the trie changes. // It will be reset after each commit operation. @@ -74,33 +90,29 @@ func (t *Trie) newFlag() nodeFlag { // Copy returns a copy of Trie. func (t *Trie) Copy() *Trie { return &Trie{ - root: t.root, - owner: t.owner, - committed: t.committed, - unhashed: t.unhashed, - reader: t.reader, - tracer: t.tracer.copy(), + root: t.root, + owner: t.owner, + unhashed: t.unhashed, + db: t.db, + tracer: t.tracer.copy(), } } -// New creates the trie instance with provided trie id and the read-only -// database. The state specified by trie id must be available, otherwise -// an error will be returned. The trie root specified by trie id can be -// zero hash or the sha3 hash of an empty string, then trie is initially -// empty, otherwise, the root node must be present in database or returns -// a MissingNodeError if not. -func New(id *ID, db *Database) (*Trie, error) { - reader, err := newTrieReader(id.StateRoot, id.Owner, db) - if err != nil { - return nil, err - } +// New creates a trie with an existing root node from db and an assigned +// owner for storage proximity. +// +// If root is the zero hash or the sha3 hash of an empty string, the +// trie is initially empty and does not require a database. Otherwise, +// New will panic if db is nil and returns a MissingNodeError if root does +// not exist in the database. Accessing the trie loads nodes from db on demand. +func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) { trie := &Trie{ - owner: id.Owner, - reader: reader, - tracer: newTracer(), + owner: owner, + db: db, + //tracer: newTracer(), } - if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash { - rootnode, err := trie.resolveAndTrack(id.Root[:], nil) + if root != (common.Hash{}) && root != emptyRoot { + rootnode, err := trie.resolveHash(root[:], nil) if err != nil { return nil, err } @@ -111,58 +123,38 @@ func New(id *ID, db *Database) (*Trie, error) { // NewEmpty is a shortcut to create empty tree. It's mostly used in tests. func NewEmpty(db *Database) *Trie { - tr, _ := New(TrieID(types.EmptyRootHash), db) + tr, _ := New(common.Hash{}, common.Hash{}, db) return tr } -// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered -// error but just print out an error message. 
-func (t *Trie) MustNodeIterator(start []byte) NodeIterator { - it, err := t.NodeIterator(start) - if err != nil { - log.Error("Unhandled trie error in Trie.NodeIterator", "err", err) - } - return it -} - // NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at // the key after the given start key. -func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return nil, ErrCommitted - } - return newNodeIterator(t, start), nil +func (t *Trie) NodeIterator(start []byte) NodeIterator { + return newNodeIterator(t, start) } -// MustGet is a wrapper of Get and will omit any encountered error but just -// print out an error message. -func (t *Trie) MustGet(key []byte) []byte { - res, err := t.Get(key) +// Get returns the value for key stored in the trie. +// The value bytes must not be modified by the caller. +func (t *Trie) Get(key []byte) []byte { + res, err := t.TryGet(key) if err != nil { - log.Error("Unhandled trie error in Trie.Get", "err", err) + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) } return res } -// Get returns the value for key stored in the trie. +// TryGet returns the value for key stored in the trie. // The value bytes must not be modified by the caller. -// -// If the requested node is not present in trie, no error will be returned. -// If the trie is corrupted, a MissingNodeError is returned. -func (t *Trie) Get(key []byte) ([]byte, error) { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return nil, ErrCommitted - } - value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0) +// If a node was not found in the database, a MissingNodeError is returned. +func (t *Trie) TryGet(key []byte) ([]byte, error) { + value, newroot, didResolve, err := t.tryGet(t.root, keybytesToHex(key), 0) if err == nil && didResolve { t.root = newroot } return value, err } -func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) { +func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) { switch n := (origNode).(type) { case nil: return nil, nil, false, nil @@ -173,52 +165,35 @@ func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode no // key not found in trie return nil, n, false, nil } - value, newnode, didResolve, err = t.get(n.Val, key, pos+len(n.Key)) + value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key)) if err == nil && didResolve { n = n.copy() n.Val = newnode } return value, n, didResolve, err case *fullNode: - value, newnode, didResolve, err = t.get(n.Children[key[pos]], key, pos+1) + value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1) if err == nil && didResolve { n = n.copy() n.Children[key[pos]] = newnode } return value, n, didResolve, err case hashNode: - child, err := t.resolveAndTrack(n, key[:pos]) + child, err := t.resolveHash(n, key[:pos]) if err != nil { return nil, n, true, err } - value, newnode, _, err := t.get(child, key, pos) + value, newnode, _, err := t.tryGet(child, key, pos) return value, newnode, true, err default: panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode)) } } -// MustGetNode is a wrapper of GetNode and will omit any encountered error but -// just print out an error message. 
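// The file-wide pattern being restored: every exported accessor has a Try*
// form that returns *MissingNodeError when a node is absent from the
// database, plus a legacy wrapper that merely logs. Callers able to recover,
// such as sync code, should branch on the error type. A sketch; the helper
// and its recovery message are hypothetical:
func tryGetOrReport(t *Trie, key []byte) ([]byte, error) {
	val, err := t.TryGet(key)
	if missing, ok := err.(*MissingNodeError); ok {
		// missing.Path and missing.NodeHash identify the node that must be
		// fetched before the read can be retried.
		return nil, fmt.Errorf("node %x at path %x not available: %w",
			missing.NodeHash, missing.Path, err)
	}
	return val, err
}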
-func (t *Trie) MustGetNode(path []byte) ([]byte, int) { - item, resolved, err := t.GetNode(path) - if err != nil { - log.Error("Unhandled trie error in Trie.GetNode", "err", err) - } - return item, resolved -} - -// GetNode retrieves a trie node by compact-encoded path. It is not possible -// to use keybyte-encoding as the path might contain odd nibbles. -// -// If the requested node is not present in trie, no error will be returned. -// If the trie is corrupted, a MissingNodeError is returned. -func (t *Trie) GetNode(path []byte) ([]byte, int, error) { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return nil, 0, ErrCommitted - } - item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0) +// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not +// possible to use keybyte-encoding as the path might contain odd nibbles. +func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) { + item, newroot, resolved, err := t.tryGetNode(t.root, compactToHex(path), 0) if err != nil { return nil, resolved, err } @@ -228,10 +203,10 @@ func (t *Trie) GetNode(path []byte) ([]byte, int, error) { if item == nil { return nil, resolved, nil } - return item, resolved, nil + return item, resolved, err } -func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) { +func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) { // If non-existent path requested, abort if origNode == nil { return nil, nil, 0, nil @@ -250,7 +225,7 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod if hash == nil { return nil, origNode, 0, errors.New("non-consensus node") } - blob, err := t.reader.node(path, common.BytesToHash(hash)) + blob, err := t.db.RawNode(common.BytesToHash(hash)) return blob, origNode, 1, err } // Path still needs to be traversed, descend into children @@ -264,7 +239,7 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod // Path branches off from short node return nil, n, 0, nil } - item, newnode, resolved, err = t.getNode(n.Val, path, pos+len(n.Key)) + item, newnode, resolved, err = t.tryGetNode(n.Val, path, pos+len(n.Key)) if err == nil && resolved > 0 { n = n.copy() n.Val = newnode @@ -272,7 +247,7 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod return item, n, resolved, err case *fullNode: - item, newnode, resolved, err = t.getNode(n.Children[path[pos]], path, pos+1) + item, newnode, resolved, err = t.tryGetNode(n.Children[path[pos]], path, pos+1) if err == nil && resolved > 0 { n = n.copy() n.Children[path[pos]] = newnode @@ -280,11 +255,11 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod return item, n, resolved, err case hashNode: - child, err := t.resolveAndTrack(n, path[:pos]) + child, err := t.resolveHash(n, path[:pos]) if err != nil { return nil, n, 1, err } - item, newnode, resolved, err := t.getNode(child, path, pos) + item, newnode, resolved, err := t.tryGetNode(child, path, pos) return item, newnode, resolved + 1, err default: @@ -292,32 +267,33 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod } } -// MustUpdate is a wrapper of Update and will omit any encountered error but -// just print out an error message. 
-func (t *Trie) MustUpdate(key, value []byte) { - if err := t.Update(key, value); err != nil { - log.Error("Unhandled trie error in Trie.Update", "err", err) +// Update associates key with value in the trie. Subsequent calls to +// Get will return value. If value has length zero, any existing value +// is deleted from the trie and calls to Get will return nil. +// +// The value bytes must not be modified by the caller while they are +// stored in the trie. +func (t *Trie) Update(key, value []byte) { + if err := t.TryUpdate(key, value); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) } } -// Update associates key with value in the trie. Subsequent calls to +// TryUpdate associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. // // The value bytes must not be modified by the caller while they are // stored in the trie. // -// If the requested node is not present in trie, no error will be returned. -// If the trie is corrupted, a MissingNodeError is returned. -func (t *Trie) Update(key, value []byte) error { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return ErrCommitted - } - return t.update(key, value) +// If a node was not found in the database, a MissingNodeError is returned. +func (t *Trie) TryUpdate(key, value []byte) error { + return t.tryUpdate(key, value) } -func (t *Trie) update(key, value []byte) error { +// tryUpdate expects an RLP-encoded value and performs the core function +// for TryUpdate and TryUpdateAccount. +func (t *Trie) tryUpdate(key, value []byte) error { t.unhashed++ k := keybytesToHex(key) if len(value) != 0 { @@ -400,7 +376,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error // We've hit a part of the trie that isn't loaded yet. Load // the node and insert into it. This leaves all child nodes on // the path to the value in the trie. - rn, err := t.resolveAndTrack(n, prefix) + rn, err := t.resolveHash(n, prefix) if err != nil { return false, nil, err } @@ -415,23 +391,16 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error } } -// MustDelete is a wrapper of Delete and will omit any encountered error but -// just print out an error message. -func (t *Trie) MustDelete(key []byte) { - if err := t.Delete(key); err != nil { - log.Error("Unhandled trie error in Trie.Delete", "err", err) +// Delete removes any existing value for key from the trie. +func (t *Trie) Delete(key []byte) { + if err := t.TryDelete(key); err != nil { + log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) } } -// Delete removes any existing value for key from the trie. -// -// If the requested node is not present in trie, no error will be returned. -// If the trie is corrupted, a MissingNodeError is returned. -func (t *Trie) Delete(key []byte) error { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return ErrCommitted - } +// TryDelete removes any existing value for key from the trie. +// If a node was not found in the database, a MissingNodeError is returned. +func (t *Trie) TryDelete(key []byte) error { t.unhashed++ k := keybytesToHex(key) _, n, err := t.delete(t.root, nil, k) @@ -561,7 +530,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // We've hit a part of the trie that isn't loaded yet. Load // the node and delete from it. 
 		// the path to the value in the trie.
-		rn, err := t.resolveAndTrack(n, prefix)
+		rn, err := t.resolveHash(n, prefix)
 		if err != nil {
 			return false, nil, err
 		}
@@ -585,57 +554,51 @@ func concat(s1 []byte, s2 ...byte) []byte {
 
 func (t *Trie) resolve(n node, prefix []byte) (node, error) {
 	if n, ok := n.(hashNode); ok {
-		return t.resolveAndTrack(n, prefix)
+		return t.resolveHash(n, prefix)
 	}
 	return n, nil
 }
 
-// resolveAndTrack loads node from the underlying store with the given node hash
-// and path prefix and also tracks the loaded node blob in tracer treated as the
-// node's original value. The rlp-encoded blob is preferred to be loaded from
-// database because it's easy to decode node while complex to encode node to blob.
-func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
-	blob, err := t.reader.node(prefix, common.BytesToHash(n))
-	if err != nil {
-		return nil, err
+// resolveHash loads node from the underlying database with the provided
+// node hash and path prefix.
+func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
+	hash := common.BytesToHash(n)
+	if node := t.db.EncodedNode(hash); node != nil {
+		return node, nil
+	}
+	return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix}
+}
+
+// resolveBlob loads the rlp-encoded node blob from the underlying database
+// with the provided node hash and path prefix.
+func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) {
+	hash := common.BytesToHash(n)
+	blob, _ := t.db.RawNode(hash)
+	if len(blob) != 0 {
+		return blob, nil
 	}
-	t.tracer.onRead(prefix, blob)
-	return mustDecodeNode(n, blob), nil
+	return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix}
 }
 
 // Hash returns the root hash of the trie. It does not write to the
 // database and can be used even if the trie doesn't have one.
 func (t *Trie) Hash() common.Hash {
-	hash, cached := t.hashRoot()
+	hash, cached, _ := t.hashRoot()
 	t.root = cached
 	return common.BytesToHash(hash.(hashNode))
 }
 
 // Commit collects all dirty nodes in the trie and replaces them with the
 // corresponding node hash. All collected nodes (including dirty leaves if
 // collectLeaf is true) will be encapsulated into a nodeset for return.
 // The returned nodeset can be nil if the trie is clean (nothing to commit).
 // Once the trie is committed, it's not usable anymore. A new trie must
 // be created with new root and updated trie database for following usage
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
 	defer t.tracer.reset()
-	defer func() {
-		t.committed = true
-	}()
-	// Trie is empty and can be classified into two types of situations:
-	// (a) The trie was empty and no update happens => return nil
-	// (b) The trie was non-empty and all nodes are dropped => return
-	//     the node set includes all deleted nodes
+
 	if t.root == nil {
-		paths := t.tracer.deletedNodes()
-		if len(paths) == 0 {
-			return types.EmptyRootHash, nil, nil // case (a)
-		}
-		nodes := trienode.NewNodeSet(t.owner)
-		for _, path := range paths {
-			nodes.AddNode([]byte(path), trienode.NewDeleted())
-		}
-		return types.EmptyRootHash, nodes, nil // case (b)
+		return emptyRoot, nil, nil
 	}
 	// Derive the hash for all dirty nodes first. We hold the assumption
 	// in the following procedure that all nodes are hashed.
@@ -649,27 +612,26 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
 		t.root = hashedNode
 		return rootHash, nil, nil
 	}
-	nodes := trienode.NewNodeSet(t.owner)
-	for _, path := range t.tracer.deletedNodes() {
-		nodes.AddNode([]byte(path), trienode.NewDeleted())
+	h := newCommitter(t.owner, collectLeaf)
+	newRoot, nodes, err := h.Commit(t.root)
+	if err != nil {
+		return common.Hash{}, nil, err
 	}
-	t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root)
+	t.root = newRoot
 	return rootHash, nodes, nil
 }
 
 // hashRoot calculates the root hash of the given trie
-func (t *Trie) hashRoot() (node, node) {
+func (t *Trie) hashRoot() (node, node, error) {
 	if t.root == nil {
-		return hashNode(types.EmptyRootHash.Bytes()), nil
+		return hashNode(emptyRoot.Bytes()), nil, nil
 	}
 	// If the number of changes is below 100, we let one thread handle it
 	h := newHasher(t.unhashed >= 100)
-	defer func() {
-		returnHasherToPool(h)
-		t.unhashed = 0
-	}()
+	defer returnHasherToPool(h)
 	hashed, cached := h.hash(t.root, true)
-	return hashed, cached
+	t.unhashed = 0
+	return hashed, cached, nil
 }
 
 // Reset drops the referenced root node and cleans all internal state.
@@ -677,6 +639,6 @@ func (t *Trie) Reset() {
 	t.root = nil
 	t.owner = common.Hash{}
 	t.unhashed = 0
+	//t.db = nil
 	t.tracer.reset()
-	t.committed = false
 }
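Taken together, the trie.go changes above revert the Must*/committed-flag surface to the older Try* API. Not part of the patch, but for orientation: a minimal standalone sketch of how the reverted API reads at a call site, mirroring the calls exercised by the new test file below. The `Get` accessor, the `main` harness, and the import paths (following this PR's tenderly/coreth rewrites) are assumptions for illustration, not patch content.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/tenderly/coreth/core/rawdb"
	"github.com/tenderly/coreth/trie"
)

func main() {
	// Fresh trie on top of an in-memory node database.
	triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(triedb)

	// Update logs unhandled errors internally; TryUpdate surfaces them
	// (a MissingNodeError when a node is absent from the database).
	tr.Update([]byte("dog"), []byte("puppy"))
	if err := tr.TryUpdate([]byte("horse"), []byte("stallion")); err != nil {
		panic(err)
	}

	// Commit replaces dirty nodes with their hashes and hands back the
	// collected node set; flushing it through the trie database makes
	// the new root resolvable afterwards.
	root, nodes, err := tr.Commit(false)
	if err != nil {
		panic(err)
	}
	if nodes != nil {
		triedb.Update(trie.NewWithNodeSet(nodes))
	}

	// Re-open the trie at the committed root; the owner hash is zero
	// for a non-storage (account) trie.
	reopened, err := trie.New(common.Hash{}, root, triedb)
	if err != nil {
		panic(err)
	}
	fmt.Printf("root %x, dog -> %s\n", root, reopened.Get([]byte("dog")))
}
```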
diff --git a/trie/util_test.go b/trie/util_test.go
new file mode 100644
index 0000000000..6c924e7fed
--- /dev/null
+++ b/trie/util_test.go
@@ -0,0 +1,134 @@
+// (c) 2022, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/tenderly/coreth/core/rawdb"
+)
+
+// Tests if the trie diffs are tracked correctly.
+func TestTrieTracer(t *testing.T) {
+	db := NewDatabase(rawdb.NewMemoryDatabase())
+	trie := NewEmpty(db)
+	trie.tracer = newTracer()
+
+	// Insert a batch of entries, all the nodes should be marked as inserted
+	vals := []struct{ k, v string }{
+		{"do", "verb"},
+		{"ether", "wookiedoo"},
+		{"horse", "stallion"},
+		{"shaman", "horse"},
+		{"doge", "coin"},
+		{"dog", "puppy"},
+		{"somethingveryoddindeedthis is", "myothernodedata"},
+	}
+	for _, val := range vals {
+		trie.Update([]byte(val.k), []byte(val.v))
+	}
+	trie.Hash()
+
+	seen := make(map[string]struct{})
+	it := trie.NodeIterator(nil)
+	for it.Next(true) {
+		if it.Leaf() {
+			continue
+		}
+		seen[string(it.Path())] = struct{}{}
+	}
+	inserted := trie.tracer.insertList()
+	if len(inserted) != len(seen) {
+		t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted))
+	}
+	for _, k := range inserted {
+		_, ok := seen[string(k)]
+		if !ok {
+			t.Fatalf("Unexpected inserted node")
+		}
+	}
+	deleted := trie.tracer.deleteList()
+	if len(deleted) != 0 {
+		t.Fatalf("Unexpected deleted node tracked %d", len(deleted))
+	}
+
+	// Commit the changes and re-create with new root
+	root, nodes, _ := trie.Commit(false)
+	db.Update(NewWithNodeSet(nodes))
+	trie, _ = New(common.Hash{}, root, db)
+	trie.tracer = newTracer()
+
+	// Delete all the elements, check deletion set
+	for _, val := range vals {
+		trie.Delete([]byte(val.k))
+	}
+	trie.Hash()
+
+	inserted = trie.tracer.insertList()
+	if len(inserted) != 0 {
+		t.Fatalf("Unexpected inserted node tracked %d", len(inserted))
+	}
+	deleted = trie.tracer.deleteList()
+	if len(deleted) != len(seen) {
+		t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted))
+	}
+	for _, k := range deleted {
+		_, ok := seen[string(k)]
+		if !ok {
+			t.Fatalf("Unexpected deleted node")
+		}
+	}
+}
+
+func TestTrieTracerNoop(t *testing.T) {
+	trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+	trie.tracer = newTracer()
+
+	// Insert a batch of entries, all the nodes should be marked as inserted
+	vals := []struct{ k, v string }{
+		{"do", "verb"},
+		{"ether", "wookiedoo"},
+		{"horse", "stallion"},
+		{"shaman", "horse"},
+		{"doge", "coin"},
+		{"dog", "puppy"},
+		{"somethingveryoddindeedthis is", "myothernodedata"},
+	}
+	for _, val := range vals {
+		trie.Update([]byte(val.k), []byte(val.v))
+	}
+	for _, val := range vals {
+		trie.Delete([]byte(val.k))
+	}
+	if len(trie.tracer.insertList()) != 0 {
+		t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList()))
+	}
+	if len(trie.tracer.deleteList()) != 0 {
+		t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList()))
+	}
+}
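One tracer behavior the tests above leave unpinned is resurrection: per the onInsert/onDelete rules in trie/utils.go below, deleting a node and then re-inserting it at the same path cancels out to "untouched". A hypothetical companion test (in package trie, names taken from utils.go) sketching that rule:

```go
package trie

import "testing"

// Hypothetical companion test, not part of the patch: a node deleted and
// then re-inserted at the same path should be treated as untouched.
func TestTrieTracerResurrection(t *testing.T) {
	tr := newTracer()
	path := []byte{0x01, 0x02}

	tr.onDelete(path) // mark the node as deleted
	tr.onInsert(path) // resurrect it: cancels the pending deletion

	if n := len(tr.insertList()); n != 0 {
		t.Fatalf("resurrected node tracked as inserted: got %d inserts", n)
	}
	if n := len(tr.deleteList()); n != 0 {
		t.Fatalf("resurrected node tracked as deleted: got %d deletes", n)
	}
}
```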
diff --git a/trie/utils.go b/trie/utils.go
new file mode 100644
index 0000000000..8d1593ebf3
--- /dev/null
+++ b/trie/utils.go
@@ -0,0 +1,177 @@
+// (c) 2022, Ava Labs, Inc.
+//
+// This file is a derived work, based on the go-ethereum library whose original
+// notices appear below.
+//
+// It is distributed under a license compatible with the licensing terms of the
+// original code from which it is derived.
+//
+// Much love to the original authors for their work.
+// **********
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+// tracer tracks changes to trie nodes. During trie operations, some nodes
+// can be deleted from the trie, but those deletions are not captured by
+// trie.Hasher or trie.Committer, so the deleted nodes would never be
+// removed from disk. The tracer is an auxiliary tool used to track all
+// insert and delete operations on the trie and to eventually capture all
+// deleted nodes.
+//
+// The changed nodes fall into two main categories: leaf nodes and
+// intermediate nodes. The former are inserted/deleted by callers while the
+// latter are inserted/deleted in order to follow the rules of the trie.
+// This tool can track all of them, whether or not a node is embedded in
+// its parent, but valueNode is never tracked.
+//
+// It is also used for recording the original value of nodes when they are
+// resolved from disk. These pre-values will be used to construct
+// reverse-diffs in the future.
+//
+// Note: tracer is not thread-safe; callers are responsible for handling
+// concurrency themselves.
+type tracer struct {
+	insert map[string]struct{}
+	delete map[string]struct{}
+	origin map[string][]byte
+}
+
+// newTracer initializes the tracer for capturing trie changes.
+func newTracer() *tracer {
+	return &tracer{
+		insert: make(map[string]struct{}),
+		delete: make(map[string]struct{}),
+		origin: make(map[string][]byte),
+	}
+}
+
+/*
+// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally.
+// Don't change the value outside of this function since it's not deep-copied.
+func (t *tracer) onRead(key []byte, val []byte) {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return
+	}
+	t.origin[string(key)] = val
+}
+*/
+
+// onInsert tracks the newly inserted trie node. If it's already in the
+// deletion set (a resurrected node), just wipe it from the deletion set
+// and treat the node as untouched.
+func (t *tracer) onInsert(key []byte) {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return
+	}
+	if _, present := t.delete[string(key)]; present {
+		delete(t.delete, string(key))
+		return
+	}
+	t.insert[string(key)] = struct{}{}
+}
+
+// onDelete tracks the newly deleted trie node. If it's already in the
+// addition set, just wipe it from the addition set as the node is
+// untouched.
+func (t *tracer) onDelete(key []byte) {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return
+	}
+	if _, present := t.insert[string(key)]; present {
+		delete(t.insert, string(key))
+		return
+	}
+	t.delete[string(key)] = struct{}{}
+}
+
+// insertList returns the tracked inserted trie nodes in list format.
+func (t *tracer) insertList() [][]byte {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return nil
+	}
+	var ret [][]byte
+	for key := range t.insert {
+		ret = append(ret, []byte(key))
+	}
+	return ret
+}
+
+// deleteList returns the tracked deleted trie nodes in list format.
+func (t *tracer) deleteList() [][]byte {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return nil
+	}
+	var ret [][]byte
+	for key := range t.delete {
+		ret = append(ret, []byte(key))
+	}
+	return ret
+}
+
+/*
+// getPrev returns the cached original value of the specified node.
+func (t *tracer) getPrev(key []byte) []byte {
+	// Don't panic on uninitialized tracer, it's possible in testing.
+	if t == nil {
+		return nil
+	}
+	return t.origin[string(key)]
+}
+*/
+
+// reset clears the content tracked by tracer.
+func (t *tracer) reset() {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return
+	}
+	t.insert = make(map[string]struct{})
+	t.delete = make(map[string]struct{})
+	t.origin = make(map[string][]byte)
+}
+
+// copy returns a deep-copied tracer instance.
+func (t *tracer) copy() *tracer {
+	// Tracer isn't used right now, remove this check later.
+	if t == nil {
+		return nil
+	}
+	var (
+		insert = make(map[string]struct{})
+		delete = make(map[string]struct{})
+		origin = make(map[string][]byte)
+	)
+	for key := range t.insert {
+		insert[key] = struct{}{}
+	}
+	for key := range t.delete {
+		delete[key] = struct{}{}
+	}
+	for key, val := range t.origin {
+		origin[key] = val
+	}
+	return &tracer{
+		insert: insert,
+		delete: delete,
+		origin: origin,
+	}
+}
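Finally, copy above deep-copies all three maps, so a snapshot must stay independent of later mutations of the original. A hypothetical sketch (again in package trie, not part of the patch) of that guarantee:

```go
package trie

import "testing"

// Hypothetical sketch, not part of the patch: mutations of the original
// tracer after copy() must not leak into the snapshot.
func TestTracerCopyIsDeep(t *testing.T) {
	orig := newTracer()
	orig.onInsert([]byte{0xaa})

	snapshot := orig.copy()
	orig.onInsert([]byte{0xbb}) // mutate only the original

	if got := len(snapshot.insertList()); got != 1 {
		t.Fatalf("snapshot changed with original: want 1 insert, got %d", got)
	}
	if got := len(orig.insertList()); got != 2 {
		t.Fatalf("want 2 inserts in original, got %d", got)
	}
}
```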