From 3a6dbe4d85ffb8d77bef93461d5ac16f05196d6d Mon Sep 17 00:00:00 2001 From: galaio <12880651+galaio@users.noreply.github.com> Date: Fri, 11 Oct 2024 16:44:50 +0800 Subject: [PATCH 1/5] ethclient: move TransactionOpts to avoid import internal package; (#2736) --- accounts/abi/bind/backend.go | 3 +- accounts/abi/bind/base_test.go | 3 +- .../ethapi => core/types}/gen_tx_opts_json.go | 3 +- core/types/transaction_options.go | 43 ++++++++++++++++++ .../types}/transaction_options_test.go | 33 +++++++------- ethclient/ethclient.go | 3 +- ethclient/ethclient_test.go | 7 ++- ethclient/simulated/backend.go | 3 +- internal/ethapi/api.go | 4 +- ...n_options.go => transaction_opts_utils.go} | 45 ++----------------- 10 files changed, 73 insertions(+), 74 deletions(-) rename {internal/ethapi => core/types}/gen_tx_opts_json.go (99%) create mode 100644 core/types/transaction_options.go rename {internal/ethapi => core/types}/transaction_options_test.go (80%) rename internal/ethapi/{transaction_options.go => transaction_opts_utils.go} (51%) diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index 153ba46415..f43142d60a 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/internal/ethapi" ) var ( @@ -101,7 +100,7 @@ type ContractTransactor interface { PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) // SendTransactionConditional injects the conditional transaction into the pending pool for execution after verification. - SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error + SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error } // DeployBackend wraps the operations needed by WaitMined and WaitDeployed. diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go index 99621767ff..181cd43753 100644 --- a/accounts/abi/bind/base_test.go +++ b/accounts/abi/bind/base_test.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -76,7 +75,7 @@ func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transac return nil } -func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error { +func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error { return nil } diff --git a/internal/ethapi/gen_tx_opts_json.go b/core/types/gen_tx_opts_json.go similarity index 99% rename from internal/ethapi/gen_tx_opts_json.go rename to core/types/gen_tx_opts_json.go index 19832cede1..e5ac99bd2d 100644 --- a/internal/ethapi/gen_tx_opts_json.go +++ b/core/types/gen_tx_opts_json.go @@ -1,10 +1,9 @@ // Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
-package ethapi +package types import ( "encoding/json" - "github.com/ethereum/go-ethereum/common/hexutil" ) diff --git a/core/types/transaction_options.go b/core/types/transaction_options.go new file mode 100644 index 0000000000..4f0484b3fd --- /dev/null +++ b/core/types/transaction_options.go @@ -0,0 +1,43 @@ +package types + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +type AccountStorage struct { + StorageRoot *common.Hash + StorageSlots map[common.Hash]common.Hash +} + +func (a *AccountStorage) UnmarshalJSON(data []byte) error { + var hash common.Hash + if err := json.Unmarshal(data, &hash); err == nil { + a.StorageRoot = &hash + return nil + } + return json.Unmarshal(data, &a.StorageSlots) +} + +func (a AccountStorage) MarshalJSON() ([]byte, error) { + if a.StorageRoot != nil { + return json.Marshal(*a.StorageRoot) + } + return json.Marshal(a.StorageSlots) +} + +type KnownAccounts map[common.Address]AccountStorage + +// It is known that marshaling is broken +// https://github.com/golang/go/issues/55890 + +//go:generate go run github.com/fjl/gencodec -type TransactionOpts -out gen_tx_opts_json.go +type TransactionOpts struct { + KnownAccounts KnownAccounts `json:"knownAccounts"` + BlockNumberMin *hexutil.Uint64 `json:"blockNumberMin,omitempty"` + BlockNumberMax *hexutil.Uint64 `json:"blockNumberMax,omitempty"` + TimestampMin *hexutil.Uint64 `json:"timestampMin,omitempty"` + TimestampMax *hexutil.Uint64 `json:"timestampMax,omitempty"` +} diff --git a/internal/ethapi/transaction_options_test.go b/core/types/transaction_options_test.go similarity index 80% rename from internal/ethapi/transaction_options_test.go rename to core/types/transaction_options_test.go index 479f46a145..59627c0a36 100644 --- a/internal/ethapi/transaction_options_test.go +++ b/core/types/transaction_options_test.go @@ -1,4 +1,4 @@ -package ethapi_test +package types import ( "encoding/json" @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/internal/ethapi" ) func ptr(hash common.Hash) *common.Hash { @@ -23,15 +22,15 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { name string input string mustFail bool - expected ethapi.TransactionOpts + expected TransactionOpts }{ { "StateRoot", `{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}}`, false, - ethapi.TransactionOpts{ - KnownAccounts: map[common.Address]ethapi.AccountStorage{ - common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): ethapi.AccountStorage{ + TransactionOpts{ + KnownAccounts: map[common.Address]AccountStorage{ + common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): AccountStorage{ StorageRoot: ptr(common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")), }, }, @@ -41,9 +40,9 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { "StorageSlots", `{"knownAccounts":{"0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0":{"0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8":"0x0000000000000000000000000000000000000000000000000000000000000000"}}}`, false, - ethapi.TransactionOpts{ - KnownAccounts: map[common.Address]ethapi.AccountStorage{ - common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): ethapi.AccountStorage{ + TransactionOpts{ + KnownAccounts: map[common.Address]AccountStorage{ + 
common.HexToAddress("0x6b3A8798E5Fb9fC5603F3aB5eA2e8136694e55d0"): AccountStorage{ StorageRoot: nil, StorageSlots: map[common.Hash]common.Hash{ common.HexToHash("0xc65a7bb8d6351c1cf70c95a316cc6a92839c986682d98bc35f958f4883f9d2a8"): common.HexToHash("0x"), @@ -56,15 +55,15 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { "EmptyObject", `{"knownAccounts":{}}`, false, - ethapi.TransactionOpts{ - KnownAccounts: make(map[common.Address]ethapi.AccountStorage), + TransactionOpts{ + KnownAccounts: make(map[common.Address]AccountStorage), }, }, { "EmptyStrings", `{"knownAccounts":{"":""}}`, true, - ethapi.TransactionOpts{ + TransactionOpts{ KnownAccounts: nil, }, }, @@ -72,7 +71,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { "BlockNumberMin", `{"blockNumberMin":"0x1"}`, false, - ethapi.TransactionOpts{ + TransactionOpts{ BlockNumberMin: u64Ptr(1), }, }, @@ -80,7 +79,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { "BlockNumberMax", `{"blockNumberMin":"0x1", "blockNumberMax":"0x2"}`, false, - ethapi.TransactionOpts{ + TransactionOpts{ BlockNumberMin: u64Ptr(1), BlockNumberMax: u64Ptr(2), }, @@ -89,7 +88,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { "TimestampMin", `{"timestampMin":"0xffff"}`, false, - ethapi.TransactionOpts{ + TransactionOpts{ TimestampMin: u64Ptr(0xffff), }, }, @@ -97,7 +96,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { "TimestampMax", `{"timestampMax":"0xffffff"}`, false, - ethapi.TransactionOpts{ + TransactionOpts{ TimestampMax: u64Ptr(0xffffff), }, }, @@ -105,7 +104,7 @@ func TestTransactionOptsJSONUnMarshalTrip(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - var opts ethapi.TransactionOpts + var opts TransactionOpts err := json.Unmarshal([]byte(test.input), &opts) if test.mustFail && err == nil { t.Errorf("Test %s should fail", test.name) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 8cf50eb528..1a586ec039 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -29,7 +29,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/rpc" ) @@ -737,7 +736,7 @@ func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er // // If the transaction was a contract creation use the TransactionReceipt method to get the // contract address after the transaction has been mined. 
-func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error { +func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error { data, err := tx.MarshalBinary() if err != nil { return err diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 54ce597b09..7abc104fd1 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -770,9 +769,9 @@ func sendTransactionConditional(ec *Client) error { } root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - return ec.SendTransactionConditional(context.Background(), tx, ethapi.TransactionOpts{ - KnownAccounts: map[common.Address]ethapi.AccountStorage{ - testAddr: ethapi.AccountStorage{ + return ec.SendTransactionConditional(context.Background(), tx, types.TransactionOpts{ + KnownAccounts: map[common.Address]types.AccountStorage{ + testAddr: types.AccountStorage{ StorageRoot: &root, }, }, diff --git a/ethclient/simulated/backend.go b/ethclient/simulated/backend.go index 13e7cad586..a577343df3 100644 --- a/ethclient/simulated/backend.go +++ b/ethclient/simulated/backend.go @@ -30,7 +30,6 @@ import ( "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" @@ -39,7 +38,7 @@ import ( // TransactionConditionalSender injects the conditional transaction into the pending pool for execution after verification. type TransactionConditionalSender interface { - SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error + SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts types.TransactionOpts) error } // Client exposes the methods provided by the Ethereum RPC client. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 6a2ac19319..0f029a503d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2314,7 +2314,7 @@ func (s *TransactionAPI) SendRawTransaction(ctx context.Context, input hexutil.B // SendRawTransactionConditional will add the signed transaction to the transaction pool. 
// The sender/bundler is responsible for signing the transaction -func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts TransactionOpts) (common.Hash, error) { +func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, opts types.TransactionOpts) (common.Hash, error) { tx := new(types.Transaction) if err := tx.UnmarshalBinary(input); err != nil { return common.Hash{}, err @@ -2324,7 +2324,7 @@ func (s *TransactionAPI) SendRawTransactionConditional(ctx context.Context, inpu if state == nil || err != nil { return common.Hash{}, err } - if err := opts.Check(header.Number.Uint64(), header.Time, state); err != nil { + if err := TxOptsCheck(opts, header.Number.Uint64(), header.Time, state); err != nil { return common.Hash{}, err } return SubmitTransaction(ctx, s.b, tx) diff --git a/internal/ethapi/transaction_options.go b/internal/ethapi/transaction_opts_utils.go similarity index 51% rename from internal/ethapi/transaction_options.go rename to internal/ethapi/transaction_opts_utils.go index 77b1729037..131ac82e29 100644 --- a/internal/ethapi/transaction_options.go +++ b/internal/ethapi/transaction_opts_utils.go @@ -2,52 +2,15 @@ package ethapi import ( "bytes" - "encoding/json" "errors" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" ) -type AccountStorage struct { - StorageRoot *common.Hash - StorageSlots map[common.Hash]common.Hash -} - -func (a *AccountStorage) UnmarshalJSON(data []byte) error { - var hash common.Hash - if err := json.Unmarshal(data, &hash); err == nil { - a.StorageRoot = &hash - return nil - } - return json.Unmarshal(data, &a.StorageSlots) -} - -func (a AccountStorage) MarshalJSON() ([]byte, error) { - if a.StorageRoot != nil { - return json.Marshal(*a.StorageRoot) - } - return json.Marshal(a.StorageSlots) -} - -type KnownAccounts map[common.Address]AccountStorage - -// It is known that marshaling is broken -// https://github.com/golang/go/issues/55890 - -//go:generate go run github.com/fjl/gencodec -type TransactionOpts -out gen_tx_opts_json.go -type TransactionOpts struct { - KnownAccounts KnownAccounts `json:"knownAccounts"` - BlockNumberMin *hexutil.Uint64 `json:"blockNumberMin,omitempty"` - BlockNumberMax *hexutil.Uint64 `json:"blockNumberMax,omitempty"` - TimestampMin *hexutil.Uint64 `json:"timestampMin,omitempty"` - TimestampMax *hexutil.Uint64 `json:"timestampMax,omitempty"` -} - const MaxNumberOfEntries = 1000 -func (o *TransactionOpts) Check(blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error { +func TxOptsCheck(o types.TransactionOpts, blockNumber uint64, timeStamp uint64, statedb *state.StateDB) error { if o.BlockNumberMin != nil && blockNumber < uint64(*o.BlockNumberMin) { return errors.New("BlockNumberMin condition not met") } @@ -71,10 +34,10 @@ func (o *TransactionOpts) Check(blockNumber uint64, timeStamp uint64, statedb *s if counter > MaxNumberOfEntries { return errors.New("knownAccounts too large") } - return o.CheckStorage(statedb) + return TxOptsCheckStorage(o, statedb) } -func (o *TransactionOpts) CheckStorage(statedb *state.StateDB) error { +func TxOptsCheckStorage(o types.TransactionOpts, statedb *state.StateDB) error { for address, accountStorage := range o.KnownAccounts { if accountStorage.StorageRoot != nil { rootHash := statedb.GetRoot(address) From 00a36bb0ccedec0625f6151e328f10e41fe34058 Mon Sep 17 00:00:00 
2001 From: Ethan Date: Sat, 12 Oct 2024 09:14:28 +0800 Subject: [PATCH 2/5] feat: modify LOCK_PERIOD_FOR_TOKEN_RECOVER to 300 seconds on BSC Testnet in pascal hardfork (#2737) * feat: modify LOCK_PERIOD_FOR_TOKEN_RECOVER to 300 seconds on BSC Testnet * feat: update commit id in pascal hardfork --- .../pascal/chapel/TokenHubContract | 2 +- core/systemcontracts/upgrade.go | 102 +++++++++--------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/core/systemcontracts/pascal/chapel/TokenHubContract b/core/systemcontracts/pascal/chapel/TokenHubContract index f8ae4da3d4..94d1d827f7 100644 --- a/core/systemcontracts/pascal/chapel/TokenHubContract +++ b/core/systemcontracts/pascal/chapel/TokenHubContract @@ -1 +1 @@ -60806040526004361061036f5760003560e01c80639509b980116101c6578063bbface1f116100f7578063e04c83a711610095578063f9a2bbc71161006f578063f9a2bbc714610e57578063fa9e915914610e6c578063fc1a598f14610e81578063fd6a687914610eb4576103b7565b8063e04c83a714610de5578063e1c7392a14610e0f578063e8f35cea14610e24576103b7565b8063c8509d81116100d1578063c8509d8114610960578063cf41984414610d80578063dc927faf14610dbb578063df8079e914610dd0576103b7565b8063bbface1f14610d05578063bd46646114610d38578063c81b166214610d6b576103b7565b8063aa82dce111610164578063ac4317511161013e578063ac43175114610bd7578063b99328c514610ca2578063b9fd21e314610cdb578063ba35ead614610cf0576103b7565b8063aa82dce114610b7f578063aad5606314610b94578063ab51bb9614610ba9576103b7565b80639dc09262116101a05780639dc0926214610af9578063a1a11bf514610b0e578063a78abc1614610b23578063aa7415f514610b38576103b7565b80639509b98014610a725780639a854bbd14610aab5780639a99b4f014610ac0576103b7565b806359b92789116102a057806375d47a0a1161023e578063831d65d111610218578063831d65d1146109605780638525db03146109e55780638eff336c14610a1e57806393ab703f14610a5d576103b7565b806375d47a0a146108f7578063799758b91461090c5780637e434d541461094b576103b7565b80636e0565201161027a5780636e056520146107775780636e47b482146108a357806371d30863146108b8578063727be1f8146108cd576103b7565b806359b927891461070d5780635d499b1b14610737578063613684751461074c576103b7565b80633fd8b02f1161030d578063493279b1116102e7578063493279b1146106a257806350432d32146106ce57806351b4dce3146106e357806351e80672146106f8576103b7565b80633fd8b02f1461066357806343756e5c1461067857806343a368b91461068d576103b7565b8063149d14d911610349578063149d14d91461052257806328087028146105495780632ae454831461055e5780633d713223146105b2576103b7565b80630e2374a5146103bc5780631182b875146103ed57806312234582146104e7576103b7565b366103b75734156103b5576040805133815234602082015281517f6c98249d85d88c3753a04a22230f595e4dc8d3dc86c34af35deeeedc861b89db929181900390910190a15b005b600080fd5b3480156103c857600080fd5b506103d1610ec9565b604080516001600160a01b039092168252519081900360200190f35b3480156103f957600080fd5b506104726004803603604081101561041057600080fd5b60ff8235169190810190604081016020820135600160201b81111561043457600080fd5b82018360208201111561044657600080fd5b803590602001918460018302840111600160201b8311171561046757600080fd5b509092509050610ecf565b6040805160208082528351818301528351919283929083019185019080838360005b838110156104ac578181015183820152602001610494565b50505050905090810190601f1680156104d95780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156104f357600080fd5b506103b56004803603604081101561050a57600080fd5b506001600160a01b0381358116916020013516610f17565b34801561052e57600080fd5b50610537610f91565b60408051918252519081900360200190f35b34801561055557600080fd5b506103d1610f97565b34801561056a57600080fd5b506105996004803603604081101561058157600080fd5b50
6001600160a01b0381358116916020013516610f9d565b6040805192835260208301919091528051918290030190f35b3480156105be57600080fd5b506103d1600480360360208110156105d557600080fd5b810190602081018135600160201b8111156105ef57600080fd5b82018360208201111561060157600080fd5b803590602001918460018302840111600160201b8311171561062257600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610fc1945050505050565b34801561066f57600080fd5b50610537610fe2565b34801561068457600080fd5b506103d1610fe8565b34801561069957600080fd5b50610537610fee565b3480156106ae57600080fd5b506106b7610ffa565b6040805161ffff9092168252519081900360200190f35b3480156106da57600080fd5b50610537610fff565b3480156106ef57600080fd5b506103d161100a565b34801561070457600080fd5b506103d1611010565b34801561071957600080fd5b506103d16004803603602081101561073057600080fd5b5035611016565b34801561074357600080fd5b50610537611031565b34801561075857600080fd5b5061076161103a565b6040805160ff9092168252519081900360200190f35b61088f6004803603608081101561078d57600080fd5b810190602081018135600160201b8111156107a757600080fd5b8201836020820111156107b957600080fd5b803590602001918460208302840111600160201b831117156107da57600080fd5b919390929091602081019035600160201b8111156107f757600080fd5b82018360208201111561080957600080fd5b803590602001918460208302840111600160201b8311171561082a57600080fd5b919390929091602081019035600160201b81111561084757600080fd5b82018360208201111561085957600080fd5b803590602001918460208302840111600160201b8311171561087a57600080fd5b91935091503567ffffffffffffffff1661103f565b604080519115158252519081900360200190f35b3480156108af57600080fd5b506103d1611085565b3480156108c457600080fd5b5061053761108b565b3480156108d957600080fd5b5061088f600480360360208110156108f057600080fd5b5035611091565b34801561090357600080fd5b506103d16110cd565b34801561091857600080fd5b506103b56004803603606081101561092f57600080fd5b508035906001600160a01b0360208201351690604001356110d3565b34801561095757600080fd5b506103d1611383565b34801561096c57600080fd5b506103b56004803603604081101561098357600080fd5b60ff8235169190810190604081016020820135600160201b8111156109a757600080fd5b8201836020820111156109b957600080fd5b803590602001918460018302840111600160201b831117156109da57600080fd5b509092509050611389565b3480156109f157600080fd5b506103b560048036036040811015610a0857600080fd5b50803590602001356001600160a01b03166113ce565b348015610a2a57600080fd5b506103b560048036036060811015610a4157600080fd5b508035906001600160a01b036020820135169060400135611537565b348015610a6957600080fd5b50610537611577565b348015610a7e57600080fd5b506103b560048036036040811015610a9557600080fd5b506001600160a01b03813516906020013561157e565b348015610ab757600080fd5b50610537611640565b348015610acc57600080fd5b5061053760048036036040811015610ae357600080fd5b506001600160a01b03813516906020013561164c565b348015610b0557600080fd5b506103d16116d2565b348015610b1a57600080fd5b506103d16116d8565b348015610b2f57600080fd5b5061088f6116de565b61088f60048036036080811015610b4e57600080fd5b5080356001600160a01b03908116916020810135909116906040810135906060013567ffffffffffffffff1661103f565b348015610b8b57600080fd5b506103d16116e7565b348015610ba057600080fd5b506103d16116ed565b348015610bb557600080fd5b50610bbe6116f3565b6040805163ffffffff9092168252519081900360200190f35b348015610be357600080fd5b506103b560048036036040811015610bfa57600080fd5b810190602081018135600160201b811115610c1457600080fd5b820183602082011115610c2657600080fd5b803590602001918460018302840111600160201b83111715610c4757600080fd5b919390929091602081019035600160201b811115610c6457600080fd5b820183602082011115610c765760008
0fd5b803590602001918460018302840111600160201b83111715610c9757600080fd5b5090925090506116f8565b348015610cae57600080fd5b506103b560048036036040811015610cc557600080fd5b50803590602001356001600160a01b0316611537565b348015610ce757600080fd5b50610537611738565b348015610cfc57600080fd5b50610537611742565b348015610d1157600080fd5b5061053760048036036020811015610d2857600080fd5b50356001600160a01b0316611748565b348015610d4457600080fd5b5061053760048036036020811015610d5b57600080fd5b50356001600160a01b031661175a565b348015610d7757600080fd5b506103d1611775565b348015610d8c57600080fd5b506103b560048036036040811015610da357600080fd5b506001600160a01b038135811691602001351661177b565b348015610dc757600080fd5b506103d1611a4f565b348015610ddc57600080fd5b506103d1611a55565b348015610df157600080fd5b5061088f60048036036020811015610e0857600080fd5b5035611a5b565b348015610e1b57600080fd5b506103b5611ab3565b348015610e3057600080fd5b5061053760048036036020811015610e4757600080fd5b50356001600160a01b0316611b53565b348015610e6357600080fd5b506103d1611b65565b348015610e7857600080fd5b50610537611b6b565b348015610e8d57600080fd5b5061047260048036036020811015610ea457600080fd5b50356001600160a01b0316611b71565b348015610ec057600080fd5b506103d1611c98565b61200181565b60005460609060ff16610f17576040805162461bcd60e51b81526020600482015260196024820152600080516020612049833981519152604482015290519081900360640190fd5b3361200014610f575760405162461bcd60e51b815260040180806020018281038252602f815260200180611ff7602f913960400191505060405180910390fd5b6040805162461bcd60e51b815260206004820152600a60248201526919195c1c9958d85d195960b21b604482015290519081900360640190fd5b60015490565b61200581565b60076020908152600092835260408084209091529082529020805460019091015482565b6020908101516000908152600490915260409020546001600160a01b031690565b60055481565b61100181565b670de0b6b3a764000081565b606181565b66071afd498d000081565b61200681565b61200081565b6000908152600460205260409020546001600160a01b031690565b6402540be40081565b600881565b6000805460ff16610f57576040805162461bcd60e51b81526020600482015260196024820152600080516020612049833981519152604482015290519081900360640190fd5b61100581565b60015481565b6040805162461bcd60e51b815260206004820152600a60248201526919195c1c9958d85d195960b21b6044820152905160009181900360640190fd5b61100881565b60005460ff16611118576040805162461bcd60e51b81526020600482015260196024820152600080516020612049833981519152604482015290519081900360640190fd5b33613000146111585760405162461bcd60e51b815260040180806020018281038252602b815260200180611f7d602b913960400191505060405180910390fd5b677ce66c50e284000081111561119f5760405162461bcd60e51b8152600401808060200182810382526035815260200180611f486035913960400191505060405180910390fd5b60006221272160e91b8414611309576000848152600460205260409020546001600160a01b031680611212576040805184815290516001600160a01b0386169187917fc16ee9013bf67c846d37735983debb0acc5b2d1419cb5931c9843ad4689505499181900360200190a3505061137e565b6001600160a01b0381166000908152600260205260409020546112358482611c9e565b604080516370a0823160e01b8152306004820152905191945084916001600160a01b038516916370a08231916024808301926020929190829003018186803b15801561128057600080fd5b505afa158015611294573d6000803e3d6000fd5b505050506040513d60208110156112aa57600080fd5b505110156112f6576040805162461bcd60e51b8152602060048201526014602482015273696e73756666696369656e742062616c616e636560601b604482015290519081900360640190fd5b61130286838588611ce7565b505061137c565b61131e826402540be40063ffffffff611d8116565b90508047101561136c576040805162461bcd60e51b8152602060048201526014602482015273696e73756666696369656e742062616c616e636560601b60448201529051908190
0360640190fd5b600061137a85828487611ce7565b505b505b505050565b61200381565b60005460ff16610f17576040805162461bcd60e51b81526020600482015260196024820152600080516020612049833981519152604482015290519081900360640190fd5b336130001461140e5760405162461bcd60e51b815260040180806020018281038252602b815260200180611f7d602b913960400191505060405180910390fd5b60006221272160e91b831461147957506000828152600460205260409020546001600160a01b031680611479576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a59081cde5b589bdb60921b604482015290519081900360640190fd5b6001600160a01b03808216600090815260076020908152604080832093861683529290522080546114e4576040805162461bcd60e51b815260206004820152601060248201526f1b9bc81b1bd8dad95908185b5bdd5b9d60821b604482015290519081900360640190fd5b8054600082556040805182815290516001600160a01b03808716929086169188917f8041a9a8704332594e2884f5e0f942281cdd7611854c365b4d0aa70b2295d6b6919081900360200190a45050505050565b3361100814610f575760405162461bcd60e51b81526004018080602001828103825260238152602001806120266023913960400191505060405180910390fd5b6201518081565b81806001600160a01b031663893d20e86040518163ffffffff1660e01b815260040160206040518083038186803b1580156115b857600080fd5b505afa1580156115cc573d6000803e3d6000fd5b505050506040513d60208110156115e257600080fd5b50516001600160a01b03163314610f57576040805162461bcd60e51b815260206004820152601860248201527f6e6f74206f776e6572206f6620424550323020746f6b656e0000000000000000604482015290519081900360640190fd5b677ce66c50e284000081565b6000805460ff16611692576040805162461bcd60e51b81526020600482015260196024820152600080516020612049833981519152604482015290519081900360640190fd5b3361100514610f575760405162461bcd60e51b815260040180806020018281038252602f815260200180611f19602f913960400191505060405180910390fd5b61100781565b61100681565b60005460ff1681565b61200281565b61300081565b600081565b3361100714610f575760405162461bcd60e51b815260040180806020018281038252602e815260200180611fc9602e913960400191505060405180910390fd5b6221272160e91b81565b61c35081565b60026020526000908152604090205481565b6001600160a01b031660009081526003602052604090205490565b61100281565b60085460ff16600214156117c7576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6008805460ff191660021790556001600160a01b038083166000908152600760209081526040808320938516835292905220805461183f576040805162461bcd60e51b815260206004820152601060248201526f1b9bc81b1bd8dad95908185b5bdd5b9d60821b604482015290519081900360640190fd5b8060010154421015611898576040805162461bcd60e51b815260206004820152601760248201527f7374696c6c206f6e206c6f636b696e6720706572696f64000000000000000000604482015290519081900360640190fd5b805460008083556001600160a01b03851661190b576040516001600160a01b038516906127109084906000818181858888f193505050503d80600081146118fb576040519150601f19603f3d011682016040523d82523d6000602084013e611900565b606091505b50508091505061199e565b846001600160a01b031663a9059cbb61c35086856040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561196e57600080fd5b5087f1158015611982573d6000803e3d6000fd5b50505050506040513d602081101561199957600080fd5b505190505b806119f0576040805162461bcd60e51b815260206004820152601e60248201527f776974686472617720756e6c6f636b656420746f6b656e206661696c65640000604482015290519081900360640190fd5b836001600160a01b0316856001600160a01b03167f832fc3e25f2b3e6fb0eb59419a73cba405f2a249fce75f7e31ea5a457a0323f1846040518082815260200191505060405180910390a350506008805460ff19166001179055505050565b61100381565b612
00481565b60003361200214610f57576040805162461bcd60e51b815260206004820152601f60248201527f746865206d73672073656e646572206d757374206265207374616b6548756200604482015290519081900360640190fd5b60005460ff1615611b0b576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b66071afd498d000060019081556000808052600260205260127fac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b55805460ff19169091179055565b60066020526000908152604090205481565b61100081565b61271081565b6001600160a01b03811660009081526003602090815260409182902054825182815280840190935260609290918391906020820181803683375050506020810183905290506000805b60208160ff161015611c0157828160ff1681518110611bd557fe5b01602001516001600160f81b03191615611bf457816001019150611bf9565b611c01565b600101611bba565b5060608160ff166040519080825280601f01601f191660200182016040528015611c32576020820181803683370190505b50905060005b8260ff168160ff161015611c8e57838160ff1681518110611c5557fe5b602001015160f81c60f81b828260ff1681518110611c6f57fe5b60200101906001600160f81b031916908160001a905350600101611c38565b5095945050505050565b61100481565b60006008821115611cc757611cc0836007198401600a0a63ffffffff611d8116565b9050611ce1565b611cde836008849003600a0a63ffffffff611dda16565b90505b92915050565b6001600160a01b0380841660009081526007602090815260408083209385168352929052208054611d1e908463ffffffff611e1c16565b81554262015180016001820181905560408051858152602081019290925280516001600160a01b03808616939088169289927f446d1aa056e7b903901f49880e9f252762c1b81dc4301cf28db6dae526497eaa9281900390910190a45050505050565b600082611d9057506000611ce1565b82820282848281611d9d57fe5b0414611cde5760405162461bcd60e51b8152600401808060200182810382526021815260200180611fa86021913960400191505060405180910390fd5b6000611cde83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250611e76565b600082820183811015611cde576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b60008183611f025760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015611ec7578181015183820152602001611eaf565b50505050905090810190601f168015611ef45780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b506000838581611f0e57fe5b049594505050505056fe746865206d6573736167652073656e646572206d75737420626520696e63656e746976697a6520636f6e7472616374616d6f756e7420697320746f6f206c617267652c20657863656564206d6178696d756d206265703220746f6b656e20616d6f756e74746865206d73672073656e646572206d75737420626520746f6b656e207265636f76657220706f7274616c536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d73672073656e646572206d75737420626520746f6b656e4d616e6167657274686520636f6e7472616374206e6f7420696e69742079657400000000000000a164736f6c6343000604000a \ No newline at end of file 
+60806040526004361061036f5760003560e01c80639509b980116101c6578063bbface1f116100f7578063e04c83a711610095578063f9a2bbc71161006f578063f9a2bbc714610e57578063fa9e915914610e6c578063fc1a598f14610e81578063fd6a687914610eb4576103b7565b8063e04c83a714610de5578063e1c7392a14610e0f578063e8f35cea14610e24576103b7565b8063c8509d81116100d1578063c8509d8114610960578063cf41984414610d80578063dc927faf14610dbb578063df8079e914610dd0576103b7565b8063bbface1f14610d05578063bd46646114610d38578063c81b166214610d6b576103b7565b8063aa82dce111610164578063ac4317511161013e578063ac43175114610bd7578063b99328c514610ca2578063b9fd21e314610cdb578063ba35ead614610cf0576103b7565b8063aa82dce114610b7f578063aad5606314610b94578063ab51bb9614610ba9576103b7565b80639dc09262116101a05780639dc0926214610af9578063a1a11bf514610b0e578063a78abc1614610b23578063aa7415f514610b38576103b7565b80639509b98014610a725780639a854bbd14610aab5780639a99b4f014610ac0576103b7565b806359b92789116102a057806375d47a0a1161023e578063831d65d111610218578063831d65d1146109605780638525db03146109e55780638eff336c14610a1e57806393ab703f14610a5d576103b7565b806375d47a0a146108f7578063799758b91461090c5780637e434d541461094b576103b7565b80636e0565201161027a5780636e056520146107775780636e47b482146108a357806371d30863146108b8578063727be1f8146108cd576103b7565b806359b927891461070d5780635d499b1b14610737578063613684751461074c576103b7565b80633fd8b02f1161030d578063493279b1116102e7578063493279b1146106a257806350432d32146106ce57806351b4dce3146106e357806351e80672146106f8576103b7565b80633fd8b02f1461066357806343756e5c1461067857806343a368b91461068d576103b7565b8063149d14d911610349578063149d14d91461052257806328087028146105495780632ae454831461055e5780633d713223146105b2576103b7565b80630e2374a5146103bc5780631182b875146103ed57806312234582146104e7576103b7565b366103b75734156103b5576040805133815234602082015281517f6c98249d85d88c3753a04a22230f595e4dc8d3dc86c34af35deeeedc861b89db929181900390910190a15b005b600080fd5b3480156103c857600080fd5b506103d1610ec9565b604080516001600160a01b039092168252519081900360200190f35b3480156103f957600080fd5b506104726004803603604081101561041057600080fd5b60ff8235169190810190604081016020820135600160201b81111561043457600080fd5b82018360208201111561044657600080fd5b803590602001918460018302840111600160201b8311171561046757600080fd5b509092509050610ecf565b6040805160208082528351818301528351919283929083019185019080838360005b838110156104ac578181015183820152602001610494565b50505050905090810190601f1680156104d95780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156104f357600080fd5b506103b56004803603604081101561050a57600080fd5b506001600160a01b0381358116916020013516610f17565b34801561052e57600080fd5b50610537610f91565b60408051918252519081900360200190f35b34801561055557600080fd5b506103d1610f97565b34801561056a57600080fd5b506105996004803603604081101561058157600080fd5b506001600160a01b0381358116916020013516610f9d565b6040805192835260208301919091528051918290030190f35b3480156105be57600080fd5b506103d1600480360360208110156105d557600080fd5b810190602081018135600160201b8111156105ef57600080fd5b82018360208201111561060157600080fd5b803590602001918460018302840111600160201b8311171561062257600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610fc1945050505050565b34801561066f57600080fd5b50610537610fe2565b34801561068457600080fd5b506103d1610fe8565b34801561069957600080fd5b50610537610fee565b3480156106ae57600080fd5b506106b7610ffa565b6040805161ffff9092168252519081900360200190f35b3480156106da57600080fd5b50610537610fff565b3480156106ef57600080fd5b50
6103d161100a565b34801561070457600080fd5b506103d1611010565b34801561071957600080fd5b506103d16004803603602081101561073057600080fd5b5035611016565b34801561074357600080fd5b50610537611031565b34801561075857600080fd5b5061076161103a565b6040805160ff9092168252519081900360200190f35b61088f6004803603608081101561078d57600080fd5b810190602081018135600160201b8111156107a757600080fd5b8201836020820111156107b957600080fd5b803590602001918460208302840111600160201b831117156107da57600080fd5b919390929091602081019035600160201b8111156107f757600080fd5b82018360208201111561080957600080fd5b803590602001918460208302840111600160201b8311171561082a57600080fd5b919390929091602081019035600160201b81111561084757600080fd5b82018360208201111561085957600080fd5b803590602001918460208302840111600160201b8311171561087a57600080fd5b91935091503567ffffffffffffffff1661103f565b604080519115158252519081900360200190f35b3480156108af57600080fd5b506103d1611085565b3480156108c457600080fd5b5061053761108b565b3480156108d957600080fd5b5061088f600480360360208110156108f057600080fd5b5035611091565b34801561090357600080fd5b506103d16110cd565b34801561091857600080fd5b506103b56004803603606081101561092f57600080fd5b508035906001600160a01b0360208201351690604001356110d3565b34801561095757600080fd5b506103d1611383565b34801561096c57600080fd5b506103b56004803603604081101561098357600080fd5b60ff8235169190810190604081016020820135600160201b8111156109a757600080fd5b8201836020820111156109b957600080fd5b803590602001918460018302840111600160201b831117156109da57600080fd5b509092509050611389565b3480156109f157600080fd5b506103b560048036036040811015610a0857600080fd5b50803590602001356001600160a01b03166113ce565b348015610a2a57600080fd5b506103b560048036036060811015610a4157600080fd5b508035906001600160a01b036020820135169060400135611537565b348015610a6957600080fd5b50610537611577565b348015610a7e57600080fd5b506103b560048036036040811015610a9557600080fd5b506001600160a01b03813516906020013561157d565b348015610ab757600080fd5b5061053761163f565b348015610acc57600080fd5b5061053760048036036040811015610ae357600080fd5b506001600160a01b03813516906020013561164b565b348015610b0557600080fd5b506103d16116d1565b348015610b1a57600080fd5b506103d16116d7565b348015610b2f57600080fd5b5061088f6116dd565b61088f60048036036080811015610b4e57600080fd5b5080356001600160a01b03908116916020810135909116906040810135906060013567ffffffffffffffff1661103f565b348015610b8b57600080fd5b506103d16116e6565b348015610ba057600080fd5b506103d16116ec565b348015610bb557600080fd5b50610bbe6116f2565b6040805163ffffffff9092168252519081900360200190f35b348015610be357600080fd5b506103b560048036036040811015610bfa57600080fd5b810190602081018135600160201b811115610c1457600080fd5b820183602082011115610c2657600080fd5b803590602001918460018302840111600160201b83111715610c4757600080fd5b919390929091602081019035600160201b811115610c6457600080fd5b820183602082011115610c7657600080fd5b803590602001918460018302840111600160201b83111715610c9757600080fd5b5090925090506116f7565b348015610cae57600080fd5b506103b560048036036040811015610cc557600080fd5b50803590602001356001600160a01b0316611537565b348015610ce757600080fd5b50610537611737565b348015610cfc57600080fd5b50610537611741565b348015610d1157600080fd5b5061053760048036036020811015610d2857600080fd5b50356001600160a01b0316611747565b348015610d4457600080fd5b5061053760048036036020811015610d5b57600080fd5b50356001600160a01b0316611759565b348015610d7757600080fd5b506103d1611774565b348015610d8c57600080fd5b506103b560048036036040811015610da357600080fd5b506001600160a01b038135811691602001351661177a565b348015610dc757600080fd5b506103d1611a4e565b348015610ddc57600080fd5b506103d1611
a54565b348015610df157600080fd5b5061088f60048036036020811015610e0857600080fd5b5035611a5a565b348015610e1b57600080fd5b506103b5611ab2565b348015610e3057600080fd5b5061053760048036036020811015610e4757600080fd5b50356001600160a01b0316611b52565b348015610e6357600080fd5b506103d1611b64565b348015610e7857600080fd5b50610537611b6a565b348015610e8d57600080fd5b5061047260048036036020811015610ea457600080fd5b50356001600160a01b0316611b70565b348015610ec057600080fd5b506103d1611c97565b61200181565b60005460609060ff16610f17576040805162461bcd60e51b81526020600482015260196024820152600080516020612047833981519152604482015290519081900360640190fd5b3361200014610f575760405162461bcd60e51b815260040180806020018281038252602f815260200180611ff5602f913960400191505060405180910390fd5b6040805162461bcd60e51b815260206004820152600a60248201526919195c1c9958d85d195960b21b604482015290519081900360640190fd5b60015490565b61200581565b60076020908152600092835260408084209091529082529020805460019091015482565b6020908101516000908152600490915260409020546001600160a01b031690565b60055481565b61100181565b670de0b6b3a764000081565b606181565b66071afd498d000081565b61200681565b61200081565b6000908152600460205260409020546001600160a01b031690565b6402540be40081565b600881565b6000805460ff16610f57576040805162461bcd60e51b81526020600482015260196024820152600080516020612047833981519152604482015290519081900360640190fd5b61100581565b60015481565b6040805162461bcd60e51b815260206004820152600a60248201526919195c1c9958d85d195960b21b6044820152905160009181900360640190fd5b61100881565b60005460ff16611118576040805162461bcd60e51b81526020600482015260196024820152600080516020612047833981519152604482015290519081900360640190fd5b33613000146111585760405162461bcd60e51b815260040180806020018281038252602b815260200180611f7b602b913960400191505060405180910390fd5b677ce66c50e284000081111561119f5760405162461bcd60e51b8152600401808060200182810382526035815260200180611f466035913960400191505060405180910390fd5b60006221272160e91b8414611309576000848152600460205260409020546001600160a01b031680611212576040805184815290516001600160a01b0386169187917fc16ee9013bf67c846d37735983debb0acc5b2d1419cb5931c9843ad4689505499181900360200190a3505061137e565b6001600160a01b0381166000908152600260205260409020546112358482611c9d565b604080516370a0823160e01b8152306004820152905191945084916001600160a01b038516916370a08231916024808301926020929190829003018186803b15801561128057600080fd5b505afa158015611294573d6000803e3d6000fd5b505050506040513d60208110156112aa57600080fd5b505110156112f6576040805162461bcd60e51b8152602060048201526014602482015273696e73756666696369656e742062616c616e636560601b604482015290519081900360640190fd5b61130286838588611ce6565b505061137c565b61131e826402540be40063ffffffff611d7f16565b90508047101561136c576040805162461bcd60e51b8152602060048201526014602482015273696e73756666696369656e742062616c616e636560601b604482015290519081900360640190fd5b600061137a85828487611ce6565b505b505b505050565b61200381565b60005460ff16610f17576040805162461bcd60e51b81526020600482015260196024820152600080516020612047833981519152604482015290519081900360640190fd5b336130001461140e5760405162461bcd60e51b815260040180806020018281038252602b815260200180611f7b602b913960400191505060405180910390fd5b60006221272160e91b831461147957506000828152600460205260409020546001600160a01b031680611479576040805162461bcd60e51b815260206004820152600e60248201526d1a5b9d985b1a59081cde5b589bdb60921b604482015290519081900360640190fd5b6001600160a01b03808216600090815260076020908152604080832093861683529290522080546114e4576040805162461bcd60e51b815260206004820152601060248201526f1b9bc81b1bd8dad95908185b5bdd5b9d60821b
604482015290519081900360640190fd5b8054600082556040805182815290516001600160a01b03808716929086169188917f8041a9a8704332594e2884f5e0f942281cdd7611854c365b4d0aa70b2295d6b6919081900360200190a45050505050565b3361100814610f575760405162461bcd60e51b81526004018080602001828103825260238152602001806120246023913960400191505060405180910390fd5b61012c81565b81806001600160a01b031663893d20e86040518163ffffffff1660e01b815260040160206040518083038186803b1580156115b757600080fd5b505afa1580156115cb573d6000803e3d6000fd5b505050506040513d60208110156115e157600080fd5b50516001600160a01b03163314610f57576040805162461bcd60e51b815260206004820152601860248201527f6e6f74206f776e6572206f6620424550323020746f6b656e0000000000000000604482015290519081900360640190fd5b677ce66c50e284000081565b6000805460ff16611691576040805162461bcd60e51b81526020600482015260196024820152600080516020612047833981519152604482015290519081900360640190fd5b3361100514610f575760405162461bcd60e51b815260040180806020018281038252602f815260200180611f17602f913960400191505060405180910390fd5b61100781565b61100681565b60005460ff1681565b61200281565b61300081565b600081565b3361100714610f575760405162461bcd60e51b815260040180806020018281038252602e815260200180611fc7602e913960400191505060405180910390fd5b6221272160e91b81565b61c35081565b60026020526000908152604090205481565b6001600160a01b031660009081526003602052604090205490565b61100281565b60085460ff16600214156117c6576040805162461bcd60e51b815260206004820152600e60248201526d4e6f2072652d656e7472616e637960901b604482015290519081900360640190fd5b6008805460ff191660021790556001600160a01b038083166000908152600760209081526040808320938516835292905220805461183e576040805162461bcd60e51b815260206004820152601060248201526f1b9bc81b1bd8dad95908185b5bdd5b9d60821b604482015290519081900360640190fd5b8060010154421015611897576040805162461bcd60e51b815260206004820152601760248201527f7374696c6c206f6e206c6f636b696e6720706572696f64000000000000000000604482015290519081900360640190fd5b805460008083556001600160a01b03851661190a576040516001600160a01b038516906127109084906000818181858888f193505050503d80600081146118fa576040519150601f19603f3d011682016040523d82523d6000602084013e6118ff565b606091505b50508091505061199d565b846001600160a01b031663a9059cbb61c35086856040518463ffffffff1660e01b815260040180836001600160a01b03166001600160a01b0316815260200182815260200192505050602060405180830381600088803b15801561196d57600080fd5b5087f1158015611981573d6000803e3d6000fd5b50505050506040513d602081101561199857600080fd5b505190505b806119ef576040805162461bcd60e51b815260206004820152601e60248201527f776974686472617720756e6c6f636b656420746f6b656e206661696c65640000604482015290519081900360640190fd5b836001600160a01b0316856001600160a01b03167f832fc3e25f2b3e6fb0eb59419a73cba405f2a249fce75f7e31ea5a457a0323f1846040518082815260200191505060405180910390a350506008805460ff19166001179055505050565b61100381565b61200481565b60003361200214610f57576040805162461bcd60e51b815260206004820152601f60248201527f746865206d73672073656e646572206d757374206265207374616b6548756200604482015290519081900360640190fd5b60005460ff1615611b0a576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b66071afd498d000060019081556000808052600260205260127fac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b55805460ff19169091179055565b60066020526000908152604090205481565b61100081565b61271081565b6001600160a01b03811660009081526003602090815260409182902054825182815280840190935260609290918391906020820181803683375050506020810183905290506000805b60208160ff161015611c005
7828160ff1681518110611bd457fe5b01602001516001600160f81b03191615611bf357816001019150611bf8565b611c00565b600101611bb9565b5060608160ff166040519080825280601f01601f191660200182016040528015611c31576020820181803683370190505b50905060005b8260ff168160ff161015611c8d57838160ff1681518110611c5457fe5b602001015160f81c60f81b828260ff1681518110611c6e57fe5b60200101906001600160f81b031916908160001a905350600101611c37565b5095945050505050565b61100481565b60006008821115611cc657611cbf836007198401600a0a63ffffffff611d7f16565b9050611ce0565b611cdd836008849003600a0a63ffffffff611dd816565b90505b92915050565b6001600160a01b0380841660009081526007602090815260408083209385168352929052208054611d1d908463ffffffff611e1a16565b81554261012c016001820181905560408051858152602081019290925280516001600160a01b03808616939088169289927f446d1aa056e7b903901f49880e9f252762c1b81dc4301cf28db6dae526497eaa9281900390910190a45050505050565b600082611d8e57506000611ce0565b82820282848281611d9b57fe5b0414611cdd5760405162461bcd60e51b8152600401808060200182810382526021815260200180611fa66021913960400191505060405180910390fd5b6000611cdd83836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250611e74565b600082820183811015611cdd576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b60008183611f005760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015611ec5578181015183820152602001611ead565b50505050905090810190601f168015611ef25780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b506000838581611f0c57fe5b049594505050505056fe746865206d6573736167652073656e646572206d75737420626520696e63656e746976697a6520636f6e7472616374616d6f756e7420697320746f6f206c617267652c20657863656564206d6178696d756d206265703220746f6b656e20616d6f756e74746865206d73672073656e646572206d75737420626520746f6b656e207265636f76657220706f7274616c536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e7472616374746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d73672073656e646572206d75737420626520746f6b656e4d616e6167657274686520636f6e7472616374206e6f7420696e69742079657400000000000000a164736f6c6343000604000a \ No newline at end of file diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go index 7ba62aaa60..ac5bdfd2be 100644 --- a/core/systemcontracts/upgrade.go +++ b/core/systemcontracts/upgrade.go @@ -781,87 +781,87 @@ func init() { Configs: []*UpgradeConfig{ { ContractAddr: common.HexToAddress(ValidatorContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetValidatorContract, }, { ContractAddr: common.HexToAddress(SlashContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetSlashContract, }, { ContractAddr: common.HexToAddress(SystemRewardContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: 
"https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetSystemRewardContract, }, { ContractAddr: common.HexToAddress(LightClientContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetLightClientContract, }, { ContractAddr: common.HexToAddress(TokenHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetTokenHubContract, }, { ContractAddr: common.HexToAddress(RelayerIncentivizeContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetRelayerIncentivizeContract, }, { ContractAddr: common.HexToAddress(RelayerHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetRelayerHubContract, }, { ContractAddr: common.HexToAddress(GovHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetGovHubContract, }, { ContractAddr: common.HexToAddress(TokenManagerContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetTokenManagerContract, }, { ContractAddr: common.HexToAddress(CrossChainContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetCrossChainContract, }, { ContractAddr: common.HexToAddress(StakingContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetStakingContract, }, { ContractAddr: common.HexToAddress(StakeHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetStakeHubContract, }, { ContractAddr: common.HexToAddress(StakeCreditContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetStakeCreditContract, }, { ContractAddr: common.HexToAddress(GovernorContract), - CommitUrl: 
"https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetGovernorContract, }, { ContractAddr: common.HexToAddress(GovTokenContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetGovTokenContract, }, { ContractAddr: common.HexToAddress(TimelockContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetTimelockContract, }, { ContractAddr: common.HexToAddress(TokenRecoverPortalContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.MainnetTokenRecoverPortalContract, }, }, @@ -872,87 +872,87 @@ func init() { Configs: []*UpgradeConfig{ { ContractAddr: common.HexToAddress(ValidatorContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelValidatorContract, }, { ContractAddr: common.HexToAddress(SlashContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelSlashContract, }, { ContractAddr: common.HexToAddress(SystemRewardContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelSystemRewardContract, }, { ContractAddr: common.HexToAddress(LightClientContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelLightClientContract, }, { ContractAddr: common.HexToAddress(TokenHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelTokenHubContract, }, { ContractAddr: common.HexToAddress(RelayerIncentivizeContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelRelayerIncentivizeContract, }, { ContractAddr: common.HexToAddress(RelayerHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: 
"https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelRelayerHubContract, }, { ContractAddr: common.HexToAddress(GovHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelGovHubContract, }, { ContractAddr: common.HexToAddress(TokenManagerContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelTokenManagerContract, }, { ContractAddr: common.HexToAddress(CrossChainContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelCrossChainContract, }, { ContractAddr: common.HexToAddress(StakingContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelStakingContract, }, { ContractAddr: common.HexToAddress(StakeHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelStakeHubContract, }, { ContractAddr: common.HexToAddress(StakeCreditContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelStakeCreditContract, }, { ContractAddr: common.HexToAddress(GovernorContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelGovernorContract, }, { ContractAddr: common.HexToAddress(GovTokenContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelGovTokenContract, }, { ContractAddr: common.HexToAddress(TimelockContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelTimelockContract, }, { ContractAddr: common.HexToAddress(TokenRecoverPortalContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.ChapelTokenRecoverPortalContract, }, }, @@ -963,87 +963,87 @@ func init() { Configs: []*UpgradeConfig{ { ContractAddr: common.HexToAddress(ValidatorContract), - CommitUrl: 
"https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoValidatorContract, }, { ContractAddr: common.HexToAddress(SlashContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoSlashContract, }, { ContractAddr: common.HexToAddress(SystemRewardContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoSystemRewardContract, }, { ContractAddr: common.HexToAddress(LightClientContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoLightClientContract, }, { ContractAddr: common.HexToAddress(TokenHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoTokenHubContract, }, { ContractAddr: common.HexToAddress(RelayerIncentivizeContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoRelayerIncentivizeContract, }, { ContractAddr: common.HexToAddress(RelayerHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoRelayerHubContract, }, { ContractAddr: common.HexToAddress(GovHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoGovHubContract, }, { ContractAddr: common.HexToAddress(TokenManagerContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoTokenManagerContract, }, { ContractAddr: common.HexToAddress(CrossChainContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoCrossChainContract, }, { ContractAddr: common.HexToAddress(StakingContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoStakingContract, }, { ContractAddr: 
common.HexToAddress(StakeHubContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoStakeHubContract, }, { ContractAddr: common.HexToAddress(StakeCreditContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoStakeCreditContract, }, { ContractAddr: common.HexToAddress(GovernorContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoGovernorContract, }, { ContractAddr: common.HexToAddress(GovTokenContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoGovTokenContract, }, { ContractAddr: common.HexToAddress(TimelockContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoTimelockContract, }, { ContractAddr: common.HexToAddress(TokenRecoverPortalContract), - CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/d1564095cca3bfbd303a5e3de2ad90e719d12564", + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/020c0459e37d1f9d635c1cff86dd1099ab1383fa", Code: pascal.RialtoTokenRecoverPortalContract, }, }, From be0eb10f1f0b07ec95c6fb13cf6c5fc4d986a453 Mon Sep 17 00:00:00 2001 From: Eric <45141191+zlacfzy@users.noreply.github.com> Date: Tue, 15 Oct 2024 19:49:04 +0800 Subject: [PATCH 3/5] consensus/parlia.go: make distributing incoming tx more independent (#2735) --- consensus/parlia/parlia.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 79ff3f93af..a06067b25b 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -1806,27 +1806,30 @@ func (p *Parlia) getCurrentValidators(blockHash common.Hash, blockNum *big.Int) func (p *Parlia) distributeIncoming(val common.Address, state *state.StateDB, header *types.Header, chain core.ChainContext, txs *[]*types.Transaction, receipts *[]*types.Receipt, receivedTxs *[]*types.Transaction, usedGas *uint64, mining bool) error { coinbase := header.Coinbase - balance := state.GetBalance(consensus.SystemAddress) - if balance.Cmp(common.U2560) <= 0 { - return nil - } - state.SetBalance(consensus.SystemAddress, common.U2560) - state.AddBalance(coinbase, balance) doDistributeSysReward := !p.chainConfig.IsKepler(header.Number, header.Time) && state.GetBalance(common.HexToAddress(systemcontracts.SystemRewardContract)).Cmp(maxSystemBalance) < 0 if doDistributeSysReward { + balance := state.GetBalance(consensus.SystemAddress) rewards := new(uint256.Int) rewards = rewards.Rsh(balance, systemRewardPercent) if rewards.Cmp(common.U2560) > 0 { + state.SetBalance(consensus.SystemAddress, balance.Sub(balance, rewards)) +
state.AddBalance(coinbase, rewards) err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining) if err != nil { return err } log.Trace("distribute to system reward pool", "block hash", header.Hash(), "amount", rewards) - balance = balance.Sub(balance, rewards) } } + + balance := state.GetBalance(consensus.SystemAddress) + if balance.Cmp(common.U2560) <= 0 { + return nil + } + state.SetBalance(consensus.SystemAddress, common.U2560) + state.AddBalance(coinbase, balance) log.Trace("distribute to validator contract", "block hash", header.Hash(), "amount", balance) return p.distributeToValidator(balance.ToBig(), val, state, header, chain, txs, receipts, receivedTxs, usedGas, mining) } From 675449a1d9e02f130d6e23d5bc2803d8925c9fb0 Mon Sep 17 00:00:00 2001 From: Satyajit Das Date: Thu, 17 Oct 2024 13:23:06 +0530 Subject: [PATCH 4/5] core/txpool/legacypool: add overflowpool for txs (#2660) --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 30 +- core/txpool/legacypool/legacypool.go | 119 +++++++- core/txpool/legacypool/legacypool_test.go | 84 +++++- core/txpool/legacypool/tx_overflowpool.go | 171 +++++++++++ .../txpool/legacypool/tx_overflowpool_test.go | 266 ++++++++++++++++++ 6 files changed, 634 insertions(+), 37 deletions(-) create mode 100644 core/txpool/legacypool/tx_overflowpool.go create mode 100644 core/txpool/legacypool/tx_overflowpool_test.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 3a041fcc3a..5eb8fa4146 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -92,6 +92,7 @@ var ( utils.TxPoolGlobalSlotsFlag, utils.TxPoolAccountQueueFlag, utils.TxPoolGlobalQueueFlag, + utils.TxPoolOverflowPoolSlotsFlag, utils.TxPoolLifetimeFlag, utils.TxPoolReannounceTimeFlag, utils.BlobPoolDataDirFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 207628c161..f3aab43d70 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -458,6 +458,12 @@ var ( Value: ethconfig.Defaults.TxPool.GlobalQueue, Category: flags.TxPoolCategory, } + TxPoolOverflowPoolSlotsFlag = &cli.Uint64Flag{ + Name: "txpool.overflowpoolslots", + Usage: "Maximum number of transaction slots in overflow pool", + Value: ethconfig.Defaults.TxPool.OverflowPoolSlots, + Category: flags.TxPoolCategory, + } TxPoolLifetimeFlag = &cli.DurationFlag{ Name: "txpool.lifetime", Usage: "Maximum amount of time non-executable transaction are queued", @@ -1789,6 +1795,9 @@ func setTxPool(ctx *cli.Context, cfg *legacypool.Config) { if ctx.IsSet(TxPoolGlobalQueueFlag.Name) { cfg.GlobalQueue = ctx.Uint64(TxPoolGlobalQueueFlag.Name) } + if ctx.IsSet(TxPoolOverflowPoolSlotsFlag.Name) { + cfg.OverflowPoolSlots = ctx.Uint64(TxPoolOverflowPoolSlotsFlag.Name) + } if ctx.IsSet(TxPoolLifetimeFlag.Name) { cfg.Lifetime = ctx.Duration(TxPoolLifetimeFlag.Name) } @@ -2310,16 +2319,17 @@ func EnableNodeInfo(poolConfig *legacypool.Config, nodeInfo *p2p.NodeInfo) Setup return func() { // register node info into metrics metrics.NewRegisteredLabel("node-info", nil).Mark(map[string]interface{}{ - "Enode": nodeInfo.Enode, - "ENR": nodeInfo.ENR, - "ID": nodeInfo.ID, - "PriceLimit": poolConfig.PriceLimit, - "PriceBump": poolConfig.PriceBump, - "AccountSlots": poolConfig.AccountSlots, - "GlobalSlots": poolConfig.GlobalSlots, - "AccountQueue": poolConfig.AccountQueue, - "GlobalQueue": poolConfig.GlobalQueue, - "Lifetime": poolConfig.Lifetime, + "Enode": nodeInfo.Enode, + "ENR": nodeInfo.ENR, + "ID": nodeInfo.ID, + "PriceLimit": poolConfig.PriceLimit, + "PriceBump": poolConfig.PriceBump, + "AccountSlots": 
poolConfig.AccountSlots, + "GlobalSlots": poolConfig.GlobalSlots, + "AccountQueue": poolConfig.AccountQueue, + "GlobalQueue": poolConfig.GlobalQueue, + "OverflowPoolSlots": poolConfig.OverflowPoolSlots, + "Lifetime": poolConfig.Lifetime, }) } } diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 91cd01e7b4..0d5a1fb183 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -19,6 +19,7 @@ package legacypool import ( "errors" + "fmt" "math" "math/big" "sort" @@ -99,10 +100,11 @@ var ( // that this number is pretty low, since txpool reorgs happen very frequently. dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015)) - pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) - queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) - localGauge = metrics.NewRegisteredGauge("txpool/local", nil) - slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) + pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) + queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) + localGauge = metrics.NewRegisteredGauge("txpool/local", nil) + slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) + OverflowPoolGauge = metrics.NewRegisteredGauge("txpool/overflowpool", nil) reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) ) @@ -133,10 +135,11 @@ type Config struct { PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) - AccountSlots uint64 // Number of executable transaction slots guaranteed per account - GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts - AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account - GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts + AccountSlots uint64 // Number of executable transaction slots guaranteed per account + GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts + AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account + GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts + OverflowPoolSlots uint64 // Maximum number of transaction slots in overflow pool Lifetime time.Duration // Maximum amount of time non-executable transaction are queued ReannounceTime time.Duration // Duration for announcing local pending transactions again @@ -150,10 +153,11 @@ var DefaultConfig = Config{ PriceLimit: 1, PriceBump: 10, - AccountSlots: 16, - GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio - AccountQueue: 64, - GlobalQueue: 1024, + AccountSlots: 16, + GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio + AccountQueue: 64, + GlobalQueue: 1024, + OverflowPoolSlots: 0, Lifetime: 3 * time.Hour, ReannounceTime: 10 * 365 * 24 * time.Hour, @@ -235,6 +239,8 @@ type LegacyPool struct { all *lookup // All transactions to allow lookups priced *pricedList // All transactions sorted by price + localBufferPool *TxOverflowPool // Local buffer transactions + reqResetCh chan *txpoolResetRequest reqPromoteCh chan *accountSet queueTxEventCh chan *types.Transaction @@ -272,6 +278,7 @@ func New(config Config, chain BlockChain) *LegacyPool { reorgDoneCh: make(chan chan struct{}), reorgShutdownCh: 
make(chan struct{}), initDoneCh: make(chan struct{}), + localBufferPool: NewTxOverflowPoolHeap(config.OverflowPoolSlots), } pool.locals = newAccountSet(pool.signer) for _, addr := range config.Locals { @@ -408,7 +415,6 @@ func (pool *LegacyPool) loop() { if !pool.locals.contains(addr) { continue } - for _, tx := range list.Flatten() { // Default ReannounceTime is 10 years, won't announce by default. if time.Since(tx.Time()) < pool.config.ReannounceTime { @@ -517,6 +523,17 @@ func (pool *LegacyPool) Stats() (int, int) { return pool.stats() } +func (pool *LegacyPool) statsOverflowPool() int { + pool.mu.RLock() + defer pool.mu.RUnlock() + + if pool.localBufferPool == nil { + return 0 + } + + return pool.localBufferPool.Size() +} + // stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (pool *LegacyPool) stats() (int, int) { @@ -831,6 +848,8 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e } } + pool.addToOverflowPool(drop, isLocal) + // Kick out the underpriced remote transactions. for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) @@ -887,6 +906,29 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e return replaced, nil } +func (pool *LegacyPool) addToOverflowPool(drop types.Transactions, isLocal bool) { + // calculate total number of slots in drop. Accordingly add them to OverflowPool (if there is space) + availableSlotsOverflowPool := pool.availableSlotsOverflowPool() + if availableSlotsOverflowPool > 0 { + // transfer availableSlotsOverflowPool number of transactions slots from drop to OverflowPool + currentSlotsUsed := 0 + for i, tx := range drop { + txSlots := numSlots(tx) + if currentSlotsUsed+txSlots <= availableSlotsOverflowPool { + from, _ := types.Sender(pool.signer, tx) + pool.localBufferPool.Add(tx) + log.Debug("adding to OverflowPool", "transaction", tx.Hash().String(), "from", from.String()) + currentSlotsUsed += txSlots + } else { + log.Debug("not all got added to OverflowPool", "totalAdded", i+1) + return + } + } + } else { + log.Debug("adding to OverflowPool unsuccessful", "availableSlotsOverflowPool", availableSlotsOverflowPool) + } +} + // isGapped reports whether the given transaction is immediately executable. func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool { // Short circuit if transaction falls within the scope of the pending list @@ -1333,7 +1375,6 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, reorgDurationTimer.Update(time.Since(t0)) }(time.Now()) defer close(done) - var promoteAddrs []common.Address if dirtyAccounts != nil && reset == nil { // Only dirty accounts need to be promoted, unless we're resetting. 
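The addToOverflowPool hunk above feeds dropped transactions into the new overflow buffer; the buffer itself (TxOverflowPool, added later in this patch) is a capacity-bounded min-heap ordered by insertion time that is drained back into the main pool after a reorg. A minimal standalone sketch of that buffering pattern follows; the names item and overflowBuffer, and the string id standing in for *types.Transaction, are illustrative only and not the patch's API (the real pool orders by time.Now().UnixNano() and also tracks slot usage via numSlots).

package main

import (
	"container/heap"
	"fmt"
)

// item stands in for a buffered transaction; seq records insertion order.
type item struct {
	id    string
	seq   int64
	index int
}

type itemHeap []*item

func (h itemHeap) Len() int           { return len(h) }
func (h itemHeap) Less(i, j int) bool { return h[i].seq < h[j].seq }
func (h itemHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].index = i
	h[j].index = j
}
func (h *itemHeap) Push(x interface{}) {
	it := x.(*item)
	it.index = len(*h)
	*h = append(*h, it)
}
func (h *itemHeap) Pop() interface{} {
	old := *h
	n := len(old)
	it := old[n-1]
	old[n-1] = nil // drop the stale pointer
	*h = old[:n-1]
	return it
}

// overflowBuffer keeps at most maxSize items; when full, the oldest entry is evicted.
type overflowBuffer struct {
	h       itemHeap
	maxSize int
	nextSeq int64
}

func (b *overflowBuffer) Add(id string) {
	if b.h.Len() >= b.maxSize {
		heap.Pop(&b.h) // evict the oldest item to make room, as the bounded overflow pool does
	}
	heap.Push(&b.h, &item{id: id, seq: b.nextSeq})
	b.nextSeq++
}

// Flush removes and returns up to n of the oldest buffered items,
// the way transferTransactions drains the overflow pool into the main pool.
func (b *overflowBuffer) Flush(n int) []string {
	if n > b.h.Len() {
		n = b.h.Len()
	}
	out := make([]string, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, heap.Pop(&b.h).(*item).id)
	}
	return out
}

func main() {
	buf := &overflowBuffer{maxSize: 2}
	buf.Add("tx1")
	buf.Add("tx2")
	buf.Add("tx3")            // buffer full: tx1, the oldest, is evicted
	fmt.Println(buf.Flush(2)) // prints [tx2 tx3]
}
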
@@ -1391,6 +1432,9 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, pool.changesSinceReorg = 0 // Reset change counter pool.mu.Unlock() + // Transfer transactions from OverflowPool to MainPool for new block import + pool.transferTransactions() + // Notify subsystems for newly added transactions for _, tx := range promoted { addr, _ := types.Sender(pool.signer, tx) @@ -2038,3 +2082,50 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { func numSlots(tx *types.Transaction) int { return int((tx.Size() + txSlotSize - 1) / txSlotSize) } + +// transferTransactions moves transactions from OverflowPool to MainPool +func (pool *LegacyPool) transferTransactions() { + // Fail fast if the overflow pool is empty + if pool.localBufferPool.Size() == 0 { + return + } + + maxMainPoolSize := int(pool.config.GlobalSlots + pool.config.GlobalQueue) + // Use pool.all.Slots() to get the total slots used by all transactions + currentMainPoolSize := pool.all.Slots() + if currentMainPoolSize >= maxMainPoolSize { + return + } + + extraSlots := maxMainPoolSize - currentMainPoolSize + extraTransactions := (extraSlots + 3) / 4 // Since a transaction can take up to 4 slots + log.Debug("Will attempt to transfer from OverflowPool to MainPool", "transactions", extraTransactions) + txs := pool.localBufferPool.Flush(extraTransactions) + if len(txs) == 0 { + return + } + + pool.Add(txs, true, false) +} + +func (pool *LegacyPool) availableSlotsOverflowPool() int { + maxOverflowPoolSize := int(pool.config.OverflowPoolSlots) + availableSlots := maxOverflowPoolSize - pool.localBufferPool.Size() + if availableSlots > 0 { + return availableSlots + } + return 0 +} + +func (pool *LegacyPool) PrintTxStats() { + for _, l := range pool.pending { + for _, transaction := range l.txs.items { + from, _ := types.Sender(pool.signer, transaction) + fmt.Println("from: ", from, " Pending:", transaction.Hash().String(), transaction.GasFeeCap(), transaction.GasTipCap()) + } + } + + pool.localBufferPool.PrintTxStats() + fmt.Println("length of all: ", pool.all.Slots()) + fmt.Println("----------------------------------------------------") +} diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 5f7a625f13..53c62b9bd3 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -40,6 +40,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/holiman/uint256" + "github.com/stretchr/testify/assert" ) var ( @@ -1739,6 +1740,7 @@ func TestRepricingKeepsLocals(t *testing.T) { // Note, local transactions are never allowed to be dropped. 
func TestUnderpricing(t *testing.T) { t.Parallel() + testTxPoolConfig.OverflowPoolSlots = 5 // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) @@ -1931,6 +1933,8 @@ func TestUnderpricingDynamicFee(t *testing.T) { pool.config.GlobalSlots = 2 pool.config.GlobalQueue = 2 + pool.config.OverflowPoolSlots = 0 + // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) @@ -1955,7 +1959,6 @@ func TestUnderpricingDynamicFee(t *testing.T) { // Import the batch and that both pending and queued transactions match up pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 - pending, queued := pool.Stats() if pending != 3 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) @@ -1995,9 +1998,9 @@ func TestUnderpricingDynamicFee(t *testing.T) { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } - if err := validateEvents(events, 2); err != nil { + if err := validateEvents(events, 2); err != nil { // todo make it 4...After this validateEvents the pending becomes 3?! t.Fatalf("additional event firing failed: %v", err) } if err := validatePoolInternals(pool); err != nil { @@ -2012,11 +2015,12 @@ func TestUnderpricingDynamicFee(t *testing.T) { if err := pool.addLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } + pending, queued = pool.Stats() - if pending != 3 { + if pending != 3 { // 3 t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) } - if queued != 1 { + if queued != 1 { // 1 t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } if err := validateEvents(events, 2); err != nil { @@ -2032,41 +2036,51 @@ func TestUnderpricingDynamicFee(t *testing.T) { func TestDualHeapEviction(t *testing.T) { t.Parallel() + testTxPoolConfig.OverflowPoolSlots = 1 pool, _ := setupPoolWithConfig(eip1559Config) defer pool.Close() - pool.config.GlobalSlots = 10 - pool.config.GlobalQueue = 10 + pool.config.GlobalSlots = 2 + pool.config.GlobalQueue = 2 + pool.config.OverflowPoolSlots = 1 var ( highTip, highCap *types.Transaction baseFee int + highCapValue int64 + highTipValue int64 ) check := func(tx *types.Transaction, name string) { if pool.all.GetRemote(tx.Hash()) == nil { - t.Fatalf("highest %s transaction evicted from the pool", name) + t.Fatalf("highest %s transaction evicted from the pool, gasTip: %s, gasFeeCap: %s, hash: %s", name, highTip.GasTipCap().String(), highCap.GasFeeCap().String(), tx.Hash().String()) } } add := func(urgent bool) { - for i := 0; i < 20; i++ { + for i := 0; i < 4; i++ { var tx *types.Transaction // Create a test accounts and fund it key, _ := crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000)) if urgent { tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key) - highTip = tx + if int64(1+i) > highTipValue || (int64(1+i) == highTipValue && int64(baseFee+1+i) > highTip.GasFeeCap().Int64()) { + highTipValue = int64(1 + i) + highTip = tx + } } else { tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key) - highCap = tx + if 
int64(baseFee+200+i) > highCapValue { + highCapValue = int64(baseFee + 200 + i) + highCap = tx + } } pool.addRemotesSync([]*types.Transaction{tx}) } pending, queued := pool.Stats() - if pending+queued != 20 { - t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 10) + if pending+queued != 4 { + t.Fatalf("transaction count mismatch: have %d, want %d, pending %d, queued %d, OverflowPool %d", pending+queued, 5, pending, queued, pool.localBufferPool.Size()) } } @@ -2231,6 +2245,50 @@ func TestReplacement(t *testing.T) { } } +func TestTransferTransactions(t *testing.T) { + t.Parallel() + testTxPoolConfig.OverflowPoolSlots = 1 + pool, _ := setupPoolWithConfig(eip1559Config) + defer pool.Close() + + pool.config.GlobalSlots = 1 + pool.config.GlobalQueue = 2 + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + + tx := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0]) + from, _ := types.Sender(pool.signer, tx) + pool.addToOverflowPool([]*types.Transaction{tx}, true) + pending, queue := pool.Stats() + + assert.Equal(t, 0, pending, "pending transactions mismatched") + assert.Equal(t, 0, queue, "queued transactions mismatched") + assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected") + + tx2 := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) + pool.addToOverflowPool([]*types.Transaction{tx2}, true) + assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected") + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + pending, queue = pool.Stats() + + assert.Equal(t, 0, pending, "pending transactions mismatched") + assert.Equal(t, 1, queue, "queued transactions mismatched") + assert.Equal(t, 0, pool.statsOverflowPool(), "OverflowPool size unexpected") + + tx3 := dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[2]) + pool.addToOverflowPool([]*types.Transaction{tx3}, true) + pending, queue = pool.Stats() + + assert.Equal(t, 1, pending, "pending transactions mismatched") + assert.Equal(t, 0, queue, "queued transactions mismatched") + assert.Equal(t, 1, pool.statsOverflowPool(), "OverflowPool size unexpected") +} + // Tests that the pool rejects replacement dynamic fee transactions that don't // meet the minimum price bump required. 
func TestReplacementDynamicFee(t *testing.T) { diff --git a/core/txpool/legacypool/tx_overflowpool.go b/core/txpool/legacypool/tx_overflowpool.go new file mode 100644 index 0000000000..4bfd4b6f5a --- /dev/null +++ b/core/txpool/legacypool/tx_overflowpool.go @@ -0,0 +1,171 @@ +package legacypool + +import ( + "container/heap" + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// txHeapItem implements the Interface interface (https://pkg.go.dev/container/heap#Interface) of heap so that it can be heapified +type txHeapItem struct { + tx *types.Transaction + timestamp int64 // Unix timestamp (nanoseconds) of when the transaction was added + index int +} + +type txHeap []*txHeapItem + +func (h txHeap) Len() int { return len(h) } +func (h txHeap) Less(i, j int) bool { + return h[i].timestamp < h[j].timestamp +} +func (h txHeap) Swap(i, j int) { + if i < 0 || j < 0 || i >= len(h) || j >= len(h) { + return // Silently fail if indices are out of bounds + } + h[i], h[j] = h[j], h[i] + if h[i] != nil { + h[i].index = i + } + if h[j] != nil { + h[j].index = j + } +} + +func (h *txHeap) Push(x interface{}) { + item, ok := x.(*txHeapItem) + if !ok { + return + } + n := len(*h) + item.index = n + *h = append(*h, item) +} + +func (h *txHeap) Pop() interface{} { + old := *h + n := len(old) + if n == 0 { + return nil // Return nil if the heap is empty + } + item := old[n-1] + old[n-1] = nil // avoid memory leak + *h = old[0 : n-1] + if item != nil { + item.index = -1 // for safety + } + return item +} + +type TxOverflowPool struct { + txHeap txHeap + index map[common.Hash]*txHeapItem + mu sync.RWMutex + maxSize uint64 + totalSize int +} + +func NewTxOverflowPoolHeap(estimatedMaxSize uint64) *TxOverflowPool { + return &TxOverflowPool{ + txHeap: make(txHeap, 0, estimatedMaxSize), + index: make(map[common.Hash]*txHeapItem, estimatedMaxSize), + maxSize: estimatedMaxSize, + } +} + +func (tp *TxOverflowPool) Add(tx *types.Transaction) { + tp.mu.Lock() + defer tp.mu.Unlock() + + if _, exists := tp.index[tx.Hash()]; exists { + // Transaction already in pool, ignore + return + } + + if uint64(len(tp.txHeap)) >= tp.maxSize { + // Remove the oldest transaction to make space + oldestItem, ok := heap.Pop(&tp.txHeap).(*txHeapItem) + if !ok || oldestItem == nil { + return + } + delete(tp.index, oldestItem.tx.Hash()) + tp.totalSize -= numSlots(oldestItem.tx) + OverflowPoolGauge.Dec(1) + } + + item := &txHeapItem{ + tx: tx, + timestamp: time.Now().UnixNano(), + } + heap.Push(&tp.txHeap, item) + tp.index[tx.Hash()] = item + tp.totalSize += numSlots(tx) + OverflowPoolGauge.Inc(1) +} + +func (tp *TxOverflowPool) Get(hash common.Hash) (*types.Transaction, bool) { + tp.mu.RLock() + defer tp.mu.RUnlock() + if item, ok := tp.index[hash]; ok { + return item.tx, true + } + return nil, false +} + +func (tp *TxOverflowPool) Remove(hash common.Hash) { + tp.mu.Lock() + defer tp.mu.Unlock() + if item, ok := tp.index[hash]; ok { + heap.Remove(&tp.txHeap, item.index) + delete(tp.index, hash) + tp.totalSize -= numSlots(item.tx) + OverflowPoolGauge.Dec(1) + } +} + +func (tp *TxOverflowPool) Flush(n int) []*types.Transaction { + tp.mu.Lock() + defer tp.mu.Unlock() + if n > tp.txHeap.Len() { + n = tp.txHeap.Len() + } + txs := make([]*types.Transaction, n) + for i := 0; i < n; i++ { + item, ok := heap.Pop(&tp.txHeap).(*txHeapItem) + if !ok || item == nil { + continue + } + txs[i] = item.tx + delete(tp.index, item.tx.Hash()) + tp.totalSize -= numSlots(item.tx) + } + + 
OverflowPoolGauge.Dec(int64(n)) + return txs +} + +func (tp *TxOverflowPool) Len() int { + tp.mu.RLock() + defer tp.mu.RUnlock() + return tp.txHeap.Len() +} + +func (tp *TxOverflowPool) Size() int { + tp.mu.RLock() + defer tp.mu.RUnlock() + return tp.totalSize +} + +func (tp *TxOverflowPool) PrintTxStats() { + tp.mu.RLock() + defer tp.mu.RUnlock() + for _, item := range tp.txHeap { + tx := item.tx + fmt.Printf("Hash: %s, Timestamp: %d, GasFeeCap: %s, GasTipCap: %s\n", + tx.Hash().String(), item.timestamp, tx.GasFeeCap().String(), tx.GasTipCap().String()) + } +} diff --git a/core/txpool/legacypool/tx_overflowpool_test.go b/core/txpool/legacypool/tx_overflowpool_test.go new file mode 100644 index 0000000000..9a4aee5008 --- /dev/null +++ b/core/txpool/legacypool/tx_overflowpool_test.go @@ -0,0 +1,266 @@ +package legacypool + +import ( + "math/big" + rand2 "math/rand" + "testing" + "time" + + "github.com/cometbft/cometbft/libs/rand" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// Helper function to create a test transaction +func createTestTx(nonce uint64, gasPrice *big.Int) *types.Transaction { + to := common.HexToAddress("0x1234567890123456789012345678901234567890") + return types.NewTransaction(nonce, to, big.NewInt(1000), 21000, gasPrice, nil) +} + +func TestNewTxOverflowPoolHeap(t *testing.T) { + pool := NewTxOverflowPoolHeap(0) + if pool == nil { + t.Fatal("NewTxOverflowPoolHeap returned nil") + } + if pool.Len() != 0 { + t.Errorf("New pool should be empty, got length %d", pool.Len()) + } +} + +func TestTxOverflowPoolHeapAdd(t *testing.T) { + pool := NewTxOverflowPoolHeap(1) + tx := createTestTx(1, big.NewInt(1000)) + + pool.Add(tx) + if pool.Len() != 1 { + t.Errorf("Pool should have 1 transaction, got %d", pool.Len()) + } + + // Add the same transaction again + pool.Add(tx) + if pool.Len() != 1 { + t.Errorf("Pool should still have 1 transaction after adding duplicate, got %d", pool.Len()) + } +} + +func TestTxOverflowPoolHeapGet(t *testing.T) { + pool := NewTxOverflowPoolHeap(1) + tx := createTestTx(1, big.NewInt(1000)) + pool.Add(tx) + + gotTx, exists := pool.Get(tx.Hash()) + if !exists { + t.Fatal("Get returned false for existing transaction") + } + if gotTx.Hash() != tx.Hash() { + t.Errorf("Get returned wrong transaction. 
Want %v, got %v", tx.Hash(), gotTx.Hash()) + } + + _, exists = pool.Get(common.Hash{}) + if exists { + t.Error("Get returned true for non-existent transaction") + } +} + +func TestTxOverflowPoolHeapRemove(t *testing.T) { + pool := NewTxOverflowPoolHeap(1) + tx := createTestTx(1, big.NewInt(1000)) + pool.Add(tx) + + pool.Remove(tx.Hash()) + if pool.Len() != 0 { + t.Errorf("Pool should be empty after removing the only transaction, got length %d", pool.Len()) + } + + // Try to remove non-existent transaction + pool.Remove(common.Hash{}) + if pool.Len() != 0 { + t.Error("Removing non-existent transaction should not affect pool size") + } +} + +func TestTxOverflowPoolHeapPopN(t *testing.T) { + pool := NewTxOverflowPoolHeap(3) + tx1 := createTestTx(1, big.NewInt(1000)) + tx2 := createTestTx(2, big.NewInt(2000)) + tx3 := createTestTx(3, big.NewInt(3000)) + + pool.Add(tx1) + time.Sleep(time.Millisecond) // Ensure different timestamps + pool.Add(tx2) + time.Sleep(time.Millisecond) + pool.Add(tx3) + + popped := pool.Flush(2) + if len(popped) != 2 { + t.Fatalf("PopN(2) should return 2 transactions, got %d", len(popped)) + } + if popped[0].Hash() != tx1.Hash() || popped[1].Hash() != tx2.Hash() { + t.Error("PopN returned transactions in wrong order") + } + if pool.Len() != 1 { + t.Errorf("Pool should have 1 transaction left, got %d", pool.Len()) + } + + // Pop more than available + popped = pool.Flush(2) + if len(popped) != 1 { + t.Fatalf("PopN(2) should return 1 transaction when only 1 is left, got %d", len(popped)) + } + if popped[0].Hash() != tx3.Hash() { + t.Error("PopN returned wrong transaction") + } + if pool.Len() != 0 { + t.Errorf("Pool should be empty, got length %d", pool.Len()) + } +} + +func TestTxOverflowPoolHeapOrdering(t *testing.T) { + pool := NewTxOverflowPoolHeap(3) + tx1 := createTestTx(1, big.NewInt(1000)) + tx2 := createTestTx(2, big.NewInt(2000)) + tx3 := createTestTx(3, big.NewInt(3000)) + + pool.Add(tx2) + time.Sleep(time.Millisecond) // Ensure different timestamps + pool.Add(tx1) + pool.Add(tx3) // Added immediately after tx1, should have same timestamp but higher sequence + + popped := pool.Flush(3) + if len(popped) != 3 { + t.Fatalf("PopN(3) should return 3 transactions, got %d", len(popped)) + } + if popped[0].Hash() != tx2.Hash() || popped[1].Hash() != tx1.Hash() || popped[2].Hash() != tx3.Hash() { + t.Error("Transactions not popped in correct order (earliest timestamp first, then by sequence)") + } +} + +func TestTxOverflowPoolHeapLen(t *testing.T) { + pool := NewTxOverflowPoolHeap(2) + if pool.Len() != 0 { + t.Errorf("New pool should have length 0, got %d", pool.Len()) + } + + pool.Add(createTestTx(1, big.NewInt(1000))) + if pool.Len() != 1 { + t.Errorf("Pool should have length 1 after adding a transaction, got %d", pool.Len()) + } + + pool.Add(createTestTx(2, big.NewInt(2000))) + if pool.Len() != 2 { + t.Errorf("Pool should have length 2 after adding another transaction, got %d", pool.Len()) + } + + pool.Flush(1) + if pool.Len() != 1 { + t.Errorf("Pool should have length 1 after popping a transaction, got %d", pool.Len()) + } +} + +// Helper function to create a random test transaction +func createRandomTestTx() *types.Transaction { + nonce := uint64(rand.Intn(1000000)) + to := common.BytesToAddress(rand.Bytes(20)) + amount := new(big.Int).Rand(rand2.New(rand2.NewSource(rand.Int63())), big.NewInt(1e18)) + gasLimit := uint64(21000) + gasPrice := new(big.Int).Rand(rand2.New(rand2.NewSource(rand.Int63())), big.NewInt(1e9)) + data := rand.Bytes(100) + return 
types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data) +} + +func createRandomTestTxs(n int) []*types.Transaction { + txs := make([]*types.Transaction, n) + for i := 0; i < n; i++ { + txs[i] = createRandomTestTx() + } + return txs +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/core/txpool/legacypool +// BenchmarkTxOverflowPoolHeapAdd-8 813326 2858 ns/op +func BenchmarkTxOverflowPoolHeapAdd(b *testing.B) { + pool := NewTxOverflowPoolHeap(uint64(b.N)) + txs := createRandomTestTxs(b.N) + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.Add(txs[i]) + } +} + +// BenchmarkTxOverflowPoolHeapGet-8 32613938 35.63 ns/op +func BenchmarkTxOverflowPoolHeapGet(b *testing.B) { + pool := NewTxOverflowPoolHeap(1000) + txs := createRandomTestTxs(1000) + for _, tx := range txs { + pool.Add(tx) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.Get(txs[i%1000].Hash()) + } +} + +// BenchmarkTxOverflowPoolHeapRemove-8 3020841 417.8 ns/op +func BenchmarkTxOverflowPoolHeapRemove(b *testing.B) { + pool := NewTxOverflowPoolHeap(uint64(b.N)) + txs := createRandomTestTxs(b.N) + for _, tx := range txs { + pool.Add(tx) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.Remove(txs[i].Hash()) + } +} + +// BenchmarkTxOverflowPoolHeapFlush-8 42963656 29.90 ns/op +func BenchmarkTxOverflowPoolHeapFlush(b *testing.B) { + pool := NewTxOverflowPoolHeap(1000) + txs := createRandomTestTxs(1000) + for _, tx := range txs { + pool.Add(tx) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.Flush(10) + } +} + +// BenchmarkTxOverflowPoolHeapLen-8 79147188 20.07 ns/op +func BenchmarkTxOverflowPoolHeapLen(b *testing.B) { + pool := NewTxOverflowPoolHeap(1000) + txs := createRandomTestTxs(1000) + for _, tx := range txs { + pool.Add(tx) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.Len() + } +} + +// BenchmarkTxOverflowPoolHeapAddRemove-8 902896 1546 ns/op +func BenchmarkTxOverflowPoolHeapAddRemove(b *testing.B) { + pool := NewTxOverflowPoolHeap(uint64(b.N)) + txs := createRandomTestTxs(b.N) + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.Add(txs[i]) + pool.Remove(txs[i].Hash()) + } +} + +// BenchmarkTxOverflowPoolHeapAddFlush-8 84417 14899 ns/op +func BenchmarkTxOverflowPoolHeapAddFlush(b *testing.B) { + pool := NewTxOverflowPoolHeap(uint64(b.N * 10)) + txs := createRandomTestTxs(b.N * 10) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 10; j++ { + pool.Add(txs[i*10+j]) + } + pool.Flush(10) + } +} From caa9e7991a95f7c5fbc27d8651ca1ad8c52fd95b Mon Sep 17 00:00:00 2001 From: buddho Date: Wed, 23 Oct 2024 16:15:39 +0800 Subject: [PATCH 5/5] feat: remove pipecommit (#2742) --- cmd/geth/main.go | 1 - cmd/utils/flags.go | 8 - core/block_validator.go | 30 +-- core/blockchain.go | 78 +------- core/blockchain_diff_test.go | 2 +- core/blockchain_reader.go | 6 - core/blockchain_test.go | 271 ++++++++++---------------- core/error.go | 3 - core/state/snapshot/difflayer.go | 43 +--- core/state/snapshot/difflayer_test.go | 36 ++-- core/state/snapshot/disklayer.go | 17 +- core/state/snapshot/disklayer_test.go | 8 +- core/state/snapshot/iterator_test.go | 90 ++++----- core/state/snapshot/journal.go | 2 +- core/state/snapshot/snapshot.go | 23 +-- core/state/snapshot/snapshot_test.go | 30 +-- core/state/statedb.go | 193 ++---------------- eth/backend.go | 3 - eth/ethconfig/config.go | 1 - eth/ethconfig/gen_config.go | 6 - miner/worker.go | 9 - triedb/hashdb/database.go | 44 ++--- 22 files changed, 234 insertions(+), 670 deletions(-) diff --git a/cmd/geth/main.go 
b/cmd/geth/main.go index 5eb8fa4146..899d839596 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -67,7 +67,6 @@ var ( utils.DirectBroadcastFlag, utils.DisableSnapProtocolFlag, utils.EnableTrustProtocolFlag, - utils.PipeCommitFlag, utils.RangeLimitFlag, utils.USBFlag, utils.SmartCardDaemonPathFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index f3aab43d70..5c174dbee7 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -117,11 +117,6 @@ var ( Usage: "Enable trust protocol", Category: flags.FastNodeCategory, } - PipeCommitFlag = &cli.BoolFlag{ - Name: "pipecommit", - Usage: "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false)", - Category: flags.DeprecatedCategory, - } RangeLimitFlag = &cli.BoolFlag{ Name: "rangelimit", Usage: "Enable 5000 blocks limit for range query", @@ -1982,9 +1977,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if ctx.IsSet(EnableTrustProtocolFlag.Name) { cfg.EnableTrustProtocol = ctx.IsSet(EnableTrustProtocolFlag.Name) } - if ctx.IsSet(PipeCommitFlag.Name) { - log.Warn("The --pipecommit flag is deprecated and could be removed in the future!") - } if ctx.IsSet(RangeLimitFlag.Name) { cfg.RangeLimit = ctx.Bool(RangeLimitFlag.Name) } diff --git a/core/block_validator.go b/core/block_validator.go index d15e2cd786..6b292ddbe4 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -19,9 +19,7 @@ package core import ( "errors" "fmt" - "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -29,8 +27,6 @@ import ( "github.com/ethereum/go-ethereum/trie" ) -const badBlockCacheExpire = 30 * time.Second - type BlockValidatorOption func(*BlockValidator) *BlockValidator func EnableRemoteVerifyManager(remoteValidator *remoteVerifyManager) BlockValidatorOption { @@ -74,9 +70,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) { return ErrKnownBlock } - if v.bc.isCachedBadBlock(block) { - return ErrKnownBadBlock - } // Header validity is known at this point. Here we verify that uncles, transactions // and withdrawals given in the block body match the header. 
header := block.Header() @@ -192,23 +185,12 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD return nil }, } - if statedb.IsPipeCommit() { - validateFuns = append(validateFuns, func() error { - if err := statedb.WaitPipeVerification(); err != nil { - return err - } - statedb.CorrectAccountsRoot(common.Hash{}) - statedb.Finalise(v.config.IsEIP158(header.Number)) - return nil - }) - } else { - validateFuns = append(validateFuns, func() error { - if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { - return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) - } - return nil - }) - } + validateFuns = append(validateFuns, func() error { + if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { + return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) + } + return nil + }) validateRes := make(chan error, len(validateFuns)) for _, f := range validateFuns { tmpFunc := f diff --git a/core/blockchain.go b/core/blockchain.go index b2a56d74bf..1d3ffb12e7 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -102,11 +102,10 @@ var ( blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil) - errStateRootVerificationFailed = errors.New("state root verification failed") - errInsertionInterrupted = errors.New("insertion is interrupted") - errChainStopped = errors.New("blockchain is stopped") - errInvalidOldChain = errors.New("invalid old chain") - errInvalidNewChain = errors.New("invalid new chain") + errInsertionInterrupted = errors.New("insertion is interrupted") + errChainStopped = errors.New("blockchain is stopped") + errInvalidOldChain = errors.New("invalid old chain") + errInvalidNewChain = errors.New("invalid new chain") ) const ( @@ -116,7 +115,6 @@ const ( receiptsCacheLimit = 10000 sidecarsCacheLimit = 1024 txLookupCacheLimit = 1024 - maxBadBlockLimit = 16 maxFutureBlocks = 256 maxTimeFutureBlocks = 30 TriesInMemory = 128 @@ -126,8 +124,6 @@ const ( diffLayerFreezerRecheckInterval = 3 * time.Second maxDiffForkDist = 11 // Maximum allowed backward distance from the chain head - rewindBadBlockInterval = 1 * time.Second - // BlockChainVersion ensures that an incompatible database forces a resync from scratch. 
// // Changelog: @@ -294,8 +290,6 @@ type BlockChain struct { // future blocks are blocks added for later processing futureBlocks *lru.Cache[common.Hash, *types.Block] - // Cache for the blocks that failed to pass MPT root verification - badBlockCache *lru.Cache[common.Hash, time.Time] // trusted diff layers diffLayerCache *exlru.Cache // Cache for the diffLayers @@ -316,7 +310,6 @@ type BlockChain struct { processor Processor // Block transaction processor interface forker *ForkChoice vmConfig vm.Config - pipeCommit bool // monitor doubleSignMonitor *monitor.DoubleSignMonitor @@ -378,7 +371,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), - badBlockCache: lru.NewCache[common.Hash, time.Time](maxBadBlockLimit), diffLayerCache: diffLayerCache, diffLayerChanCache: diffLayerChanCache, engine: engine, @@ -559,11 +551,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis bc.wg.Add(1) go bc.trustedDiffLayerLoop() } - if bc.pipeCommit { - // check current block and rewind invalid one - bc.wg.Add(1) - go bc.rewindInvalidHeaderBlockLoop() - } if bc.doubleSignMonitor != nil { bc.wg.Add(1) @@ -817,26 +804,6 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error { return nil } -func (bc *BlockChain) tryRewindBadBlocks() { - if !bc.chainmu.TryLock() { - return - } - defer bc.chainmu.Unlock() - block := bc.CurrentBlock() - snaps := bc.snaps - // Verified and Result is false - if snaps != nil && snaps.Snapshot(block.Root) != nil && - snaps.Snapshot(block.Root).Verified() && !snaps.Snapshot(block.Root).WaitAndGetVerifyRes() { - // Rewind by one block - log.Warn("current block verified failed, rewind to its parent", "height", block.Number.Uint64(), "hash", block.Hash()) - bc.futureBlocks.Remove(block.Hash()) - bc.badBlockCache.Add(block.Hash(), time.Now()) - bc.diffLayerCache.Remove(block.Hash()) - bc.reportBlock(bc.GetBlockByHash(block.Hash()), nil, errStateRootVerificationFailed) - bc.setHeadBeyondRoot(block.Number.Uint64()-1, 0, common.Hash{}, false) - } -} - // rewindHashHead implements the logic of rewindHead in the context of hash scheme. func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) { var ( @@ -1893,7 +1860,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. return nil } // Commit all cached state changes into underlying memory database. 
- _, diffLayer, err := state.Commit(block.NumberU64(), bc.tryRewindBadBlocks, tryCommitTrieDB) + _, diffLayer, err := state.Commit(block.NumberU64(), tryCommitTrieDB) if err != nil { return err } @@ -2269,9 +2236,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) } // Process block using the parent state as reference point - if bc.pipeCommit { - statedb.EnablePipeCommit() - } statedb.SetExpectedStateRoot(block.Root()) pstart := time.Now() statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig) @@ -2889,22 +2853,6 @@ func (bc *BlockChain) updateFutureBlocks() { } } -func (bc *BlockChain) rewindInvalidHeaderBlockLoop() { - recheck := time.NewTicker(rewindBadBlockInterval) - defer func() { - recheck.Stop() - bc.wg.Done() - }() - for { - select { - case <-recheck.C: - bc.tryRewindBadBlocks() - case <-bc.quit: - return - } - } -} - func (bc *BlockChain) trustedDiffLayerLoop() { recheck := time.NewTicker(diffLayerFreezerRecheckInterval) defer func() { @@ -3042,17 +2990,6 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } -func (bc *BlockChain) isCachedBadBlock(block *types.Block) bool { - if timeAt, exist := bc.badBlockCache.Get(block.Hash()); exist { - if time.Since(timeAt) >= badBlockCacheExpire { - bc.badBlockCache.Remove(block.Hash()) - return false - } - return true - } - return false -} - // reportBlock logs a bad block error. // bad block need not save receipts & sidecars. func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { @@ -3114,11 +3051,6 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) { func (bc *BlockChain) TriesInMemory() uint64 { return bc.triesInMemory } -func EnablePipelineCommit(bc *BlockChain) (*BlockChain, error) { - bc.pipeCommit = false - return bc, nil -} - func EnablePersistDiff(limit uint64) BlockChainOption { return func(chain *BlockChain) (*BlockChain, error) { chain.diffLayerFreezerBlockLimit = limit diff --git a/core/blockchain_diff_test.go b/core/blockchain_diff_test.go index 8ec14bce43..50facedac6 100644 --- a/core/blockchain_diff_test.go +++ b/core/blockchain_diff_test.go @@ -237,7 +237,7 @@ func TestFreezeDiffLayer(t *testing.T) { // Wait for the buffer to be zero. } // Minus one empty block. 
- if fullBackend.chain.diffQueue.Size() > blockNum-1 && fullBackend.chain.diffQueue.Size() < blockNum-2 { + if fullBackend.chain.diffQueue.Size() != blockNum-1 { t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size()) } diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 8a05a6cfb1..44fbe2e20e 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -351,12 +351,6 @@ func (bc *BlockChain) HasState(hash common.Hash) bool { if bc.NoTries() { return bc.snaps != nil && bc.snaps.Snapshot(hash) != nil } - if bc.pipeCommit && bc.snaps != nil { - // If parent snap is pending on verification, treat it as state exist - if s := bc.snaps.Snapshot(hash); s != nil && !s.Verified() { - return true - } - } _, err := bc.stateCache.OpenTrie(hash) return err == nil } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 3917117b91..8d1b8bc453 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -51,8 +51,7 @@ import ( // So we can deterministically seed different blockchains var ( canonicalSeed = 1 - forkSeed1 = 2 - forkSeed2 = 3 + forkSeed = 2 TestTriesInMemory = 128 ) @@ -61,19 +60,15 @@ var ( // chain. Depending on the full flag, it creates either a full block chain or a // header only chain. The database and genesis specification for block generation // are also returned in case more test blocks are needed later. -func newCanonical(engine consensus.Engine, n int, full bool, scheme string, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) { +func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *Genesis, *BlockChain, error) { var ( genesis = &Genesis{ BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges, } ) - // Initialize a fresh chain with only a genesis block var ops []BlockChainOption - if pipeline { - ops = append(ops, EnablePipelineCommit) - } blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, ops...) 
// Create and inject the requested chain if n == 0 { @@ -96,53 +91,9 @@ func newGwei(n int64) *big.Int { } // Test fork of length N starting from block i -func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) { +func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) { // Copy old chain up to #i into a new db - db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - defer blockchain2.Stop() - - // Assert the chains have the same header/block at #i - hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash() - hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash() - if hash1 != hash2 { - t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1) - } - // Extend the newly created chain - blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed1) - for idx, block := range blockChainB { - block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)}) - } - previousBlock := blockchain.CurrentBlock() - // Sanity check that the forked chain can be imported into the original - if _, err := blockchain.InsertChain(blockChainB); err == nil { - t.Fatalf("failed to report insert error") - } - - time.Sleep(2 * rewindBadBlockInterval) - latestBlock := blockchain.CurrentBlock() - if latestBlock.Hash() != previousBlock.Hash() || latestBlock.Number.Uint64() != previousBlock.Number.Uint64() { - t.Fatalf("rewind do not take effect") - } - db, _, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - defer blockchain3.Stop() - - blockChainC := makeBlockChain(blockchain3.chainConfig, blockchain3.GetBlockByHash(blockchain3.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed2) - - if _, err := blockchain.InsertChain(blockChainC); err != nil { - t.Fatalf("failed to insert forking chain: %v", err) - } -} - -// Test fork of length N starting from block i -func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string, pipeline bool) { - // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, pipeline) + genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) if err != nil { t.Fatal("could not make new canonical in testFork", err) } @@ -166,12 +117,12 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara headerChainB []*types.Header ) if full { - blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1) + blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed) if _, err := blockchain2.InsertChain(blockChainB); err != nil { t.Fatalf("failed to insert forking chain: %v", err) } } else { - headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1) + headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed) if _, err := 
blockchain2.InsertHeaderChain(headerChainB); err != nil { t.Fatalf("failed to insert forking chain: %v", err) } @@ -182,7 +133,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara if full { cur := blockchain.CurrentBlock() tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64()) - if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil { + if err := testBlockChainImport(blockChainB, blockchain); err != nil { t.Fatalf("failed to import forked block chain: %v", err) } last := blockChainB[len(blockChainB)-1] @@ -202,7 +153,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara // testBlockChainImport tries to process a chain of blocks, writing them into // the database if successful. -func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error { +func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { for _, block := range chain { // Try and process the block err := blockchain.engine.VerifyHeader(blockchain, block.Header()) @@ -220,9 +171,6 @@ func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *B return err } statedb.SetExpectedStateRoot(block.Root()) - if pipelineCommit { - statedb.EnablePipeCommit() - } statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{}) if err != nil { blockchain.reportBlock(block, receipts, err) @@ -262,26 +210,13 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error return nil } -func TestBlockImportVerification(t *testing.T) { - length := 5 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, true, rawdb.HashScheme, true) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - // Start fork from current height - processor, _ = EnablePipelineCommit(processor) - testInvalidStateRootBlockImport(t, processor, length, 10, true) -} func TestLastBlock(t *testing.T) { testLastBlock(t, rawdb.HashScheme) testLastBlock(t, rawdb.PathScheme) } func testLastBlock(t *testing.T, scheme string) { - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -300,7 +235,7 @@ func testLastBlock(t *testing.T, scheme string) { // The chain is reorged to whatever specified. 
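A minimal sketch of the per-block step inside testBlockChainImport after the change above, limited to the lines visible in the hunk (the statedb is assumed to have been opened at the parent block's root a few lines earlier; error handling trimmed):

    // Sketch only: per-block processing without the pipecommit toggle.
    statedb.SetExpectedStateRoot(block.Root())
    // statedb.EnablePipeCommit() used to be called here and is removed by this patch.
    statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
    if err != nil {
        blockchain.reportBlock(block, receipts, err)
        return err
    }
    // usedGas feeds the state validator further down (not shown here).
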
func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) { // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, false) + genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) if err != nil { t.Fatal("could not make new canonical in testFork", err) } @@ -321,7 +256,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b // Extend the newly created chain if full { - blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1) + blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed) if _, err := blockchain2.InsertChain(blockChainB); err != nil { t.Fatalf("failed to insert forking chain: %v", err) } @@ -332,7 +267,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b t.Fatalf("failed to reorg to the given chain") } } else { - headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1) + headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed) if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil { t.Fatalf("failed to insert forking chain: %v", err) } @@ -348,21 +283,20 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b // Tests that given a starting canonical chain of a given size, it can be extended // with various length chains. func TestExtendCanonicalHeaders(t *testing.T) { - testExtendCanonical(t, false, rawdb.HashScheme, false) - testExtendCanonical(t, false, rawdb.PathScheme, false) + testExtendCanonical(t, false, rawdb.HashScheme) + testExtendCanonical(t, false, rawdb.PathScheme) } func TestExtendCanonicalBlocks(t *testing.T) { - testExtendCanonical(t, true, rawdb.HashScheme, false) - testExtendCanonical(t, true, rawdb.PathScheme, false) - testExtendCanonical(t, true, rawdb.HashScheme, true) + testExtendCanonical(t, true, rawdb.HashScheme) + testExtendCanonical(t, true, rawdb.PathScheme) } -func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) { +func testExtendCanonical(t *testing.T, full bool, scheme string) { length := 5 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -375,10 +309,10 @@ func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) } } // Start fork from current height - testFork(t, processor, length, 1, full, better, scheme, pipeline) - testFork(t, processor, length, 2, full, better, scheme, pipeline) - testFork(t, processor, length, 5, full, better, scheme, pipeline) - testFork(t, processor, length, 10, full, better, scheme, pipeline) + testFork(t, processor, length, 1, full, better, scheme) + testFork(t, processor, length, 2, full, better, scheme) + testFork(t, processor, length, 5, full, better, scheme) + testFork(t, processor, length, 10, full, better, scheme) } // Tests that given a starting canonical chain of a given size, it can be extended @@ -396,7 +330,7 @@ func testExtendCanonicalAfterMerge(t *testing.T, 
full bool, scheme string) { length := 5 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -409,20 +343,19 @@ func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) { // Tests that given a starting canonical chain of a given size, creating shorter // forks do not take canonical ownership. func TestShorterForkHeaders(t *testing.T) { - testShorterFork(t, false, rawdb.HashScheme, false) - testShorterFork(t, false, rawdb.PathScheme, false) + testShorterFork(t, false, rawdb.HashScheme) + testShorterFork(t, false, rawdb.PathScheme) } func TestShorterForkBlocks(t *testing.T) { - testShorterFork(t, true, rawdb.HashScheme, false) - testShorterFork(t, true, rawdb.PathScheme, false) - testShorterFork(t, true, rawdb.HashScheme, true) + testShorterFork(t, true, rawdb.HashScheme) + testShorterFork(t, true, rawdb.PathScheme) } -func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) { +func testShorterFork(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -435,12 +368,12 @@ func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) { } } // Sum of numbers must be less than `length` for this to be a shorter fork - testFork(t, processor, 0, 3, full, worse, scheme, pipeline) - testFork(t, processor, 0, 7, full, worse, scheme, pipeline) - testFork(t, processor, 1, 1, full, worse, scheme, pipeline) - testFork(t, processor, 1, 7, full, worse, scheme, pipeline) - testFork(t, processor, 5, 3, full, worse, scheme, pipeline) - testFork(t, processor, 5, 4, full, worse, scheme, pipeline) + testFork(t, processor, 0, 3, full, worse, scheme) + testFork(t, processor, 0, 7, full, worse, scheme) + testFork(t, processor, 1, 1, full, worse, scheme) + testFork(t, processor, 1, 7, full, worse, scheme) + testFork(t, processor, 5, 3, full, worse, scheme) + testFork(t, processor, 5, 4, full, worse, scheme) } // Tests that given a starting canonical chain of a given size, creating shorter @@ -458,7 +391,7 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -475,20 +408,19 @@ func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) { // Tests that given a starting canonical chain of a given size, creating longer // forks do take canonical ownership. 
func TestLongerForkHeaders(t *testing.T) { - testLongerFork(t, false, rawdb.HashScheme, false) - testLongerFork(t, false, rawdb.PathScheme, false) + testLongerFork(t, false, rawdb.HashScheme) + testLongerFork(t, false, rawdb.PathScheme) } func TestLongerForkBlocks(t *testing.T) { - testLongerFork(t, true, rawdb.HashScheme, false) - testLongerFork(t, true, rawdb.PathScheme, false) - testLongerFork(t, true, rawdb.HashScheme, true) + testLongerFork(t, true, rawdb.HashScheme) + testLongerFork(t, true, rawdb.PathScheme) } -func testLongerFork(t *testing.T, full bool, scheme string, pipeline bool) { +func testLongerFork(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -517,7 +449,7 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -534,20 +466,19 @@ func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) { // Tests that given a starting canonical chain of a given size, creating equal // forks do take canonical ownership. func TestEqualForkHeaders(t *testing.T) { - testEqualFork(t, false, rawdb.HashScheme, false) - testEqualFork(t, false, rawdb.PathScheme, false) + testEqualFork(t, false, rawdb.HashScheme) + testEqualFork(t, false, rawdb.PathScheme) } func TestEqualForkBlocks(t *testing.T) { - testEqualFork(t, true, rawdb.HashScheme, false) - testEqualFork(t, true, rawdb.PathScheme, false) - testEqualFork(t, true, rawdb.HashScheme, true) + testEqualFork(t, true, rawdb.HashScheme) + testEqualFork(t, true, rawdb.PathScheme) } -func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) { +func testEqualFork(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -560,12 +491,12 @@ func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) { } } // Sum of numbers must be equal to `length` for this to be an equal fork - testFork(t, processor, 0, 10, full, equal, scheme, pipeline) - testFork(t, processor, 1, 9, full, equal, scheme, pipeline) - testFork(t, processor, 2, 8, full, equal, scheme, pipeline) - testFork(t, processor, 5, 5, full, equal, scheme, pipeline) - testFork(t, processor, 6, 4, full, equal, scheme, pipeline) - testFork(t, processor, 9, 1, full, equal, scheme, pipeline) + testFork(t, processor, 0, 10, full, equal, scheme) + testFork(t, processor, 1, 9, full, equal, scheme) + testFork(t, processor, 2, 8, full, equal, scheme) + testFork(t, processor, 5, 5, full, equal, scheme) + testFork(t, processor, 6, 4, full, equal, scheme) + testFork(t, processor, 9, 1, full, equal, scheme) } // Tests that given a starting canonical chain of a given size, creating equal @@ -583,7 +514,7 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) { length := 
10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -599,18 +530,17 @@ func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) { // Tests that chains missing links do not get accepted by the processor. func TestBrokenHeaderChain(t *testing.T) { - testBrokenChain(t, false, rawdb.HashScheme, false) - testBrokenChain(t, false, rawdb.PathScheme, false) + testBrokenChain(t, false, rawdb.HashScheme) + testBrokenChain(t, false, rawdb.PathScheme) } func TestBrokenBlockChain(t *testing.T) { - testBrokenChain(t, true, rawdb.HashScheme, false) - testBrokenChain(t, true, rawdb.PathScheme, false) - testBrokenChain(t, true, rawdb.HashScheme, true) + testBrokenChain(t, true, rawdb.HashScheme) + testBrokenChain(t, true, rawdb.PathScheme) } -func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) { +func testBrokenChain(t *testing.T, full bool, scheme string) { // Make chain starting from genesis - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme, pipeline) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -618,12 +548,12 @@ func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) { // Create a forked chain, and try to insert with a missing link if full { - chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed1)[1:] - if err := testBlockChainImport(chain, pipeline, blockchain); err == nil { + chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:] + if err := testBlockChainImport(chain, blockchain); err == nil { t.Errorf("broken block chain not reported") } } else { - chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed1)[1:] + chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:] if err := testHeaderChainImport(chain, blockchain); err == nil { t.Errorf("broken header chain not reported") } @@ -633,32 +563,30 @@ func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) { // Tests that reorganising a long difficult chain after a short easy one // overwrites the canonical numbers and links in the database. 
func TestReorgLongHeaders(t *testing.T) { - testReorgLong(t, false, rawdb.HashScheme, false) - testReorgLong(t, false, rawdb.PathScheme, false) + testReorgLong(t, false, rawdb.HashScheme) + testReorgLong(t, false, rawdb.PathScheme) } func TestReorgLongBlocks(t *testing.T) { - testReorgLong(t, true, rawdb.HashScheme, false) - testReorgLong(t, true, rawdb.PathScheme, false) - testReorgLong(t, true, rawdb.HashScheme, true) + testReorgLong(t, true, rawdb.HashScheme) + testReorgLong(t, true, rawdb.PathScheme) } -func testReorgLong(t *testing.T, full bool, scheme string, pipeline bool) { - testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme, pipeline) +func testReorgLong(t *testing.T, full bool, scheme string) { + testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme) } // Tests that reorganising a short difficult chain after a long easy one // overwrites the canonical numbers and links in the database. func TestReorgShortHeaders(t *testing.T) { - testReorgShort(t, false, rawdb.HashScheme, false) - testReorgShort(t, false, rawdb.PathScheme, false) + testReorgShort(t, false, rawdb.HashScheme) + testReorgShort(t, false, rawdb.PathScheme) } func TestReorgShortBlocks(t *testing.T) { - testReorgShort(t, true, rawdb.HashScheme, false) - testReorgShort(t, true, rawdb.PathScheme, false) - testReorgShort(t, true, rawdb.HashScheme, true) + testReorgShort(t, true, rawdb.HashScheme) + testReorgShort(t, true, rawdb.PathScheme) } -func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) { +func testReorgShort(t *testing.T, full bool, scheme string) { // Create a long easy chain vs. a short heavy one. Due to difficulty adjustment // we need a fairly long chain of blocks with different difficulties for a short // one to become heavier than a long one. The 96 is an empirical value. @@ -670,12 +598,12 @@ func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) { for i := 0; i < len(diff); i++ { diff[i] = -9 } - testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme, pipeline) + testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme) } -func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string, pipeline bool) { +func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) { // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -744,19 +672,18 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme // Tests that the insertion functions detect banned hashes. 
func TestBadHeaderHashes(t *testing.T) { - testBadHashes(t, false, rawdb.HashScheme, false) - testBadHashes(t, false, rawdb.PathScheme, false) + testBadHashes(t, false, rawdb.HashScheme) + testBadHashes(t, false, rawdb.PathScheme) } func TestBadBlockHashes(t *testing.T) { - testBadHashes(t, true, rawdb.HashScheme, false) - testBadHashes(t, true, rawdb.HashScheme, true) - testBadHashes(t, true, rawdb.PathScheme, false) + testBadHashes(t, true, rawdb.HashScheme) + testBadHashes(t, true, rawdb.PathScheme) } -func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) { +func testBadHashes(t *testing.T, full bool, scheme string) { // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -786,18 +713,17 @@ func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) { // Tests that bad hashes are detected on boot, and the chain rolled back to a // good state prior to the bad hash. func TestReorgBadHeaderHashes(t *testing.T) { - testReorgBadHashes(t, false, rawdb.HashScheme, false) - testReorgBadHashes(t, false, rawdb.PathScheme, false) + testReorgBadHashes(t, false, rawdb.HashScheme) + testReorgBadHashes(t, false, rawdb.PathScheme) } func TestReorgBadBlockHashes(t *testing.T) { - testReorgBadHashes(t, true, rawdb.HashScheme, false) - testReorgBadHashes(t, true, rawdb.HashScheme, true) - testReorgBadHashes(t, true, rawdb.PathScheme, false) + testReorgBadHashes(t, true, rawdb.HashScheme) + testReorgBadHashes(t, true, rawdb.PathScheme) } -func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) { +func testReorgBadHashes(t *testing.T, full bool, scheme string) { // Create a pristine chain and database - genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) + genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -848,19 +774,18 @@ func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) { // Tests chain insertions in the face of one entity containing an invalid nonce. 
func TestHeadersInsertNonceError(t *testing.T) { - testInsertNonceError(t, false, rawdb.HashScheme, false) - testInsertNonceError(t, false, rawdb.PathScheme, false) + testInsertNonceError(t, false, rawdb.HashScheme) + testInsertNonceError(t, false, rawdb.PathScheme) } func TestBlocksInsertNonceError(t *testing.T) { - testInsertNonceError(t, true, rawdb.HashScheme, false) - testInsertNonceError(t, true, rawdb.HashScheme, true) - testInsertNonceError(t, true, rawdb.PathScheme, false) + testInsertNonceError(t, true, rawdb.HashScheme) + testInsertNonceError(t, true, rawdb.PathScheme) } -func testInsertNonceError(t *testing.T, full bool, scheme string, pipeline bool) { +func testInsertNonceError(t *testing.T, full bool, scheme string) { doTest := func(i int) { // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -1611,7 +1536,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) { } func testCanonicalBlockRetrieval(t *testing.T, scheme string) { - _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false) + _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } diff --git a/core/error.go b/core/error.go index 07ff770432..ca0f0b9d00 100644 --- a/core/error.go +++ b/core/error.go @@ -39,9 +39,6 @@ var ( // ErrCurrentBlockNotFound is returned when current block not found. ErrCurrentBlockNotFound = errors.New("current block not found") - - // ErrKnownBadBlock is return when the block is a known bad block - ErrKnownBadBlock = errors.New("already known bad block") ) // List of evm-call-message pre-checking errors. All state transition messages will diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index eb9fa2ed13..c12dd4c3ea 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -119,9 +119,6 @@ type diffLayer struct { storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted) - verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed - valid bool // mark the difflayer is valid or not. - diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer lock sync.RWMutex @@ -145,7 +142,7 @@ func storageBloomHash(h0, h1 common.Hash) uint64 { // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low // level persistent database or a hierarchical diff already. 
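The constructor in the next hunk loses its trailing verified channel. A hypothetical sketch of creating and chaining diff layers with the new five-argument shape (emptyLayer, destructs, accounts and storage are the helpers and values used by the tests further down in this patch):

    // Illustrative only: no verification channel is threaded through any more.
    parent := newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
    child := parent.Update(common.Hash{}, destructs, accounts, storage)
    _ = child
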
-func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer { +func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { // Create the new layer with some pre-allocated data segments dl := &diffLayer{ parent: parent, @@ -154,7 +151,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s accountData: accounts, storageData: storage, storageList: make(map[common.Hash][]common.Hash), - verifiedCh: verified, } switch parent := parent.(type) { @@ -236,39 +232,6 @@ func (dl *diffLayer) Root() common.Hash { return dl.root } -// WaitAndGetVerifyRes will wait until the diff layer been verified and return the verification result -func (dl *diffLayer) WaitAndGetVerifyRes() bool { - if dl.verifiedCh == nil { - return true - } - <-dl.verifiedCh - return dl.valid -} - -func (dl *diffLayer) MarkValid() { - dl.valid = true -} - -// Represent whether the difflayer is been verified, does not means it is a valid or invalid difflayer -func (dl *diffLayer) Verified() bool { - if dl.verifiedCh == nil { - return true - } - select { - case <-dl.verifiedCh: - return true - default: - return false - } -} - -func (dl *diffLayer) CorrectAccounts(accounts map[common.Hash][]byte) { - dl.lock.Lock() - defer dl.lock.Unlock() - - dl.accountData = accounts -} - // Parent returns the subsequent layer of a diff layer. func (dl *diffLayer) Parent() snapshot { dl.lock.RLock() @@ -467,8 +430,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([ // Update creates a new layer on top of the existing snapshot diff tree with // the specified data items. 
-func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer { - return newDiffLayer(dl, blockRoot, destructs, accounts, storage, verified) +func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { + return newDiffLayer(dl, blockRoot, destructs, accounts, storage) } // flatten pushes all data from this point downwards, flattening everything into diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 1c0844c44d..674a031b16 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -80,11 +80,11 @@ func TestMergeBasics(t *testing.T) { } } // Add some (identical) layers on top - parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil) - child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil) - child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil) - child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil) - child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil) + parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) + child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) + child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) + child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) + child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) // And flatten merged := (child.flatten()).(*diffLayer) @@ -152,13 +152,13 @@ func TestMergeDelete(t *testing.T) { } } // Add some flipAccs-flopping layers on top - parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage, nil) - child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil) - child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil) - child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil) - child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil) - child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil) - child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil) + parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage) + child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage) + child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) + child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) + child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) + child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) + child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) if data, _ := child.Account(h1); data == nil { t.Errorf("last diff layer: expected %x account to be non-nil", h1) @@ -210,7 +210,7 @@ func TestInsertAndMerge(t 
*testing.T) { accounts = make(map[common.Hash][]byte) storage = make(map[common.Hash]map[common.Hash][]byte) ) - parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage, nil) + parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage) } { var ( @@ -221,7 +221,7 @@ func TestInsertAndMerge(t *testing.T) { accounts[acc] = randomAccount() storage[acc] = make(map[common.Hash][]byte) storage[acc][slot] = []byte{0x01} - child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil) + child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) } // And flatten merged := (child.flatten()).(*diffLayer) @@ -257,7 +257,7 @@ func BenchmarkSearch(b *testing.B) { for i := 0; i < 10000; i++ { accounts[randomHash()] = randomAccount() } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil) + return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) } var layer snapshot layer = emptyLayer() @@ -299,7 +299,7 @@ func BenchmarkSearchSlot(b *testing.B) { accStorage[randomHash()] = value storage[accountKey] = accStorage } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil) + return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) } var layer snapshot layer = emptyLayer() @@ -336,7 +336,7 @@ func BenchmarkFlatten(b *testing.B) { } storage[accountKey] = accStorage } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil) + return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) } b.ResetTimer() for i := 0; i < b.N; i++ { @@ -385,7 +385,7 @@ func BenchmarkJournal(b *testing.B) { } storage[accountKey] = accStorage } - return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil) + return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) } layer := snapshot(emptyLayer()) for i := 1; i < 128; i++ { diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go index 58ce3e3657..1556439f23 100644 --- a/core/state/snapshot/disklayer.go +++ b/core/state/snapshot/disklayer.go @@ -60,19 +60,6 @@ func (dl *diskLayer) Root() common.Hash { return dl.root } -func (dl *diskLayer) WaitAndGetVerifyRes() bool { - return true -} - -func (dl *diskLayer) MarkValid() {} - -func (dl *diskLayer) Verified() bool { - return true -} - -func (dl *diskLayer) CorrectAccounts(map[common.Hash][]byte) { -} - // Parent always returns nil as there's no layer below the disk. func (dl *diskLayer) Parent() snapshot { return nil @@ -191,6 +178,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro // Update creates a new layer on top of the existing snapshot diff tree with // the specified data items. Note, the maps are retained by the method to avoid // copying everything. 
-func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer { - return newDiffLayer(dl, blockHash, destructs, accounts, storage, verified) +func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { + return newDiffLayer(dl, blockHash, destructs, accounts, storage) } diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index f524253875..168458c405 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -130,7 +130,7 @@ func TestDiskMerge(t *testing.T) { conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])}, conDelNoCache: {conDelNoCacheSlot: nil}, conDelCache: {conDelCacheSlot: nil}, - }, nil); err != nil { + }); err != nil { t.Fatalf("failed to update snapshot tree: %v", err) } if err := snaps.Cap(diffRoot, 0); err != nil { @@ -353,7 +353,7 @@ func TestDiskPartialMerge(t *testing.T) { conModCache: {conModCacheSlot: reverse(conModCacheSlot[:])}, conDelNoCache: {conDelNoCacheSlot: nil}, conDelCache: {conDelCacheSlot: nil}, - }, nil); err != nil { + }); err != nil { t.Fatalf("test %d: failed to update snapshot tree: %v", i, err) } if err := snaps.Cap(diffRoot, 0); err != nil { @@ -464,7 +464,7 @@ func TestDiskGeneratorPersistence(t *testing.T) { // Modify or delete some accounts, flatten everything onto disk if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{ accTwo: accTwo[:], - }, nil, nil); err != nil { + }, nil); err != nil { t.Fatalf("failed to update snapshot tree: %v", err) } if err := snaps.Cap(diffRoot, 0); err != nil { @@ -484,7 +484,7 @@ func TestDiskGeneratorPersistence(t *testing.T) { accThree: accThree.Bytes(), }, map[common.Hash]map[common.Hash][]byte{ accThree: {accThreeSlot: accThreeSlot.Bytes()}, - }, nil); err != nil { + }); err != nil { t.Fatalf("failed to update snapshot tree: %v", err) } diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer) diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go index 3e8a5878ef..54614427a5 100644 --- a/core/state/snapshot/iterator_test.go +++ b/core/state/snapshot/iterator_test.go @@ -54,7 +54,7 @@ func TestAccountIteratorBasics(t *testing.T) { } } // Add some (identical) layers on top - diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil) + diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) it := diffLayer.AccountIterator(common.Hash{}) verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator @@ -92,7 +92,7 @@ func TestStorageIteratorBasics(t *testing.T) { nilStorage[h] = nilstorage } // Add some (identical) layers on top - diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage), nil) + diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage)) for account := range accounts { it, _ := diffLayer.StorageIterator(account, common.Hash{}) verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator @@ -223,13 +223,13 @@ func TestAccountIteratorTraversal(t *testing.T) { } // Stack three diff layers on top with various overlaps 
snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil) + randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil) + randomAccountSet("0xbb", "0xdd", "0xf0"), nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil) + randomAccountSet("0xcc", "0xf0", "0xff"), nil) // Verify the single and multi-layer iterators head := snaps.Snapshot(common.HexToHash("0x04")) @@ -270,13 +270,13 @@ func TestStorageIteratorTraversal(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil)) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil)) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil)) // Verify the single and multi-layer iterators head := snaps.Snapshot(common.HexToHash("0x04")) @@ -354,14 +354,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) { } } // Assemble a stack of snapshots from the account layers - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil, nil) - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil, nil) - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil, nil) - snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil, nil) - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil, nil) - snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil, nil) - snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil, nil) - snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil, nil) + snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil) + snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil) + snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil) + snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil) + snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil) + snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil) + snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil) + snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil) it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{}) head := snaps.Snapshot(common.HexToHash("0x09")) @@ -453,14 +453,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) { } } // Assemble a stack of snapshots from the account layers - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), 
wrapStorage(a), nil) - snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b), nil) - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c), nil) - snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d), nil) - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil) - snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil) - snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g), nil) - snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h), nil) + snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a)) + snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b)) + snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c)) + snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d)) + snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e)) + snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e)) + snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g)) + snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h)) it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{}) head := snaps.Snapshot(common.HexToHash("0x09")) @@ -523,7 +523,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) { }, } for i := 1; i < 128; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil) + snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) } // Iterate the entire stack and ensure everything is hit only once head := snaps.Snapshot(common.HexToHash("0x80")) @@ -568,13 +568,13 @@ func TestAccountIteratorFlattening(t *testing.T) { } // Create a stack of diffs on top snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil) + randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil) + randomAccountSet("0xbb", "0xdd", "0xf0"), nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil) + randomAccountSet("0xcc", "0xf0", "0xff"), nil) // Create an iterator and flatten the data from underneath it it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) @@ -599,13 +599,13 @@ func TestAccountIteratorSeek(t *testing.T) { }, } snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil) + randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xbb", "0xdd", 
"0xf0"), nil, nil) + randomAccountSet("0xbb", "0xdd", "0xf0"), nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil) + randomAccountSet("0xcc", "0xf0", "0xff"), nil) // Account set is now // 02: aa, ee, f0, ff @@ -663,13 +663,13 @@ func TestStorageIteratorSeek(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil)) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil)) // Account set is now // 02: 01, 03, 05 @@ -726,17 +726,17 @@ func TestAccountIteratorDeletions(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), - nil, randomAccountSet("0x11", "0x22", "0x33"), nil, nil) + nil, randomAccountSet("0x11", "0x22", "0x33"), nil) deleted := common.HexToHash("0x22") destructed := map[common.Hash]struct{}{ deleted: {}, } snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), - destructed, randomAccountSet("0x11", "0x33"), nil, nil) + destructed, randomAccountSet("0x11", "0x33"), nil) snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), - nil, randomAccountSet("0x33", "0x44", "0x55"), nil, nil) + nil, randomAccountSet("0x33", "0x44", "0x55"), nil) // The output should be 11,33,44,55 it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) @@ -772,10 +772,10 @@ func TestStorageIteratorDeletions(t *testing.T) { } // Stack three diff layers on top with various overlaps snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil)) snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}})) // The output should be 02,04,05,06 it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{}) @@ -791,7 +791,7 @@ func TestStorageIteratorDeletions(t *testing.T) { destructed := map[common.Hash]struct{}{ common.HexToHash("0xaa"): {}, } - snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil, nil) + snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil) it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) verifyIterator(t, 0, it, verifyStorage) @@ -799,7 +799,7 @@ 
func TestStorageIteratorDeletions(t *testing.T) { // Re-insert the slots of the same account snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, - randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil) + randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil)) // The output should be 07,08,09 it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{}) @@ -807,7 +807,7 @@ func TestStorageIteratorDeletions(t *testing.T) { it.Release() // Destruct the whole storage but re-create the account in the same layer - snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil), nil) + snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil)) it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{}) verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12 it.Release() @@ -849,7 +849,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) { }, } for i := 1; i <= 100; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil) + snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) } // We call this once before the benchmark, so the creation of // sorted accountlists are not included in the results. @@ -944,9 +944,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { base.root: base, }, } - snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil, nil) + snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil) for i := 2; i <= 100; i++ { - snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil, nil) + snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil) } // We call this once before the benchmark, so the creation of // sorted accountlists are not included in the results. diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index cc60f79ce7..40fb51ae1d 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -110,7 +110,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou // etc.), we just discard all diffs and try to recover them later. var current snapshot = base err := iterateJournal(db, func(parent common.Hash, root common.Hash, destructSet map[common.Hash]struct{}, accountData map[common.Hash][]byte, storageData map[common.Hash]map[common.Hash][]byte) error { - current = newDiffLayer(current, root, destructSet, accountData, storageData, nil) + current = newDiffLayer(current, root, destructSet, accountData, storageData) return nil }) if err != nil { diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 117504f8b6..b55d4d1df7 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -100,18 +100,6 @@ type Snapshot interface { // Root returns the root hash for which this snapshot was made. 
Root() common.Hash - // WaitAndGetVerifyRes will wait until the snapshot been verified and return verification result - WaitAndGetVerifyRes() bool - - // Verified returns whether the snapshot is verified - Verified() bool - - // MarkValid stores the verification result - MarkValid() - - // CorrectAccounts updates account data for storing the correct data during pipecommit - CorrectAccounts(map[common.Hash][]byte) - // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. Account(hash common.Hash) (*types.SlimAccount, error) @@ -142,7 +130,7 @@ type snapshot interface { // the specified data items. // // Note, the maps are retained by the method to avoid copying everything. - Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer + Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer // Journal commits an entire diff hierarchy to disk into a single journal entry. // This is meant to be used during shutdown to persist the snapshot without @@ -367,7 +355,7 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot { // Update adds a new snapshot into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). -func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) error { +func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { // Reject noop updates to avoid self-loops in the snapshot tree. This is a // special case that can only happen for Clique networks where empty blocks // don't modify the state (0 block subsidy). 
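With WaitAndGetVerifyRes, Verified, MarkValid and CorrectAccounts removed from the Snapshot interface, and the verified channel dropped from Update, a hypothetical caller of the tree-level API now passes five arguments. The hashes and payload maps below are placeholders; the call shape matches the test updates elsewhere in this patch:

    // Illustrative only: linking a new diff layer onto its parent.
    if err := snaps.Update(blockRoot, parentRoot, destructs, accounts, storage); err != nil {
        t.Fatalf("failed to update snapshot tree: %v", err)
    }
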
@@ -382,7 +370,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m if parent == nil { return fmt.Errorf("parent [%#x] snapshot missing", parentRoot) } - snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage, verified) + snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage) // Save the new snapshot for later t.lock.Lock() @@ -708,11 +696,6 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) { if snap == nil { return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root) } - // Wait the snapshot(difflayer) is verified, it means the account data also been refreshed with the correct data - if !snap.WaitAndGetVerifyRes() { - return common.Hash{}, ErrSnapshotStale - } - // Run the journaling t.lock.Lock() defer t.lock.Unlock() diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index 57a1d9f1b4..f6b5324e7b 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -107,7 +107,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) { accounts := map[common.Hash][]byte{ common.HexToHash("0xa1"): randomAccount(), } - if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil { + if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { t.Fatalf("failed to create a diff layer: %v", err) } if n := len(snaps.layers); n != 2 { @@ -151,10 +151,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) { accounts := map[common.Hash][]byte{ common.HexToHash("0xa1"): randomAccount(), } - if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil { + if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { t.Fatalf("failed to create a diff layer: %v", err) } - if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil { + if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { t.Fatalf("failed to create a diff layer: %v", err) } if n := len(snaps.layers); n != 3 { @@ -203,13 +203,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { accounts := map[common.Hash][]byte{ common.HexToHash("0xa1"): randomAccount(), } - if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil { + if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { t.Fatalf("failed to create a diff layer: %v", err) } - if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil { + if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { t.Fatalf("failed to create a diff layer: %v", err) } - if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil, nil); err != nil { + if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil { t.Fatalf("failed to create a diff layer: %v", err) } if n := len(snaps.layers); n != 4 { @@ -263,12 +263,12 @@ func TestPostCapBasicDataAccess(t *testing.T) { }, } // The lowest difflayer - snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil) - 
snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil) - snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil, nil) + snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) + snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) + snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil) - snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil) - snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil, nil) + snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) + snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil) // checkExist verifies if an account exists in a snapshot checkExist := func(layer *diffLayer, key string) error { @@ -363,7 +363,7 @@ func TestSnaphots(t *testing.T) { ) for i := 0; i < 129; i++ { head = makeRoot(uint64(i + 2)) - snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil, nil) + snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil) last = head snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk) } @@ -456,9 +456,9 @@ func TestReadStateDuringFlattening(t *testing.T) { }, } // 4 layers in total, 3 diff layers and 1 disk layers - snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil) - snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil) - snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil) + snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) + snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) + snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) // Obtain the topmost snapshot handler for state accessing snap := snaps.Snapshot(common.HexToHash("0xa3")) diff --git a/core/state/statedb.go b/core/state/statedb.go index 525da23b0f..b1eeb2e8f1 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -35,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" @@ -82,7 +81,6 @@ type StateDB struct { stateRoot common.Hash // The calculation result of IntermediateRoot fullProcessed bool - pipeCommit bool // These maps hold the state changes (including the corresponding // original value) that occurred in this **block**. @@ -197,8 +195,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) } tr, err := db.OpenTrie(root) - // return error when 1. failed to open trie and 2. 
the snap is nil or the snap is not nil and done verification - if err != nil && (sdb.snap == nil || sdb.snap.Verified()) { + if err != nil { return nil, err } _, sdb.noTrie = tr.(*trie.EmptyTrie) @@ -300,20 +297,6 @@ func (s *StateDB) SetExpectedStateRoot(root common.Hash) { s.expectedRoot = root } -// Enable the pipeline commit function of statedb -func (s *StateDB) EnablePipeCommit() { - if s.snap != nil && s.snaps.Layers() > 1 { - // after big merge, disable pipeCommit for now, - // because `s.db.TrieDB().Update` should be called after `s.trie.Commit(true)` - s.pipeCommit = false - } -} - -// IsPipeCommit checks whether pipecommit is enabled on the statedb or not -func (s *StateDB) IsPipeCommit() bool { - return s.pipeCommit -} - // Mark that the block is full processed func (s *StateDB) MarkFullProcessed() { s.fullProcessed = true @@ -335,22 +318,6 @@ func (s *StateDB) Error() error { return s.dbErr } -// Not thread safe -func (s *StateDB) Trie() (Trie, error) { - if s.trie == nil { - err := s.WaitPipeVerification() - if err != nil { - return nil, err - } - tr, err := s.db.OpenTrie(s.originalRoot) - if err != nil { - return nil, err - } - s.trie = tr - } - return s.trie, nil -} - func (s *StateDB) AddLog(log *types.Log) { s.journal.append(addLogChange{txhash: s.thash}) @@ -867,8 +834,7 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { // expectedRoot: s.expectedRoot, // stateRoot: s.stateRoot, originalRoot: s.originalRoot, - // fullProcessed: s.fullProcessed, - // pipeCommit: s.pipeCommit, + // fullProcessed: s.fullProcessed, accounts: make(map[common.Hash][]byte), storages: make(map[common.Hash]map[common.Hash][]byte), accountsOrigin: make(map[common.Address][]byte), @@ -999,17 +965,6 @@ func (s *StateDB) GetRefund() uint64 { return s.refund } -// WaitPipeVerification waits until the snapshot been verified -func (s *StateDB) WaitPipeVerification() error { - // Need to wait for the parent trie to commit - if s.snap != nil { - if valid := s.snap.WaitAndGetVerifyRes(); !valid { - return errors.New("verification on parent snap failed") - } - } - return nil -} - // Finalise finalises the state by removing the destructed objects and clears // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. @@ -1056,11 +1011,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } prefetcher := s.prefetcher if prefetcher != nil && len(addressesToPrefetch) > 0 { - if s.snap.Verified() { - prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) - } else if prefetcher.rootParent != (common.Hash{}) { - prefetcher.prefetch(common.Hash{}, prefetcher.rootParent, common.Address{}, addressesToPrefetch) - } + prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) } // Invalidate journal because reverting across transactions is not allowed. 
s.clearJournalAndRefund() @@ -1076,76 +1027,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { return s.StateIntermediateRoot() } -// CorrectAccountsRoot will fix account roots in pipecommit mode -func (s *StateDB) CorrectAccountsRoot(blockRoot common.Hash) { - var snapshot snapshot.Snapshot - if blockRoot == (common.Hash{}) { - snapshot = s.snap - } else if s.snaps != nil { - snapshot = s.snaps.Snapshot(blockRoot) - } - - if snapshot == nil { - return - } - if accounts, err := snapshot.Accounts(); err == nil && accounts != nil { - for _, obj := range s.stateObjects { - if !obj.deleted { - if account, exist := accounts[crypto.Keccak256Hash(obj.address[:])]; exist { - if len(account.Root) == 0 { - obj.data.Root = types.EmptyRootHash - } else { - obj.data.Root = common.BytesToHash(account.Root) - } - } - } - } - } -} - -// PopulateSnapAccountAndStorage tries to populate required accounts and storages for pipecommit -func (s *StateDB) PopulateSnapAccountAndStorage() { - for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { - if s.snap != nil { - s.populateSnapStorage(obj) - s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - } - } - } -} - -// populateSnapStorage tries to populate required storages for pipecommit, and returns a flag to indicate whether the storage root changed or not -func (s *StateDB) populateSnapStorage(obj *stateObject) bool { - for key, value := range obj.dirtyStorage { - obj.pendingStorage[key] = value - } - if len(obj.pendingStorage) == 0 { - return false - } - hasher := crypto.NewKeccakState() - var storage map[common.Hash][]byte - for key, value := range obj.pendingStorage { - var v []byte - if (value != common.Hash{}) { - // Encoding []byte cannot fail, ok to ignore the error. - v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) - } - // If state snapshotting is active, cache the data til commit - if obj.db.snap != nil { - if storage == nil { - // Retrieve the old storage map, if available, create a new one otherwise - if storage = obj.db.storages[obj.addrHash]; storage == nil { - storage = make(map[common.Hash][]byte) - obj.db.storages[obj.addrHash] = storage - } - } - storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00 - } - } - return true -} - func (s *StateDB) AccountsIntermediateRoot() { tasks := make(chan func()) finishCh := make(chan struct{}) @@ -1482,7 +1363,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A // // The associated block number of the state transition is also provided // for more chain context. -func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) { +func (s *StateDB) Commit(block uint64, postCommitFunc func() error) (common.Hash, *types.DiffLayer, error) { // Short circuit in case any database failure occurred earlier. 
if s.dbErr != nil { s.StopPrefetcher() @@ -1490,38 +1371,17 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc } // Finalize any pending changes and merge everything into the tries var ( - diffLayer *types.DiffLayer - verified chan struct{} - snapUpdated chan struct{} - incomplete map[common.Address]struct{} - nodes = trienode.NewMergedNodeSet() + diffLayer *types.DiffLayer + incomplete map[common.Address]struct{} + nodes = trienode.NewMergedNodeSet() ) if s.snap != nil { diffLayer = &types.DiffLayer{} } - if s.pipeCommit { - // async commit the MPT - verified = make(chan struct{}) - snapUpdated = make(chan struct{}) - } commmitTrie := func() error { commitErr := func() error { - if s.pipeCommit { - <-snapUpdated - // Due to state verification pipeline, the accounts roots are not updated, leading to the data in the difflayer is not correct, capture the correct data here - s.AccountsIntermediateRoot() - if parent := s.snap.Root(); parent != s.expectedRoot { - accountData := make(map[common.Hash][]byte) - for k, v := range s.accounts { - accountData[crypto.Keccak256Hash(k[:])] = v - } - s.snaps.Snapshot(s.expectedRoot).CorrectAccounts(accountData) - } - s.snap = nil - } - if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot { log.Error("Invalid merkle root", "remote", s.expectedRoot, "local", s.stateRoot) return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot) @@ -1629,8 +1489,8 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc } } - for _, postFunc := range postCommitFuncs { - err := postFunc() + if postCommitFunc != nil { + err := postCommitFunc() if err != nil { return err } @@ -1639,19 +1499,6 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc return nil }() - if s.pipeCommit { - if commitErr == nil { - s.snaps.Snapshot(s.stateRoot).MarkValid() - close(verified) - } else { - // The blockchain will do the further rewind if write block not finish yet - close(verified) - if failPostCommitFunc != nil { - failPostCommitFunc() - } - log.Error("state verification failed", "err", commitErr) - } - } return commitErr } @@ -1693,15 +1540,10 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc if metrics.EnabledExpensive { defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) } - if s.pipeCommit { - defer close(snapUpdated) - // State verification pipeline - accounts root are not calculated here, just populate needed fields for process - s.PopulateSnapAccountAndStorage() - } diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer() // Only update if there's a state transition (skip empty Clique blocks) if parent := s.snap.Root(); parent != s.expectedRoot { - err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages, verified) + err := s.snaps.Update(s.expectedRoot, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages) if err != nil { log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) @@ -1721,12 +1563,9 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc return nil }, } - if s.pipeCommit { - go commmitTrie() - } else { - defer s.StopPrefetcher() - commitFuncs = append(commitFuncs, commmitTrie) - } + + defer s.StopPrefetcher() + commitFuncs = append(commitFuncs, commmitTrie) commitRes := 
make(chan error, len(commitFuncs)) for _, f := range commitFuncs { // commitFuncs[0] and commitFuncs[1] both read map `stateObjects`, but no conflicts @@ -1743,11 +1582,7 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc } root := s.stateRoot - if s.pipeCommit { - root = s.expectedRoot - } else { - s.snap = nil - } + s.snap = nil if root == (common.Hash{}) { root = types.EmptyRootHash } diff --git a/eth/backend.go b/eth/backend.go index 56add41cc0..14053f1450 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -296,9 +296,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } ) bcOps := make([]core.BlockChainOption, 0) - if config.PipeCommit { - bcOps = append(bcOps, core.EnablePipelineCommit) - } if config.PersistDiff { bcOps = append(bcOps, core.EnablePersistDiff(config.DiffBlock)) } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 697a9fb91f..74bea35b97 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -107,7 +107,6 @@ type Config struct { DirectBroadcast bool DisableSnapProtocol bool // Whether disable snap protocol EnableTrustProtocol bool // Whether enable trust protocol - PipeCommit bool RangeLimit bool // Deprecated, use 'TransactionHistory' instead. diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 2d86b97ab1..ac79812ac7 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -30,7 +30,6 @@ func (c Config) MarshalTOML() (interface{}, error) { DirectBroadcast bool DisableSnapProtocol bool EnableTrustProtocol bool - PipeCommit bool RangeLimit bool TxLookupLimit uint64 `toml:",omitempty"` TransactionHistory uint64 `toml:",omitempty"` @@ -90,7 +89,6 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.DirectBroadcast = c.DirectBroadcast enc.DisableSnapProtocol = c.DisableSnapProtocol enc.EnableTrustProtocol = c.EnableTrustProtocol - enc.PipeCommit = c.PipeCommit enc.RangeLimit = c.RangeLimit enc.TxLookupLimit = c.TxLookupLimit enc.TransactionHistory = c.TransactionHistory @@ -154,7 +152,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { DirectBroadcast *bool DisableSnapProtocol *bool EnableTrustProtocol *bool - PipeCommit *bool RangeLimit *bool TxLookupLimit *uint64 `toml:",omitempty"` TransactionHistory *uint64 `toml:",omitempty"` @@ -243,9 +240,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.EnableTrustProtocol != nil { c.EnableTrustProtocol = *dec.EnableTrustProtocol } - if dec.PipeCommit != nil { - c.PipeCommit = *dec.PipeCommit - } if dec.RangeLimit != nil { c.RangeLimit = *dec.RangeLimit } diff --git a/miner/worker.go b/miner/worker.go index 8513b6a9b6..135dc45327 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1430,15 +1430,6 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti if interval != nil { interval() } - /* - - err := env.state.WaitPipeVerification() - if err != nil { - return err - } - env.state.CorrectAccountsRoot(w.chain.CurrentBlock().Root) - */ - fees := env.state.GetBalance(consensus.SystemAddress).ToBig() feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether)) // Withdrawals are set to nil here, because this is only called in PoW. 
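For reviewers: below is a minimal Go sketch, not part of this series, of how call sites look once these hunks apply. It assumes only the signatures visible above: snapshot.Tree.Update drops the trailing verified channel, and StateDB.Commit takes a single optional post-commit callback in place of the failPostCommitFunc plus variadic postCommitFuncs pair. The helper names updateLayer and commitState are illustrative assumptions, not code from this patch.

package example // illustrative sketch; assumes this repository's post-patch packages

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// updateLayer mirrors the test call sites above: the layer is created
// without the former `verified chan struct{}` argument.
func updateLayer(snaps *snapshot.Tree, blockRoot, parentRoot common.Hash,
	accounts map[common.Hash][]byte) error {
	return snaps.Update(blockRoot, parentRoot, nil, accounts, nil)
}

// commitState shows the simplified Commit shape: one optional callback that
// runs after the trie commit succeeds; the DiffLayer return is discarded here.
func commitState(statedb *state.StateDB, block uint64) (common.Hash, error) {
	root, _, err := statedb.Commit(block, func() error {
		return nil // e.g. persist block data once the commit has gone through
	})
	return root, err
}

Dropping the verification channel and the pipeCommit branches brings these APIs back in line with upstream go-ethereum, which appears to be the motivation for the removal.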
diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index 7fd9b06c19..a2c8e977d4 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -347,33 +347,27 @@ func (db *Database) Cap(limit common.StorageSize) error { // Keep committing nodes from the flush-list until we're below allowance oldest := db.oldest - err := func() error { - for size > limit && oldest != (common.Hash{}) { - // Fetch the oldest referenced node and push into the batch - node := db.dirties[oldest] - rawdb.WriteLegacyTrieNode(batch, oldest, node.node) - - // If we exceeded the ideal batch size, commit and reset - if batch.ValueSize() >= ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - log.Error("Failed to write flush list to disk", "err", err) - return err - } - batch.Reset() - } - // Iterate to the next flush item, or abort if the size cap was achieved. Size - // is the total size, including the useful cached data (hash -> blob), the - // cache item metadata, as well as external children mappings. - size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize) - if node.external != nil { - size -= common.StorageSize(len(node.external) * common.HashLength) + for size > limit && oldest != (common.Hash{}) { + // Fetch the oldest referenced node and push into the batch + node := db.dirties[oldest] + rawdb.WriteLegacyTrieNode(batch, oldest, node.node) + + // If we exceeded the ideal batch size, commit and reset + if batch.ValueSize() >= ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + log.Error("Failed to write flush list to disk", "err", err) + return err } - oldest = node.flushNext + batch.Reset() } - return nil - }() - if err != nil { - return err + // Iterate to the next flush item, or abort if the size cap was achieved. Size + // is the total size, including the useful cached data (hash -> blob), the + // cache item metadata, as well as external children mappings. + size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize) + if node.external != nil { + size -= common.StorageSize(len(node.external) * common.HashLength) + } + oldest = node.flushNext } // Flush out any remainder data from the last batch if err := batch.Write(); err != nil {