diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 738b932ef4b4..1370e689cdb3 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -10,9 +10,7 @@ crates=($(cargo metadata --format-version=1 --no-deps | jq -r '.packages[].name'
 exclude_crates=(
   # The following require investigation if they can be fixed
   reth-basic-payload-builder
-  reth-beacon-consensus
   reth-bench
-  reth-blockchain-tree
   reth-cli
   reth-cli-commands
   reth-cli-runner
@@ -45,7 +43,6 @@ exclude_crates=(
   reth-optimism-node
   reth-optimism-payload-builder
   reth-optimism-rpc
-  reth-optimism-primitives
   reth-rpc
   reth-rpc-api
   reth-rpc-api-testing-util
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 418fd4cc4e68..e0ae216dd38b 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -73,7 +73,9 @@ jobs:
           cache-on-failure: true
       - uses: dcarbone/install-jq-action@v3
       - name: Run Wasm checks
-        run: .github/assets/check_wasm.sh
+        run: |
+          sudo apt update && sudo apt install gcc-multilib
+          .github/assets/check_wasm.sh
 
   riscv:
     runs-on: ubuntu-latest
diff --git a/Cargo.lock b/Cargo.lock
index da8a17150f24..116f8dcc3c6f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 
 [[package]]
 name = "alloy-chains"
-version = "0.1.52"
+version = "0.1.53"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56f15afc5993458b42739ab3b69bdb6b4c8112acd3997dbea9bc092c9517137c"
+checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -460,7 +460,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -700,7 +700,7 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -716,7 +716,7 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
  "syn-solidity",
  "tiny-keccak",
 ]
@@ -732,7 +732,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
  "syn-solidity",
 ]
 
@@ -938,7 +938,7 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -1136,7 +1136,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -1172,18 +1172,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
 name = "async-trait"
-version = "0.1.84"
+version = "0.1.85"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0"
+checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -1221,7 +1221,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -1327,7 +1327,7 @@ dependencies = [
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.94",
+ "syn 2.0.95",
 ]
 
 [[package]]
@@ -1510,7 +1510,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c"
 dependencies = [
"proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -1632,7 +1632,7 @@ checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1821,9 +1821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "9560b07a799281c7e0958b9296854d6fafd4c5f31444a7e5bb1ad6dde5ccf1bd" dependencies = [ "clap_builder", "clap_derive", @@ -1831,9 +1831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "874e0dd3eb68bf99058751ac9712f622e61e6f393a94f7128fa26e3f02f5c7cd" dependencies = [ "anstream", "anstyle", @@ -1843,14 +1843,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2317,7 +2317,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2341,7 +2341,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2352,7 +2352,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2461,7 +2461,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2482,7 +2482,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "unicode-xid", ] @@ -2596,7 +2596,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2742,7 +2742,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2753,7 +2753,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2773,7 +2773,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2807,9 +2807,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "036c84bd29bff35e29bbee3c8fc0e2fb95db12b6f2f3cae82a827fbc97256f3a" +checksum = "862e41ea8eea7508f70cfd8cd560f0c34bb0af37c719a8e06c2672f0f031d8e5" dependencies = [ "alloy-primitives", "ethereum_serde_utils", @@ -2822,14 +2822,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9dc8e67e1f770f5aa4c2c2069aaaf9daee7ac21bed357a71b911b37a58966cfb" +checksum = "d31ecf6640112f61dc34b4d8359c081102969af0edd18381fed2052f6db6a410" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2933,6 +2933,7 @@ dependencies = [ "reth", "reth-basic-payload-builder", "reth-chainspec", + "reth-engine-local", "reth-ethereum-payload-builder", "reth-node-api", "reth-node-core", @@ -3398,7 +3399,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4080,7 +4081,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4137,7 +4138,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4258,7 +4259,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4507,7 +4508,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4902,14 +4903,14 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" +checksum = "12779523996a67c13c84906a876ac6fe4d07a6e1adb54978378e13f199251a62" dependencies = [ "base64 0.22.1", "indexmap 2.7.0", @@ -4937,9 +4938,9 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" +checksum = "dbd4884b1dd24f7d6628274a2f5ae22465c337c5ba065ec9b6edccddf8acc673" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -4948,6 +4949,8 @@ dependencies = [ "metrics", "ordered-float", "quanta", + "rand 0.8.5", + "rand_xoshiro", "sketches-ddsketch", ] @@ -5049,16 +5052,16 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.9" +version = "0.12.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23db87a7f248211f6a7c8644a1b750541f8a4c68ae7de0f908860e44c0c201f6" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", "loom", "parking_lot", - "quanta", + "portable-atomic", "rustc_version 0.4.1", "smallvec", "tagptr", @@ -5290,7 +5293,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -5304,9 +5307,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3409fc85ac27b27d971ea7cd1aabafd2eefa6de7e481c8d4f707225c117e81a" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" dependencies = [ "alloy-rlp", "arbitrary", @@ -5343,9 +5346,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0adb232ec805af3aa35606c19329aa7dc44c4457ae318ed0b8fc7f799dd7dbfe" +checksum = "250244eadaf1a25e0e2ad263110ad2d1b43c2e57ddf4c025e71552d98196a8d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5361,9 +5364,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c272cfd65317538f5815c2b7059445230b050d48ebe2d0bab3e861d419a785" +checksum = "98334a9cdccc5878e9d5c48afc9cc1b84da58dbc68d41f9488d8f71688b495d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5376,9 +5379,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19872a58b7acceeffb8e88ea048bee1690e7cde53068bd652976435d61fcd1de" +checksum = "1dd588157ac14db601d6497b81ae738b2581c60886fc592976fab6c282619604" dependencies = [ "alloy-consensus", "alloy-network", @@ -5391,9 +5394,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad65d040648e0963ed378e88489f5805e24fb56b7e6611362299cd4c24debeb2" +checksum = "753762429c31f838b59c886b31456c9bf02fd38fb890621665523a9087ae06ae" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5401,9 +5404,10 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-sol-types", "async-trait", "brotli", - "cfg-if", + "derive_more", "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", @@ -5415,9 +5419,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b1f2547067c5b60f3144ae1033a54ce1d11341d8327fa8f203b048d51465e9" +checksum = "1f483fb052ef807682ae5b5729c3a61a092ee4f7334e6e6055de67e9f28ef880" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5428,9 +5432,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68d1a51fe3ee143f102b82f54fa237f21d12635da363276901e6d3ef6c65b7b" +checksum = "37b1d3872021aa28b10fc6cf8252e792e802d89e8b2cdaa57dcb9243c461b286" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5447,9 +5451,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8833ef149ceb74f8f25a79801d110d88ec2db32e700fa10db6c5f5b5cbb71a" +checksum = "c43f00d4060a6a38f5bf0a8182b4cc4c7071e2bc96942f414619251b522169eb" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5649,9 +5653,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_macros", "phf_shared", @@ -5659,9 +5663,9 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", "rand 0.8.5", @@ -5669,51 +5673,51 @@ 
dependencies = [ [[package]] name = "phf_macros" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ "phf_generator", "phf_shared", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -5848,12 +5852,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705" dependencies = [ "proc-macro2", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -5904,7 +5908,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -6002,7 +6006,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -6192,6 +6196,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "ratatui" version = "0.28.1" @@ -6399,8 +6412,6 @@ dependencies = [ "eyre", "futures", "reth-basic-payload-builder", - "reth-beacon-consensus", - "reth-blockchain-tree", "reth-chainspec", "reth-cli", "reth-cli-commands", @@ -6412,7 +6423,6 @@ dependencies = [ "reth-db", "reth-db-api", "reth-downloaders", - "reth-engine-util", "reth-errors", "reth-ethereum-cli", "reth-ethereum-payload-builder", @@ -6483,62 +6493,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-beacon-consensus" -version = "1.1.5" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives", - "alloy-rpc-types-engine", - "assert_matches", - "futures", - "itertools 0.13.0", - 
"metrics", - "reth-blockchain-tree", - "reth-blockchain-tree-api", - "reth-chainspec", - "reth-codecs", - "reth-config", - "reth-consensus", - "reth-db", - "reth-db-api", - "reth-downloaders", - "reth-engine-primitives", - "reth-errors", - "reth-ethereum-consensus", - "reth-ethereum-engine-primitives", - "reth-ethereum-evm", - "reth-evm", - "reth-exex-types", - "reth-metrics", - "reth-network-p2p", - "reth-node-types", - "reth-payload-builder", - "reth-payload-builder-primitives", - "reth-payload-primitives", - "reth-payload-validator", - "reth-primitives", - "reth-primitives-traits", - "reth-provider", - "reth-prune", - "reth-prune-types", - "reth-rpc-types-compat", - "reth-stages", - "reth-stages-api", - "reth-static-file", - "reth-tasks", - "reth-testing-utils", - "reth-tokio-util", - "reth-tracing", - "schnellru", - "thiserror 2.0.9", - "tokio", - "tokio-stream", - "tracing", -] - [[package]] name = "reth-bench" version = "1.1.5" @@ -6575,59 +6529,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-blockchain-tree" -version = "1.1.5" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives", - "aquamarine", - "assert_matches", - "linked_hash_set", - "metrics", - "parking_lot", - "reth-blockchain-tree-api", - "reth-chainspec", - "reth-consensus", - "reth-db", - "reth-db-api", - "reth-ethereum-evm", - "reth-evm", - "reth-execution-errors", - "reth-execution-types", - "reth-metrics", - "reth-network", - "reth-node-types", - "reth-primitives", - "reth-provider", - "reth-revm", - "reth-stages-api", - "reth-storage-errors", - "reth-testing-utils", - "reth-trie", - "reth-trie-db", - "reth-trie-parallel", - "tokio", - "tracing", -] - -[[package]] -name = "reth-blockchain-tree-api" -version = "1.1.5" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "reth-consensus", - "reth-execution-errors", - "reth-primitives", - "reth-primitives-traits", - "reth-storage-errors", - "thiserror 2.0.9", -] - [[package]] name = "reth-chain-state" version = "1.1.5" @@ -6670,7 +6571,6 @@ dependencies = [ "alloy-trie", "auto_impl", "derive_more", - "once_cell", "reth-ethereum-forks", "reth-network-peers", "reth-primitives-traits", @@ -6712,7 +6612,6 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "ratatui", - "reth-beacon-consensus", "reth-chainspec", "reth-cli", "reth-cli-runner", @@ -6728,6 +6627,7 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-ethereum-cli", + "reth-ethereum-consensus", "reth-evm", "reth-exex", "reth-fs-util", @@ -6816,7 +6716,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7206,7 +7106,6 @@ dependencies = [ "eyre", "futures-util", "op-alloy-rpc-types-engine", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-engine-primitives", @@ -7253,11 +7152,11 @@ version = "1.1.5" dependencies = [ "futures", "pin-project", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-engine-primitives", "reth-engine-tree", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-ethereum-evm", "reth-evm", @@ -7290,17 +7189,17 @@ dependencies = [ "derive_more", "futures", "metrics", + "moka", "proptest", "rand 0.8.5", "rayon", - "reth-beacon-consensus", - "reth-blockchain-tree-api", "reth-chain-state", "reth-chainspec", "reth-consensus", "reth-db", "reth-engine-primitives", "reth-errors", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-evm", "reth-exex-types", @@ -7328,6 +7227,7 @@ 
dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", + "schnellru", "thiserror 2.0.9", "tokio", "tracing", @@ -7369,7 +7269,6 @@ dependencies = [ name = "reth-errors" version = "1.1.5" dependencies = [ - "reth-blockchain-tree-api", "reth-consensus", "reth-execution-errors", "reth-fs-util", @@ -8016,7 +7915,6 @@ version = "1.1.5" dependencies = [ "alloy-rpc-types-engine", "eyre", - "reth-beacon-consensus", "reth-consensus", "reth-db-api", "reth-engine-primitives", @@ -8045,8 +7943,6 @@ dependencies = [ "futures", "jsonrpsee", "rayon", - "reth-beacon-consensus", - "reth-blockchain-tree", "reth-chain-state", "reth-chainspec", "reth-cli-util", @@ -8073,7 +7969,6 @@ dependencies = [ "reth-node-events", "reth-node-metrics", "reth-payload-builder", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", @@ -8165,11 +8060,11 @@ dependencies = [ "futures", "rand 0.8.5", "reth-basic-payload-builder", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-db", "reth-e2e-test-utils", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-ethereum-evm", "reth-ethereum-payload-builder", @@ -8207,7 +8102,6 @@ dependencies = [ "futures", "humantime", "pin-project", - "reth-beacon-consensus", "reth-engine-primitives", "reth-network-api", "reth-primitives-traits", @@ -8378,6 +8272,7 @@ version = "1.1.5" dependencies = [ "alloy-chains", "alloy-primitives", + "auto_impl", "once_cell", "reth-ethereum-forks", "serde", @@ -8402,7 +8297,6 @@ dependencies = [ "op-alloy-rpc-types-engine", "parking_lot", "reth-basic-payload-builder", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-db", @@ -8662,7 +8556,6 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rayon", "reth-chainspec", "reth-codecs", "reth-ethereum-forks", @@ -8698,6 +8591,7 @@ dependencies = [ "derive_more", "k256", "modular-bitfield", + "once_cell", "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", @@ -8731,7 +8625,6 @@ dependencies = [ "parking_lot", "rand 0.8.5", "rayon", - "reth-blockchain-tree-api", "reth-chain-state", "reth-chainspec", "reth-codecs", @@ -8960,7 +8853,6 @@ dependencies = [ "jsonrpsee", "metrics", "pin-project", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-engine-primitives", @@ -9010,7 +8902,6 @@ dependencies = [ "jsonrpsee-types", "metrics", "parking_lot", - "reth-beacon-consensus", "reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", @@ -9809,7 +9700,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.94", + "syn 2.0.95", "unicode-ident", ] @@ -9893,9 +9784,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9941,7 +9832,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.1.0", + "security-framework 3.2.0", ] [[package]] @@ -10137,9 +10028,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d3f8c9bfcc3cbb6b0179eb57042d75b1582bdc65c3cb95f3fa999509c03cbc" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies 
= [ "bitflags 2.6.0", "core-foundation 0.10.0", @@ -10150,9 +10041,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -10214,14 +10105,14 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "indexmap 2.7.0", "itoa", @@ -10249,7 +10140,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10300,7 +10191,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10333,7 +10224,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10490,9 +10381,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "sketches-ddsketch" @@ -10616,7 +10507,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10674,9 +10565,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.94" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "987bc0be1cdea8b10216bd06e2ca407d40b9543468fafd3ddfb02f36e77f71f3" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -10692,7 +10583,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10712,7 +10603,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10790,7 +10681,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10838,7 +10729,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10849,7 +10740,7 @@ checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11006,7 +10897,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11207,7 +11098,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11543,7 +11434,7 @@ 
checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11613,7 +11504,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -11648,7 +11539,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11814,7 +11705,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11825,7 +11716,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11836,7 +11727,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11847,7 +11738,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -12122,7 +12013,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -12144,7 +12035,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -12164,7 +12055,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -12185,7 +12076,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -12207,7 +12098,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 64755b1f50d9..e8c5c8c9517e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,8 +11,6 @@ exclude = [".github/"] members = [ "bin/reth-bench/", "bin/reth/", - "crates/blockchain-tree-api/", - "crates/blockchain-tree/", "crates/chain-state/", "crates/chainspec/", "crates/cli/cli/", @@ -20,7 +18,6 @@ members = [ "crates/cli/runner/", "crates/cli/util/", "crates/config/", - "crates/consensus/beacon/", "crates/consensus/common/", "crates/consensus/consensus/", "crates/consensus/debug-client/", @@ -304,10 +301,7 @@ overflow-checks = true op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } reth-basic-payload-builder = { path = "crates/payload/basic" } -reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-bench = { path = "bin/reth-bench" } -reth-blockchain-tree = { path = "crates/blockchain-tree" } -reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-chain-state = { path = "crates/chain-state" } reth-chainspec = { path = "crates/chainspec", default-features = false } reth-cli = { path = "crates/cli/cli" } @@ -533,13 +527,14 @@ tracing-appender = "0.2" url = { version = "2.3", default-features = false } zstd = "0.13" byteorder = "1" +moka = "0.12" # metrics metrics = "0.24.0" metrics-derive = "0.1" 
 metrics-exporter-prometheus = { version = "0.16.0", default-features = false }
 metrics-process = "2.1.0"
-metrics-util = { default-features = false, version = "0.18.0" }
+metrics-util = { default-features = false, version = "0.19.0" }
 
 # proc-macros
 proc-macro2 = "1.0"
diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs
index e3d388b37ece..2866cf8fb45c 100644
--- a/bin/reth-bench/src/bench/new_payload_fcu.rs
+++ b/bin/reth-bench/src/bench/new_payload_fcu.rs
@@ -75,7 +75,7 @@ impl Command {
         while let Some((block, head, safe, finalized)) = receiver.recv().await {
             // just put gas used here
             let gas_used = block.gas_used;
-            let block_number = block.header.number;
+            let block_number = block.number;
 
             let versioned_hashes: Vec<B256> =
                 block.body().blob_versioned_hashes_iter().copied().collect();
diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml
index fb86a8ced2b3..f7bdfd8ceed2 100644
--- a/bin/reth/Cargo.toml
+++ b/bin/reth/Cargo.toml
@@ -31,12 +31,10 @@ reth-stages.workspace = true
 reth-execution-types.workspace = true
 reth-errors.workspace = true
 reth-transaction-pool.workspace = true
-reth-beacon-consensus.workspace = true
 reth-cli-runner.workspace = true
 reth-cli-commands.workspace = true
 reth-cli-util.workspace = true
 reth-consensus-common.workspace = true
-reth-blockchain-tree.workspace = true
 reth-rpc-builder.workspace = true
 reth-rpc.workspace = true
 reth-rpc-types-compat.workspace = true
@@ -64,7 +62,6 @@ reth-node-builder.workspace = true
 reth-node-events.workspace = true
 reth-node-metrics.workspace = true
 reth-consensus.workspace = true
-reth-engine-util.workspace = true
 reth-prune.workspace = true
 
 # crypto
diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs
index edf85b3c5898..40110fe84988 100644
--- a/bin/reth/src/commands/debug_cmd/build_block.rs
+++ b/bin/reth/src/commands/debug_cmd/build_block.rs
@@ -12,22 +12,18 @@ use eyre::Context;
 use reth_basic_payload_builder::{
     BuildArguments, BuildOutcome, Cancelled, PayloadBuilder, PayloadConfig,
 };
-use reth_beacon_consensus::EthBeaconConsensus;
-use reth_blockchain_tree::{
-    BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
-};
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
 use reth_cli_runner::CliContext;
 use reth_consensus::{Consensus, FullConsensus};
-use reth_errors::RethResult;
+use reth_errors::{ConsensusError, RethResult};
 use reth_ethereum_payload_builder::EthereumBuilderConfig;
 use reth_evm::execute::{BlockExecutorProvider, Executor};
 use reth_execution_types::ExecutionOutcome;
 use reth_fs_util as fs;
 use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes};
-use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider};
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthExecutorProvider};
 use reth_primitives::{
     BlockExt, EthPrimitives, SealedBlockFor, SealedBlockWithSenders, SealedHeader, Transaction,
     TransactionSigned,
@@ -128,24 +124,15 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
     ) -> eyre::Result<()> {
         let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
 
-        let consensus: Arc<dyn FullConsensus> =
+        let consensus: Arc<dyn FullConsensus<Error = ConsensusError>> =
             Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
 
-        let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec());
-
-        // configure blockchain tree
-        let tree_externals =
-            TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor);
-        let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default())?;
-        let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree));
-
         // fetch the best block from the database
         let best_block = self
             .lookup_best_block(provider_factory.clone())
             .wrap_err("the head block is missing")?;
 
-        let blockchain_db =
-            BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?;
+        let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
         let blob_store = InMemoryBlobStore::default();
 
         let validator =
@@ -251,7 +238,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
         debug!(target: "reth::cli", ?block, "Built new payload");
 
         consensus.validate_header_with_total_difficulty(block, U256::MAX)?;
-        consensus.validate_header(block)?;
+        consensus.validate_header(block.sealed_header())?;
         consensus.validate_block_pre_execution(block)?;
 
         let senders = block.senders().expect("sender recovery failed");
@@ -287,7 +274,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
         let provider_rw = provider_factory.provider_rw()?;
         provider_rw.append_blocks_with_state(
             Vec::from([block_with_senders]),
-            execution_outcome,
+            &execution_outcome,
             hashed_post_state.into_sorted(),
             trie_updates,
         )?;
diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs
index e25bb6afff66..cc0c70106750 100644
--- a/bin/reth/src/commands/debug_cmd/execution.rs
+++ b/bin/reth/src/commands/debug_cmd/execution.rs
@@ -5,7 +5,6 @@ use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::{BlockNumber, B256};
 use clap::Parser;
 use futures::StreamExt;
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
@@ -18,12 +17,13 @@ use reth_downloaders::{
     bodies::bodies::BodiesDownloaderBuilder,
     headers::reverse_headers::ReverseHeadersDownloaderBuilder,
 };
+use reth_errors::ConsensusError;
 use reth_exex::ExExManagerHandle;
 use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient};
 use reth_node_api::NodeTypesWithDBAdapter;
-use reth_node_ethereum::EthExecutorProvider;
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};
 use reth_node_events::node::NodeEvent;
 use reth_primitives::EthPrimitives;
 use reth_provider::{
@@ -64,7 +64,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
         &self,
         config: &Config,
         client: Client,
-        consensus: Arc<dyn FullConsensus>,
+        consensus: Arc<dyn FullConsensus<Error = ConsensusError>>,
         provider_factory: ProviderFactory<N>,
         task_executor: &TaskExecutor,
         static_file_producer: StaticFileProducer<ProviderFactory<N>>,
@@ -172,7 +172,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
         let Environment { provider_factory, config, data_dir } =
             self.env.init::<N>(AccessRights::RW)?;
 
-        let consensus: Arc<dyn FullConsensus> =
+        let consensus: Arc<dyn FullConsensus<Error = ConsensusError>> =
             Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
 
         // Configure and build network
diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs
index bd8c8d1cdcc1..d4c0f3c6c408 100644
--- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs
+++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs
@@ -7,7 +7,6 @@ use crate::{
 use alloy_eips::BlockHashOrNumber;
 use backon::{ConstantBuilder, Retryable};
 use clap::Parser;
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
@@ -20,7 +19,7 @@ use reth_execution_types::ExecutionOutcome;
 use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_node_api::{BlockTy, NodePrimitives};
-use reth_node_ethereum::EthExecutorProvider;
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};
 use reth_primitives::{BlockExt, EthPrimitives};
 use reth_provider::{
     providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory,
@@ -177,7 +176,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
                 .map_err(|_| BlockValidationError::SenderRecoveryError)?,
         )?;
         provider_rw.write_state(
-            execution_outcome,
+            &execution_outcome,
             OriginalValuesKnown::No,
             StorageLocation::Database,
         )?;
diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs
index 59fe2bafaf6c..acc346d9e3ad 100644
--- a/bin/reth/src/commands/debug_cmd/merkle.rs
+++ b/bin/reth/src/commands/debug_cmd/merkle.rs
@@ -3,14 +3,13 @@ use crate::{args::NetworkArgs, utils::get_single_header};
 use alloy_eips::BlockHashOrNumber;
 use backon::{ConstantBuilder, Retryable};
 use clap::Parser;
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
 use reth_cli_runner::CliContext;
 use reth_cli_util::get_secret_key;
 use reth_config::Config;
-use reth_consensus::Consensus;
+use reth_consensus::{Consensus, ConsensusError};
 use reth_db::tables;
 use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
 use reth_evm::execute::{BatchExecutor, BlockExecutorProvider};
@@ -18,7 +17,7 @@ use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_network_p2p::full_block::FullBlockClient;
 use reth_node_api::{BlockTy, NodePrimitives};
-use reth_node_ethereum::EthExecutorProvider;
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};
 use reth_primitives::EthPrimitives;
 use reth_provider::{
     providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider,
@@ -129,7 +128,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
         info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range");
 
         // build the full block client
-        let consensus: Arc<dyn Consensus> =
+        let consensus: Arc<dyn Consensus<Error = ConsensusError>> =
             Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec()));
         let block_range_client = FullBlockClient::new(fetch_client, consensus);
 
@@ -169,7 +168,7 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
         let execution_outcome = executor.finalize();
 
         provider_rw.write_state(
-            execution_outcome,
+            &execution_outcome,
             OriginalValuesKnown::Yes,
             StorageLocation::Database,
         )?;
diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs
index 4aaa1b1c82ec..26077a1274fb 100644
--- a/bin/reth/src/commands/debug_cmd/mod.rs
+++ b/bin/reth/src/commands/debug_cmd/mod.rs
@@ -12,7 +12,6 @@ mod build_block;
 mod execution;
 mod in_memory_merkle;
 mod merkle;
-mod replay_engine;
 
 /// `reth debug` command
 #[derive(Debug, Parser)]
@@ -32,8 +31,6 @@ pub enum Subcommands<C: ChainSpecParser> {
     InMemoryMerkle(in_memory_merkle::Command<C>),
     /// Debug block building.
     BuildBlock(build_block::Command<C>),
-    /// Debug engine API by replaying stored messages.
-    ReplayEngine(replay_engine::Command<C>),
 }
 
 impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
@@ -49,7 +46,6 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>> Command<C> {
             Subcommands::Merkle(command) => command.execute::<N>(ctx).await,
             Subcommands::InMemoryMerkle(command) => command.execute::<N>(ctx).await,
             Subcommands::BuildBlock(command) => command.execute::<N>(ctx).await,
-            Subcommands::ReplayEngine(command) => command.execute::<N>(ctx).await,
         }
     }
 }
diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs
deleted file mode 100644
index 3d17ea456526..000000000000
--- a/bin/reth/src/commands/debug_cmd/replay_engine.rs
+++ /dev/null
@@ -1,212 +0,0 @@
-use crate::args::NetworkArgs;
-use clap::Parser;
-use eyre::Context;
-use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig};
-use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeaconConsensus};
-use reth_blockchain_tree::{
-    BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
-};
-use reth_chainspec::ChainSpec;
-use reth_cli::chainspec::ChainSpecParser;
-use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
-use reth_cli_runner::CliContext;
-use reth_cli_util::get_secret_key;
-use reth_config::Config;
-use reth_consensus::FullConsensus;
-use reth_db::DatabaseEnv;
-use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage};
-use reth_ethereum_payload_builder::EthereumBuilderConfig;
-use reth_fs_util as fs;
-use reth_network::{BlockDownloaderProvider, NetworkHandle};
-use reth_network_api::NetworkInfo;
-use reth_node_api::{EngineApiMessageVersion, NodePrimitives, NodeTypesWithDBAdapter};
-use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider};
-use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
-use reth_primitives::EthPrimitives;
-use reth_provider::{
-    providers::{BlockchainProvider, ProviderNodeTypes},
-    CanonStateSubscriptions, ChainSpecProvider, ProviderFactory,
-};
-use reth_prune::PruneModes;
-use reth_stages::Pipeline;
-use reth_static_file::StaticFileProducer;
-use reth_tasks::TaskExecutor;
-use reth_transaction_pool::noop::NoopTransactionPool;
-use std::{path::PathBuf, sync::Arc, time::Duration};
-use tokio::sync::oneshot;
-use tracing::*;
-
-/// `reth debug replay-engine` command
-/// This script will read stored engine API messages and replay them by the timestamp.
-/// It does not require
-#[derive(Debug, Parser)]
-pub struct Command<C: ChainSpecParser> {
-    #[command(flatten)]
-    env: EnvironmentArgs<C>,
-
-    #[command(flatten)]
-    network: NetworkArgs,
-
-    /// The path to read engine API messages from.
-    #[arg(long = "engine-api-store", value_name = "PATH")]
-    engine_api_store: PathBuf,
-
-    /// The number of milliseconds between Engine API messages.
- #[arg(long = "interval", default_value_t = 1_000)] - interval: u64, -} - -impl> Command { - async fn build_network< - N: ProviderNodeTypes< - ChainSpec = C::ChainSpec, - Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, - >, - >, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - /// Execute `debug replay-engine` command - pub async fn execute< - N: CliNodeTypes, - >( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let consensus: Arc = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); - - // Configure blockchain tree - let tree_externals = - TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default())?; - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Set up the blockchain provider - let blockchain_db = BlockchainProvider::new(provider_factory.clone(), blockchain_tree)?; - - // Set up network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - // Set up payload builder - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - EthEvmConfig::new(provider_factory.chain_spec()), - EthereumBuilderConfig::new(Default::default()), - ); - - let payload_generator = BasicPayloadJobGenerator::with_builder( - blockchain_db.clone(), - NoopTransactionPool::default(), - ctx.task_executor.clone(), - BasicPayloadJobGeneratorConfig::default(), - payload_builder, - ); - - let (payload_service, payload_builder): (_, PayloadBuilderHandle) = - PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); - - ctx.task_executor.spawn_critical("payload builder service", payload_service); - - // Configure the consensus engine - let network_client = network.fetch_client().await?; - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::new( - network_client, - Pipeline::>>::builder().build( - provider_factory.clone(), - StaticFileProducer::new(provider_factory.clone(), PruneModes::none()), - ), - blockchain_db.clone(), - Box::new(ctx.task_executor.clone()), - Box::new(network), - None, - payload_builder, - None, - u64::MAX, - EngineHooks::new(), - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - 
ctx.task_executor.spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - let engine_api_store = EngineMessageStore::new(self.engine_api_store.clone()); - for filepath in engine_api_store.engine_messages_iter()? { - let contents = - fs::read(&filepath).wrap_err(format!("failed to read: {}", filepath.display()))?; - let message = serde_json::from_slice(&contents) - .wrap_err(format!("failed to parse: {}", filepath.display()))?; - debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message"); - match message { - StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => { - let response = beacon_engine_handle - .fork_choice_updated( - state, - payload_attrs, - EngineApiMessageVersion::default(), - ) - .await?; - debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); - } - StoredEngineApiMessage::NewPayload { payload, sidecar } => { - let response = beacon_engine_handle.new_payload(payload, sidecar).await?; - debug!(target: "reth::cli", ?response, "Received for new payload"); - } - }; - - // Pause before next message - tokio::time::sleep(Duration::from_millis(self.interval)).await; - } - - info!(target: "reth::cli", "Finished replaying engine API messages"); - - match rx.await? { - Ok(()) => info!("Beacon consensus engine exited successfully"), - Err(error) => { - error!(target: "reth::cli", %error, "Beacon consensus engine exited with an error") - } - }; - - Ok(()) - } -} diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 53c592063eca..cbe1a1660773 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -103,13 +103,9 @@ pub mod primitives { pub use reth_primitives::*; } -/// Re-exported from `reth_beacon_consensus`. +/// Re-exported from `reth_ethereum_consensus`. pub mod beacon_consensus { - pub use reth_beacon_consensus::*; -} -/// Re-exported from `reth_blockchain_tree`. -pub mod blockchain_tree { - pub use reth_blockchain_tree::*; + pub use reth_node_ethereum::consensus::*; } /// Re-exported from `reth_consensus`. 
diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs
index f1f0a7d68cfb..5daaa93ee3bf 100644
--- a/bin/reth/src/main.rs
+++ b/bin/reth/src/main.rs
@@ -13,7 +13,7 @@ use reth_node_builder::{
     EngineNodeLauncher,
 };
 use reth_node_ethereum::{node::EthereumAddOns, EthereumNode};
-use reth_provider::providers::BlockchainProvider2;
+use reth_provider::providers::BlockchainProvider;
 use reth_tracing::tracing::warn;
 use tracing::info;
 
@@ -79,7 +79,7 @@ fn main() {
                     .with_memory_block_buffer_target(engine_args.memory_block_buffer_target)
                     .with_state_root_task(engine_args.state_root_task_enabled);
                 let handle = builder
-                    .with_types_and_provider::<EthereumNode, BlockchainProvider2<_>>()
+                    .with_types_and_provider::<EthereumNode, BlockchainProvider<_>>()
                     .with_components(EthereumNode::components())
                     .with_add_ons(EthereumAddOns::default())
                     .launch_with_fn(|builder| {
diff --git a/book/SUMMARY.md b/book/SUMMARY.md
index f93daeaba397..666f4e4ca559 100644
--- a/book/SUMMARY.md
+++ b/book/SUMMARY.md
@@ -70,7 +70,6 @@
     - [`reth debug merkle`](./cli/reth/debug/merkle.md)
     - [`reth debug in-memory-merkle`](./cli/reth/debug/in-memory-merkle.md)
     - [`reth debug build-block`](./cli/reth/debug/build-block.md)
-    - [`reth debug replay-engine`](./cli/reth/debug/replay-engine.md)
   - [`reth recover`](./cli/reth/recover.md)
     - [`reth recover storage-tries`](./cli/reth/recover/storage-tries.md)
   - [`reth prune`](./cli/reth/prune.md)
diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md
index 5f338a0d1ec7..6a18ff0cdfe2 100644
--- a/book/cli/SUMMARY.md
+++ b/book/cli/SUMMARY.md
@@ -40,7 +40,6 @@
     - [`reth debug merkle`](./reth/debug/merkle.md)
     - [`reth debug in-memory-merkle`](./reth/debug/in-memory-merkle.md)
     - [`reth debug build-block`](./reth/debug/build-block.md)
-    - [`reth debug replay-engine`](./reth/debug/replay-engine.md)
   - [`reth recover`](./reth/recover.md)
     - [`reth recover storage-tries`](./reth/recover/storage-tries.md)
   - [`reth prune`](./reth/prune.md)
diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md
index ab016d631d61..c3f98e1452ed 100644
--- a/book/cli/reth/debug.md
+++ b/book/cli/reth/debug.md
@@ -13,7 +13,6 @@ Commands:
   merkle            Debug the clean & incremental state root calculations
   in-memory-merkle  Debug in-memory state root calculation
   build-block       Debug block building
-  replay-engine     Debug engine API by replaying stored messages
   help              Print this message or the help of the given subcommand(s)
 
 Options:
diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml
deleted file mode 100644
index 83ae378090b6..000000000000
--- a/crates/blockchain-tree-api/Cargo.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-[package]
-name = "reth-blockchain-tree-api"
-version.workspace = true
-edition.workspace = true
-rust-version.workspace = true
-license.workspace = true
-homepage.workspace = true
-repository.workspace = true
-
-[lints]
-workspace = true
-
-[dependencies]
-reth-consensus.workspace = true
-reth-execution-errors.workspace = true
-reth-primitives.workspace = true
-reth-primitives-traits.workspace = true
-reth-storage-errors.workspace = true
-
-# alloy
-alloy-consensus.workspace = true
-alloy-primitives.workspace = true
-alloy-eips.workspace = true
-
-# misc
-thiserror.workspace = true
diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs
deleted file mode 100644
index ddd7cea7993c..000000000000
--- a/crates/blockchain-tree-api/src/error.rs
+++ /dev/null
@@ -1,530 +0,0 @@
-//! Error handling for the blockchain tree
-
-use alloy_consensus::BlockHeader;
-use alloy_primitives::{BlockHash, BlockNumber};
-use reth_consensus::ConsensusError;
-use reth_execution_errors::{
-    BlockExecutionError, BlockValidationError, InternalBlockExecutionError,
-};
-use reth_primitives::{SealedBlock, SealedBlockFor};
-use reth_primitives_traits::{Block, BlockBody};
-pub use reth_storage_errors::provider::ProviderError;
-
-/// Various error cases that can occur when a block violates tree assumptions.
-#[derive(Debug, Clone, Copy, thiserror::Error, Eq, PartialEq)]
-pub enum BlockchainTreeError {
-    /// Thrown if the block number is lower than the last finalized block number.
-    #[error("block number is lower than the last finalized block number #{last_finalized}")]
-    PendingBlockIsFinalized {
-        /// The block number of the last finalized block.
-        last_finalized: BlockNumber,
-    },
-    /// Thrown if no side chain could be found for the block.
-    #[error("chainId can't be found in BlockchainTree with internal index {chain_id}")]
-    BlockSideChainIdConsistency {
-        /// The internal identifier for the side chain.
-        chain_id: u64,
-    },
-    /// Thrown if a canonical chain header cannot be found.
-    #[error("canonical chain header {block_hash} can't be found")]
-    CanonicalChain {
-        /// The block hash of the missing canonical chain header.
-        block_hash: BlockHash,
-    },
-    /// Thrown if a block number cannot be found in the blockchain tree chain.
-    #[error("block number #{block_number} not found in blockchain tree chain")]
-    BlockNumberNotFoundInChain {
-        /// The block number that could not be found.
-        block_number: BlockNumber,
-    },
-    /// Thrown if a block hash cannot be found in the blockchain tree chain.
-    #[error("block hash {block_hash} not found in blockchain tree chain")]
-    BlockHashNotFoundInChain {
-        /// The block hash that could not be found.
-        block_hash: BlockHash,
-    },
-    /// Thrown if the block failed to buffer
-    #[error("block with hash {block_hash} failed to buffer")]
-    BlockBufferingFailed {
-        /// The block hash of the block that failed to buffer.
-        block_hash: BlockHash,
-    },
-    /// Thrown when trying to access genesis parent.
-    #[error("genesis block has no parent")]
-    GenesisBlockHasNoParent,
-}
-
-/// Canonical Errors
-#[derive(thiserror::Error, Debug, Clone)]
-pub enum CanonicalError {
-    /// Error originating from validation operations.
-    #[error(transparent)]
-    Validation(#[from] BlockValidationError),
-    /// Error originating from blockchain tree operations.
-    #[error(transparent)]
-    BlockchainTree(#[from] BlockchainTreeError),
-    /// Error originating from a provider operation.
-    #[error(transparent)]
-    Provider(#[from] ProviderError),
-    /// Error indicating a transaction reverted during execution.
-    #[error("transaction error on revert: {0}")]
-    CanonicalRevert(String),
-    /// Error indicating a transaction failed to commit during execution.
-    #[error("transaction error on commit: {0}")]
-    CanonicalCommit(String),
-    /// Error indicating that a previous optimistic sync target was re-orged
-    #[error("optimistic sync target was re-orged at block: {0}")]
-    OptimisticTargetRevert(BlockNumber),
-}
-
-impl CanonicalError {
-    /// Returns `true` if the error is fatal.
-    pub const fn is_fatal(&self) -> bool {
-        matches!(self, Self::CanonicalCommit(_) | Self::CanonicalRevert(_))
-    }
-
-    /// Returns `true` if the underlying error matches
-    /// [`BlockchainTreeError::BlockHashNotFoundInChain`].
- pub const fn is_block_hash_not_found(&self) -> bool { - matches!(self, Self::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. })) - } - - /// Returns `Some(BlockNumber)` if the underlying error matches - /// [`CanonicalError::OptimisticTargetRevert`]. - pub const fn optimistic_revert_block_number(&self) -> Option<BlockNumber> { - match self { - Self::OptimisticTargetRevert(block_number) => Some(*block_number), - _ => None, - } - } -} - -/// Error thrown when inserting a block failed because the block is considered invalid. -#[derive(thiserror::Error)] -#[error(transparent)] -pub struct InsertBlockError { - inner: Box<InsertBlockErrorData>, -} - -// === impl InsertBlockError === - -impl InsertBlockError { - /// Create a new `InsertInvalidBlockError` - pub fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { - Self { inner: InsertBlockErrorData::boxed(block, kind) } - } - - /// Create a new `InsertInvalidBlockError` from a tree error - pub fn tree_error(error: BlockchainTreeError, block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::Tree(error)) - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::Consensus(error)) - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::SenderRecovery) - } - - /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::Execution(error)) - } - - /// Consumes the error and returns the block that resulted in the error - #[inline] - pub fn into_block(self) -> SealedBlock { - self.inner.block - } - - /// Returns the error kind - #[inline] - pub const fn kind(&self) -> &InsertBlockErrorKind { - &self.inner.kind - } - - /// Returns the block that resulted in the error - #[inline] - pub const fn block(&self) -> &SealedBlock { - &self.inner.block - } - - /// Consumes the type and returns the block and error kind. - #[inline] - pub fn split(self) -> (SealedBlock, InsertBlockErrorKind) { - let inner = *self.inner; - (inner.block, inner.kind) - } -} - -impl std::fmt::Debug for InsertBlockError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Debug::fmt(&self.inner, f) - } -} - -#[derive(thiserror::Error, Debug)] -#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {kind}", - .block.hash(), - .block.number, - .block.parent_hash)] -struct InsertBlockErrorData { - block: SealedBlock, - #[source] - kind: InsertBlockErrorKind, -} - -impl InsertBlockErrorData { - const fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { - Self { block, kind } - } - - fn boxed(block: SealedBlock, kind: InsertBlockErrorKind) -> Box<Self> { - Box::new(Self::new(block, kind)) - } -} - -#[derive(thiserror::Error)] -#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {}", - .block.hash(), - .block.number(), - .block.parent_hash(), - .kind)] -struct InsertBlockErrorDataTwo<B: Block> { - block: SealedBlockFor<B>, - #[source] - kind: InsertBlockErrorKindTwo, -} - -impl<B: Block> std::fmt::Debug for InsertBlockErrorDataTwo<B> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InsertBlockError") - .field("error", &self.kind) - .field("hash", &self.block.hash()) - .field("number", &self.block.number()) - .field("parent_hash", &self.block.parent_hash()) - .field("num_txs", &self.block.body().transactions().len()) - .finish_non_exhaustive() - } -} - -impl<B: Block> InsertBlockErrorDataTwo<B> { - const fn new(block: SealedBlockFor<B>, kind: InsertBlockErrorKindTwo) -> Self { - Self { block, kind } - } - - fn boxed(block: SealedBlockFor<B>, kind: InsertBlockErrorKindTwo) -> Box<Self> { - Box::new(Self::new(block, kind)) - } -} - -/// Error thrown when inserting a block failed because the block is considered invalid. -#[derive(thiserror::Error)] -#[error(transparent)] -pub struct InsertBlockErrorTwo<B: Block> { - inner: Box<InsertBlockErrorDataTwo<B>>, -} - -// === impl InsertBlockErrorTwo === - -impl<B: Block> InsertBlockErrorTwo<B> { - /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlockFor<B>, kind: InsertBlockErrorKindTwo) -> Self { - Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlockFor<B>) -> Self { - Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlockFor<B>) -> Self { - Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) - } - - /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlockFor<B>) -> Self { - Self::new(block, InsertBlockErrorKindTwo::Execution(error)) - } - - /// Consumes the error and returns the block that resulted in the error - #[inline] - pub fn into_block(self) -> SealedBlockFor<B> { - self.inner.block - } - - /// Returns the error kind - #[inline] - pub const fn kind(&self) -> &InsertBlockErrorKindTwo { - &self.inner.kind - } - - /// Returns the block that resulted in the error - #[inline] - pub const fn block(&self) -> &SealedBlockFor<B> { - &self.inner.block - } - - /// Consumes the type and returns the block and error kind. - #[inline] - pub fn split(self) -> (SealedBlockFor<B>, InsertBlockErrorKindTwo) { - let inner = *self.inner; - (inner.block, inner.kind) - } -} - -impl<B: Block> std::fmt::Debug for InsertBlockErrorTwo<B> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Debug::fmt(&self.inner, f) - } -} - -/// All error variants possible when inserting a block -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockErrorKindTwo { - /// Failed to recover senders for the block - #[error("failed to recover senders for block")] - SenderRecovery, - /// Block violated consensus rules. - #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Block execution failed. - #[error(transparent)] - Execution(#[from] BlockExecutionError), - /// Provider error. - #[error(transparent)] - Provider(#[from] ProviderError), - /// Other errors. - #[error(transparent)] - Other(#[from] Box<dyn core::error::Error + Send + Sync + 'static>), -} - -impl InsertBlockErrorKindTwo { - /// Returns an [`InsertBlockValidationError`] if the error is caused by an invalid block. - /// - /// Returns an [`InsertBlockFatalError`] if the error is caused by an error that is not - /// validation related or is otherwise fatal. - /// - /// This is intended to be used to determine if we should respond `INVALID` as a response when - /// processing a new block. - pub fn ensure_validation_error( - self, - ) -> Result<InsertBlockValidationError, InsertBlockFatalError> { - match self { - Self::SenderRecovery => Ok(InsertBlockValidationError::SenderRecovery), - Self::Consensus(err) => Ok(InsertBlockValidationError::Consensus(err)), - // other execution errors that are considered internal errors - Self::Execution(err) => { - match err { - BlockExecutionError::Validation(err) => { - Ok(InsertBlockValidationError::Validation(err)) - } - BlockExecutionError::Consensus(err) => { - Ok(InsertBlockValidationError::Consensus(err)) - } - // these are internal errors, not caused by an invalid block - BlockExecutionError::Internal(error) => { - Err(InsertBlockFatalError::BlockExecutionError(error)) - } - } - } - Self::Provider(err) => Err(InsertBlockFatalError::Provider(err)), - Self::Other(err) => Err(InternalBlockExecutionError::Other(err).into()), - } - } -} - -/// Error variants that are not caused by invalid blocks -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockFatalError { - /// A provider error - #[error(transparent)] - Provider(#[from] ProviderError), - /// An internal / fatal block execution error - #[error(transparent)] - BlockExecutionError(#[from] InternalBlockExecutionError), -} - -/// Error variants that are caused by invalid blocks -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockValidationError { - /// Failed to recover senders for the block - #[error("failed to recover senders for block")] - SenderRecovery, - /// Block violated consensus rules. - #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Validation error, transparently wrapping [`BlockValidationError`] - #[error(transparent)] - Validation(#[from] BlockValidationError), -} - -impl InsertBlockValidationError { - /// Returns true if this is a block pre merge error. - pub const fn is_block_pre_merge(&self) -> bool { - matches!(self, Self::Validation(BlockValidationError::BlockPreMerge { .. })) - } -} - -/// All error variants possible when inserting a block -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockErrorKind { - /// Failed to recover senders for the block - #[error("failed to recover senders for block")] - SenderRecovery, - /// Block violated consensus rules.
- #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Block execution failed. - #[error(transparent)] - Execution(#[from] BlockExecutionError), - /// Block violated tree invariants. - #[error(transparent)] - Tree(#[from] BlockchainTreeError), - /// Provider error. - #[error(transparent)] - Provider(#[from] ProviderError), - /// An internal error occurred, like interacting with the database. - #[error(transparent)] - Internal(#[from] Box), - /// Canonical error. - #[error(transparent)] - Canonical(#[from] CanonicalError), -} - -impl InsertBlockErrorKind { - /// Returns true if the error is a tree error - pub const fn is_tree_error(&self) -> bool { - matches!(self, Self::Tree(_)) - } - - /// Returns true if the error is a consensus error - pub const fn is_consensus_error(&self) -> bool { - matches!(self, Self::Consensus(_)) - } - - /// Returns true if this error is a state root error - pub const fn is_state_root_error(&self) -> bool { - // we need to get the state root errors inside of the different variant branches - match self { - Self::Execution(err) => { - matches!( - err, - BlockExecutionError::Validation(BlockValidationError::StateRoot { .. }) - ) - } - Self::Canonical(err) => { - matches!( - err, - CanonicalError::Validation(BlockValidationError::StateRoot { .. }) | - CanonicalError::Provider( - ProviderError::StateRootMismatch(_) | - ProviderError::UnwindStateRootMismatch(_) - ) - ) - } - Self::Provider(err) => { - matches!( - err, - ProviderError::StateRootMismatch(_) | ProviderError::UnwindStateRootMismatch(_) - ) - } - _ => false, - } - } - - /// Returns true if the error is caused by an invalid block - /// - /// This is intended to be used to determine if the block should be marked as invalid. - #[allow(clippy::match_same_arms)] - pub const fn is_invalid_block(&self) -> bool { - match self { - Self::SenderRecovery | Self::Consensus(_) => true, - // other execution errors that are considered internal errors - Self::Execution(err) => { - match err { - BlockExecutionError::Validation(_) | BlockExecutionError::Consensus(_) => { - // this is caused by an invalid block - true - } - // these are internal errors, not caused by an invalid block - BlockExecutionError::Internal(_) => false, - } - } - Self::Tree(err) => { - match err { - BlockchainTreeError::PendingBlockIsFinalized { .. } => { - // the block's number is lower than the finalized block's number - true - } - BlockchainTreeError::BlockSideChainIdConsistency { .. } | - BlockchainTreeError::CanonicalChain { .. } | - BlockchainTreeError::BlockNumberNotFoundInChain { .. } | - BlockchainTreeError::BlockHashNotFoundInChain { .. } | - BlockchainTreeError::BlockBufferingFailed { .. } | - BlockchainTreeError::GenesisBlockHasNoParent => false, - } - } - Self::Provider(_) | Self::Internal(_) => { - // any other error, such as database errors, are considered internal errors - false - } - Self::Canonical(err) => match err { - CanonicalError::BlockchainTree(_) | - CanonicalError::CanonicalCommit(_) | - CanonicalError::CanonicalRevert(_) | - CanonicalError::OptimisticTargetRevert(_) | - CanonicalError::Provider(_) => false, - CanonicalError::Validation(_) => true, - }, - } - } - - /// Returns true if this is a block pre merge error. - pub const fn is_block_pre_merge(&self) -> bool { - matches!( - self, - Self::Execution(BlockExecutionError::Validation( - BlockValidationError::BlockPreMerge { .. 
} - )) - ) - } - - /// Returns true if the error is an execution error - pub const fn is_execution_error(&self) -> bool { - matches!(self, Self::Execution(_)) - } - - /// Returns true if the error is an internal error - pub const fn is_internal(&self) -> bool { - matches!(self, Self::Internal(_)) - } - - /// Returns the error if it is a tree error - pub const fn as_tree_error(&self) -> Option { - match self { - Self::Tree(err) => Some(*err), - _ => None, - } - } - - /// Returns the error if it is a consensus error - pub const fn as_consensus_error(&self) -> Option<&ConsensusError> { - match self { - Self::Consensus(err) => Some(err), - _ => None, - } - } - - /// Returns the error if it is an execution error - pub const fn as_execution_error(&self) -> Option<&BlockExecutionError> { - match self { - Self::Execution(err) => Some(err), - _ => None, - } - } -} diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs deleted file mode 100644 index 7e1d0d714c14..000000000000 --- a/crates/blockchain-tree-api/src/lib.rs +++ /dev/null @@ -1,372 +0,0 @@ -//! Interfaces and types for interacting with the blockchain tree. -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use self::error::CanonicalError; -use crate::error::InsertBlockError; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::collections::BTreeMap; - -pub mod error; - -/// * [`BlockchainTreeEngine::insert_block`]: Connect block to chain, execute it and if valid insert -/// block inside tree. -/// * [`BlockchainTreeEngine::finalize_block`]: Remove chains that join to now finalized block, as -/// chain becomes invalid. -/// * [`BlockchainTreeEngine::make_canonical`]: Check if we have the hash of block that we want to -/// finalize and commit it to db. If we don't have the block, syncing should start to fetch the -/// blocks from p2p. Do reorg in tables if canonical chain if needed. -pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { - /// Recover senders and call [`BlockchainTreeEngine::insert_block`]. - /// - /// This will recover all senders of the transactions in the block first, and then try to insert - /// the block. - fn insert_block_without_senders( - &self, - block: SealedBlock, - validation_kind: BlockValidationKind, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block, validation_kind), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Recover senders and call [`BlockchainTreeEngine::buffer_block`]. - /// - /// This will recover all senders of the transactions in the block first, and then try to buffer - /// the block. 
- fn buffer_block_without_senders(&self, block: SealedBlock) -> Result<(), InsertBlockError> { - match block.try_seal_with_senders() { - Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Buffer block with senders - fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError>; - - /// Inserts block with senders - /// - /// The `validation_kind` parameter controls which validation checks are performed. - /// - /// Caution: If the block was received from the consensus layer, this should always be called - /// with [`BlockValidationKind::Exhaustive`] to validate the state root, if possible to adhere - /// to the engine API spec. - fn insert_block( - &self, - block: SealedBlockWithSenders, - validation_kind: BlockValidationKind, - ) -> Result; - - /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. - fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()>; - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - /// - /// # Note - /// - /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using - /// [`BlockchainTreeEngine::finalize_block`]). - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError>; - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered - /// blocks before the tip. - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError>; - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError>; - - /// Make a block and its parent chain part of the canonical chain by committing it to the - /// database. - /// - /// # Note - /// - /// This unwinds the database if necessary, i.e. if parts of the canonical chain have been - /// re-orged. - /// - /// # Returns - /// - /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. - fn make_canonical(&self, block_hash: BlockHash) -> Result; -} - -/// Represents the kind of validation that should be performed when inserting a block. -/// -/// The motivation for this is that the engine API spec requires that block's state root is -/// validated when received from the CL. -/// -/// This step is very expensive due to how changesets are stored in the database, so we want to -/// avoid doing it if not necessary. Blocks can also originate from the network where this step is -/// not required. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum BlockValidationKind { - /// All validation checks that can be performed. - /// - /// This includes validating the state root, if possible. 
- /// - /// Note: This should always be used when inserting blocks that originate from the consensus - /// layer. - #[default] - Exhaustive, - /// Perform all validation checks except for state root validation. - SkipStateRootValidation, -} - -impl BlockValidationKind { - /// Returns true if the state root should be validated if possible. - pub const fn is_exhaustive(&self) -> bool { - matches!(self, Self::Exhaustive) - } -} - -impl std::fmt::Display for BlockValidationKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Exhaustive => { - write!(f, "Exhaustive") - } - Self::SkipStateRootValidation => { - write!(f, "SkipStateRootValidation") - } - } - } -} - -/// All possible outcomes of a canonicalization attempt of [`BlockchainTreeEngine::make_canonical`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum CanonicalOutcome { - /// The block is already canonical. - AlreadyCanonical { - /// Block number and hash of current head. - head: BlockNumHash, - /// The corresponding [`SealedHeader`] that was attempted to be made a current head and - /// is already canonical. - header: SealedHeader, - }, - /// Committed the block to the database. - Committed { - /// The new corresponding canonical head - head: SealedHeader, - }, -} - -impl CanonicalOutcome { - /// Returns the header of the block that was made canonical. - pub const fn header(&self) -> &SealedHeader { - match self { - Self::AlreadyCanonical { header, .. } => header, - Self::Committed { head } => head, - } - } - - /// Consumes the outcome and returns the header of the block that was made canonical. - pub fn into_header(self) -> SealedHeader { - match self { - Self::AlreadyCanonical { header, .. } => header, - Self::Committed { head } => head, - } - } - - /// Returns true if the block was already canonical. - pub const fn is_already_canonical(&self) -> bool { - matches!(self, Self::AlreadyCanonical { .. }) - } -} - -/// Block inclusion can be valid, accepted, or invalid. Invalid blocks are returned as an error -/// variant. -/// -/// If we don't know the block's parent, we return `Disconnected`, as we can't claim that the block -/// is valid or not. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum BlockStatus2 { - /// The block is valid and block extends canonical chain. - Valid, - /// The block may be valid and has an unknown missing ancestor. - Disconnected { - /// Current canonical head. - head: BlockNumHash, - /// The lowest ancestor block that is not connected to the canonical chain. - missing_ancestor: BlockNumHash, - }, -} - -/// How a payload was inserted if it was valid. -/// -/// If the payload was valid, but has already been seen, [`InsertPayloadOk2::AlreadySeen(_)`] is -/// returned, otherwise [`InsertPayloadOk2::Inserted(_)`] is returned. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum InsertPayloadOk2 { - /// The payload was valid, but we have already seen it. - AlreadySeen(BlockStatus2), - /// The payload was valid and inserted into the tree. - Inserted(BlockStatus2), -} - -/// From Engine API spec, block inclusion can be valid, accepted or invalid. -/// Invalid case is already covered by error, but we need to make distinction -/// between valid blocks that extend canonical chain and the ones that fork off -/// into side chains (see [`BlockAttachment`]). If we don't know the block -/// parent we are returning Disconnected status as we can't make a claim if -/// block is valid or not. 
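Since the state-root computation is the expensive step, `BlockValidationKind` above effectively toggles a single guard. A std-only sketch of that pattern (the `Block`, `validate`, and `compute_state_root` names here are hypothetical placeholders, not reth's validation pipeline):

```rust
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
enum ValidationKind {
    /// Validate everything, including the state root (required for CL payloads).
    #[default]
    Exhaustive,
    /// Skip the expensive state-root check (acceptable for p2p blocks).
    SkipStateRootValidation,
}

struct Block {
    claimed_state_root: u64, // stand-in for a real B256 root
}

/// Stand-in for the expensive changeset walk a real state-root check needs.
fn compute_state_root(_block: &Block) -> u64 {
    42
}

fn validate(block: &Block, kind: ValidationKind) -> Result<(), String> {
    // ... cheap header/body checks would run here unconditionally ...
    if kind == ValidationKind::Exhaustive {
        let got = compute_state_root(block);
        if got != block.claimed_state_root {
            return Err(format!(
                "state root mismatch: got {got}, header says {}",
                block.claimed_state_root
            ));
        }
    }
    Ok(())
}

fn main() {
    let block = Block { claimed_state_root: 42 };
    // A payload from the consensus layer must take the exhaustive path.
    assert!(validate(&block, ValidationKind::Exhaustive).is_ok());
    // A block gossiped over p2p may skip the state-root check.
    assert!(validate(&block, ValidationKind::SkipStateRootValidation).is_ok());
}
```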
-#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum BlockStatus { - /// If block is valid and block extends canonical chain. - /// In `BlockchainTree` terms, it forks off canonical tip. - Valid(BlockAttachment), - /// If block is valid and block forks off canonical chain. - /// If blocks is not connected to canonical chain. - Disconnected { - /// Current canonical head. - head: BlockNumHash, - /// The lowest ancestor block that is not connected to the canonical chain. - missing_ancestor: BlockNumHash, - }, -} - -/// Represents what kind of block is being executed and validated. -/// -/// This is required to: -/// - differentiate whether trie state updates should be cached. -/// - inform other -/// -/// This is required because the state root check can only be performed if the targeted block can be -/// traced back to the canonical __head__. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum BlockAttachment { - /// The `block` is canonical or a descendant of the canonical head. - /// ([`head..(block.parent)*,block`]) - Canonical, - /// The block can be traced back to an ancestor of the canonical head: a historical block, but - /// this chain does __not__ include the canonical head. - HistoricalFork, -} - -impl BlockAttachment { - /// Returns `true` if the block is canonical or a descendant of the canonical head. - #[inline] - pub const fn is_canonical(&self) -> bool { - matches!(self, Self::Canonical) - } -} - -/// How a payload was inserted if it was valid. -/// -/// If the payload was valid, but has already been seen, [`InsertPayloadOk::AlreadySeen(_)`] is -/// returned, otherwise [`InsertPayloadOk::Inserted(_)`] is returned. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum InsertPayloadOk { - /// The payload was valid, but we have already seen it. - AlreadySeen(BlockStatus), - /// The payload was valid and inserted into the tree. - Inserted(BlockStatus), -} - -/// Allows read only functionality on the blockchain tree. -/// -/// Tree contains all blocks that are not canonical that can potentially be included -/// as canonical chain. For better explanation we can group blocks into four groups: -/// * Canonical chain blocks -/// * Side chain blocks. Side chain are block that forks from canonical chain but not its tip. -/// * Pending blocks that extend the canonical chain but are not yet included. -/// * Future pending blocks that extend the pending blocks. -pub trait BlockchainTreeViewer: Send + Sync { - /// Returns the header with matching hash from the tree, if it exists. - /// - /// Caution: This will not return headers from the canonical chain. - fn header_by_hash(&self, hash: BlockHash) -> Option; - - /// Returns the block with matching hash from the tree, if it exists. - /// - /// Caution: This will not return blocks from the canonical chain or buffered blocks that are - /// disconnected from the canonical chain. - fn block_by_hash(&self, hash: BlockHash) -> Option; - - /// Returns the block with matching hash from the tree, if it exists. - /// - /// Caution: This will not return blocks from the canonical chain or buffered blocks that are - /// disconnected from the canonical chain. - fn block_with_senders_by_hash(&self, hash: BlockHash) -> Option; - - /// Returns the _buffered_ (disconnected) header with matching hash from the internal buffer if - /// it exists. - /// - /// Caution: Unlike [`Self::block_by_hash`] this will only return headers that are currently - /// disconnected from the canonical chain. 
- fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option; - - /// Returns true if the tree contains the block with matching hash. - fn contains(&self, hash: BlockHash) -> bool { - self.block_by_hash(hash).is_some() - } - - /// Return whether or not the block is known and in the canonical chain. - fn is_canonical(&self, hash: BlockHash) -> Result; - - /// Given the hash of a block, this checks the buffered blocks for the lowest ancestor in the - /// buffer. - /// - /// If there is a buffered block with the given hash, this returns the block itself. - fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option; - - /// Return `BlockchainTree` best known canonical chain tip (`BlockHash`, `BlockNumber`) - fn canonical_tip(&self) -> BlockNumHash; - - /// Return block number and hash that extends the canonical chain tip by one. - /// - /// If there is no such block, this returns `None`. - fn pending_block_num_hash(&self) -> Option; - - /// Returns the pending block if there is one. - fn pending_block(&self) -> Option { - self.block_by_hash(self.pending_block_num_hash()?.hash) - } - - /// Returns the pending block if there is one. - fn pending_block_with_senders(&self) -> Option { - self.block_with_senders_by_hash(self.pending_block_num_hash()?.hash) - } - - /// Returns the pending block and its receipts in one call. - /// - /// This exists to prevent a potential data race if the pending block changes in between - /// [`Self::pending_block`] and [`Self::pending_receipts`] calls. - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)>; - - /// Returns the pending receipts if there is one. - fn pending_receipts(&self) -> Option> { - self.receipts_by_block_hash(self.pending_block_num_hash()?.hash) - } - - /// Returns the pending receipts if there is one. - fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option>; - - /// Returns the pending block if there is one. 
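The combined `pending_block_and_receipts` accessor above exists because two separate reads can straddle a pending-block update. A std-only sketch of why a single getter under one lock guard stays consistent (the `Tree` and `PendingState` stand-ins are hypothetical, not reth's provider types):

```rust
use std::sync::{Arc, RwLock};

#[derive(Clone, Debug)]
struct PendingState {
    block: String,         // stand-in for a SealedBlock
    receipts: Vec<String>, // stand-in for Vec<Receipt>
}

#[derive(Default)]
struct Tree {
    pending: RwLock<Option<PendingState>>,
}

impl Tree {
    // Two separate reads: another thread may swap `pending` in between,
    // pairing a block from one update with receipts from the next.
    fn pending_block(&self) -> Option<String> {
        self.pending.read().unwrap().as_ref().map(|p| p.block.clone())
    }
    fn pending_receipts(&self) -> Option<Vec<String>> {
        self.pending.read().unwrap().as_ref().map(|p| p.receipts.clone())
    }

    // One read guard covers both fields, so the pair is always consistent.
    fn pending_block_and_receipts(&self) -> Option<(String, Vec<String>)> {
        self.pending
            .read()
            .unwrap()
            .as_ref()
            .map(|p| (p.block.clone(), p.receipts.clone()))
    }
}

fn main() {
    let tree = Arc::new(Tree::default());
    *tree.pending.write().unwrap() =
        Some(PendingState { block: "b1".into(), receipts: vec!["r1".into()] });
    assert_eq!(
        tree.pending_block_and_receipts(),
        Some(("b1".to_string(), vec!["r1".to_string()]))
    );
    // By contrast, these are two lock acquisitions, with no guarantee that
    // both observe the same pending-block update.
    let _ = (tree.pending_block(), tree.pending_receipts());
}
```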
- fn pending_header(&self) -> Option { - self.header_by_hash(self.pending_block_num_hash()?.hash) - } -} diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml deleted file mode 100644 index f77bd7f852a7..000000000000 --- a/crates/blockchain-tree/Cargo.toml +++ /dev/null @@ -1,89 +0,0 @@ -[package] -name = "reth-blockchain-tree" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -# reth -reth-blockchain-tree-api.workspace = true -reth-primitives.workspace = true -reth-storage-errors.workspace = true -reth-execution-errors.workspace = true -reth-db.workspace = true -reth-db-api.workspace = true -reth-evm.workspace = true -reth-revm.workspace = true -reth-provider.workspace = true -reth-execution-types.workspace = true -reth-stages-api.workspace = true -reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } -reth-trie-parallel.workspace = true -reth-network.workspace = true -reth-consensus.workspace = true -reth-node-types.workspace = true - -# ethereum -alloy-consensus.workspace = true -alloy-primitives.workspace = true -alloy-eips.workspace = true - -# common -parking_lot.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["macros", "sync"] } - -# metrics -reth-metrics = { workspace = true, features = ["common"] } -metrics.workspace = true - -# misc -aquamarine.workspace = true -linked_hash_set.workspace = true - -[dev-dependencies] -reth-chainspec.workspace = true -reth-db = { workspace = true, features = ["test-utils"] } -reth-primitives = { workspace = true, features = ["test-utils"] } -reth-provider = { workspace = true, features = ["test-utils"] } -reth-evm = { workspace = true, features = ["test-utils"] } -reth-consensus = { workspace = true, features = ["test-utils"] } -reth-testing-utils.workspace = true -reth-revm.workspace = true -reth-ethereum-evm.workspace = true -reth-execution-types.workspace = true -parking_lot.workspace = true -assert_matches.workspace = true -alloy-genesis.workspace = true -alloy-consensus.workspace = true - -[features] -test-utils = [ - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-network/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-stages-api/test-utils", - "reth-db/test-utils", - "reth-db-api/test-utils", - "reth-provider/test-utils", - "reth-trie-db/test-utils", - "reth-trie/test-utils", - "reth-trie-parallel/test-utils" -] -optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-execution-types/optimism", - "reth-db/optimism", - "reth-db-api/optimism" -] diff --git a/crates/blockchain-tree/docs/mermaid/tree.mmd b/crates/blockchain-tree/docs/mermaid/tree.mmd deleted file mode 100644 index c9b41b857b17..000000000000 --- a/crates/blockchain-tree/docs/mermaid/tree.mmd +++ /dev/null @@ -1,21 +0,0 @@ -flowchart BT - subgraph canonical chain - CanonState:::state - block0canon:::canon -->block1canon:::canon -->block2canon:::canon -->block3canon:::canon --> - block4canon:::canon --> block5canon:::canon - end - block5canon --> block6pending1:::pending - block5canon --> block6pending2:::pending - subgraph sidechain2 - S2State:::state - block3canon --> block4s2:::sidechain --> block5s2:::sidechain - end - subgraph sidechain1 - S1State:::state - block2canon --> block3s1:::sidechain 
--> block4s1:::sidechain --> block5s1:::sidechain --> - block6s1:::sidechain - end - classDef state fill:#1882C4 - classDef canon fill:#8AC926 - classDef pending fill:#FFCA3A - classDef sidechain fill:#FF595E diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs deleted file mode 100644 index 994ed82cfb94..000000000000 --- a/crates/blockchain-tree/src/block_buffer.rs +++ /dev/null @@ -1,494 +0,0 @@ -use crate::metrics::BlockBufferMetrics; -use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_network::cache::LruCache; -use reth_node_types::Block; -use reth_primitives::SealedBlockWithSenders; -use std::collections::{BTreeMap, HashMap, HashSet}; - -/// Contains the tree of pending blocks that cannot be executed due to missing parent. -/// It allows to store unconnected blocks for potential future inclusion. -/// -/// The buffer has three main functionalities: -/// * [`BlockBuffer::insert_block`] for inserting blocks inside the buffer. -/// * [`BlockBuffer::remove_block_with_children`] for connecting blocks if the parent gets received -/// and inserted. -/// * [`BlockBuffer::remove_old_blocks`] to remove old blocks that precede the finalized number. -/// -/// Note: Buffer is limited by number of blocks that it can contain and eviction of the block -/// is done by last recently used block. -#[derive(Debug)] -pub struct BlockBuffer<B: Block> { - /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap<BlockHash, SealedBlockWithSenders<B>>, - /// Map of any parent block hash (even the ones not currently in the buffer) - /// to the buffered children. - /// Allows connecting buffered blocks by parent. - pub(crate) parent_to_child: HashMap<BlockHash, HashSet<BlockHash>>, - /// `BTreeMap` tracking the earliest blocks by block number. - /// Used for removal of old blocks that precede finalization. - pub(crate) earliest_blocks: BTreeMap<BlockNumber, HashSet<BlockHash>>, - /// LRU used for tracing oldest inserted blocks that are going to be - /// first in line for evicting if `max_blocks` limit is hit. - /// - /// Used as counter of amount of blocks inside buffer. - pub(crate) lru: LruCache<BlockHash>, - /// Various metrics for the block buffer. - pub(crate) metrics: BlockBufferMetrics, -} - -impl<B: Block> BlockBuffer<B> { - /// Create new buffer with max limit of blocks - pub fn new(limit: u32) -> Self { - Self { - blocks: Default::default(), - parent_to_child: Default::default(), - earliest_blocks: Default::default(), - lru: LruCache::new(limit), - metrics: Default::default(), - } - } - - /// Return reference to buffered blocks - pub const fn blocks(&self) -> &HashMap<BlockHash, SealedBlockWithSenders<B>> { - &self.blocks - } - - /// Return reference to the requested block. - pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders<B>> { - self.blocks.get(hash) - } - - /// Return a reference to the lowest ancestor of the given block in the buffer. - pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders<B>> { - let mut current_block = self.blocks.get(hash)?; - while let Some(parent) = self.blocks.get(&current_block.parent_hash()) { - current_block = parent; - } - Some(current_block) - } - - /// Insert a correct block inside the buffer.
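`BlockBuffer` above keeps several views of the same blocks in sync (primary map, parent index, number index, LRU), so every insert and removal must update all of them together. A reduced std-only sketch of that invariant with two secondary indices and the `lowest_ancestor` walk (integer hashes stand in for `B256`):

```rust
use std::collections::{BTreeMap, HashMap, HashSet};

#[derive(Debug)]
struct BufferedBlock {
    hash: u64,
    parent: u64,
    number: u64,
}

#[derive(Default)]
struct Buffer {
    /// Primary store: hash -> block.
    blocks: HashMap<u64, BufferedBlock>,
    /// Secondary index: parent hash -> children, used to connect chains.
    parent_to_child: HashMap<u64, HashSet<u64>>,
    /// Secondary index: number -> hashes, used to drop pre-finalized blocks.
    earliest: BTreeMap<u64, HashSet<u64>>,
}

impl Buffer {
    fn insert(&mut self, block: BufferedBlock) {
        // All three views must be updated together, or lookups diverge.
        self.parent_to_child.entry(block.parent).or_default().insert(block.hash);
        self.earliest.entry(block.number).or_default().insert(block.hash);
        self.blocks.insert(block.hash, block);
    }

    /// Walk parent links while the parent is still buffered, like `lowest_ancestor`.
    fn lowest_ancestor(&self, hash: u64) -> Option<&BufferedBlock> {
        let mut current = self.blocks.get(&hash)?;
        while let Some(parent) = self.blocks.get(&current.parent) {
            current = parent;
        }
        Some(current)
    }
}

fn main() {
    let mut buf = Buffer::default();
    buf.insert(BufferedBlock { hash: 2, parent: 1, number: 10 });
    buf.insert(BufferedBlock { hash: 3, parent: 2, number: 11 });
    // 3's lowest buffered ancestor is 2: block 1 was never buffered.
    assert_eq!(buf.lowest_ancestor(3).map(|b| b.hash), Some(2));
}
```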
- pub fn insert_block(&mut self, block: SealedBlockWithSenders) { - let hash = block.hash(); - - self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); - self.earliest_blocks.entry(block.number()).or_default().insert(hash); - self.blocks.insert(hash, block); - - if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) { - // evict the block if limit is hit - if let Some(evicted_block) = self.remove_block(&evicted_hash) { - // evict the block if limit is hit - self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash); - } - } - self.metrics.blocks.set(self.blocks.len() as f64); - } - - /// Removes the given block from the buffer and also all the children of the block. - /// - /// This is used to get all the blocks that are dependent on the block that is included. - /// - /// Note: that order of returned blocks is important and the blocks with lower block number - /// in the chain will come first so that they can be executed in the correct order. - pub fn remove_block_with_children( - &mut self, - parent_hash: &BlockHash, - ) -> Vec> { - let removed = self - .remove_block(parent_hash) - .into_iter() - .chain(self.remove_children(vec![*parent_hash])) - .collect(); - self.metrics.blocks.set(self.blocks.len() as f64); - removed - } - - /// Discard all blocks that precede block number from the buffer. - pub fn remove_old_blocks(&mut self, block_number: BlockNumber) { - let mut block_hashes_to_remove = Vec::new(); - - // discard all blocks that are before the finalized number. - while let Some(entry) = self.earliest_blocks.first_entry() { - if *entry.key() > block_number { - break - } - let block_hashes = entry.remove(); - block_hashes_to_remove.extend(block_hashes); - } - - // remove from other collections. - for block_hash in &block_hashes_to_remove { - // It's fine to call - self.remove_block(block_hash); - } - - self.remove_children(block_hashes_to_remove); - self.metrics.blocks.set(self.blocks.len() as f64); - } - - /// Remove block entry - fn remove_from_earliest_blocks(&mut self, number: BlockNumber, hash: &BlockHash) { - if let Some(entry) = self.earliest_blocks.get_mut(&number) { - entry.remove(hash); - if entry.is_empty() { - self.earliest_blocks.remove(&number); - } - } - } - - /// Remove from parent child connection. This method does not remove children. - fn remove_from_parent(&mut self, parent_hash: BlockHash, hash: &BlockHash) { - // remove from parent to child connection, but only for this block parent. - if let Some(entry) = self.parent_to_child.get_mut(&parent_hash) { - entry.remove(hash); - // if set is empty remove block entry. - if entry.is_empty() { - self.parent_to_child.remove(&parent_hash); - } - } - } - - /// Removes block from inner collections. - /// This method will only remove the block if it's present inside `self.blocks`. - /// The block might be missing from other collections, the method will only ensure that it has - /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option> { - let block = self.blocks.remove(hash)?; - self.remove_from_earliest_blocks(block.number(), hash); - self.remove_from_parent(block.parent_hash(), hash); - self.lru.remove(hash); - Some(block) - } - - /// Remove all children and their descendants for the given blocks and return them. - fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { - // remove all parent child connection and all the child children blocks that are connected - // to the discarded parent blocks. 
- let mut remove_parent_children = parent_hashes; - let mut removed_blocks = Vec::new(); - while let Some(parent_hash) = remove_parent_children.pop() { - // get this child blocks children and add them to the remove list. - if let Some(parent_children) = self.parent_to_child.remove(&parent_hash) { - // remove child from buffer - for child_hash in &parent_children { - if let Some(block) = self.remove_block(child_hash) { - removed_blocks.push(block); - } - } - remove_parent_children.extend(parent_children); - } - } - removed_blocks - } -} - -#[cfg(test)] -mod tests { - use crate::BlockBuffer; - use alloy_eips::BlockNumHash; - use alloy_primitives::BlockHash; - use reth_primitives::SealedBlockWithSenders; - use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; - use std::collections::HashMap; - - /// Create random block with specified number and parent hash. - fn create_block(rng: &mut R, number: u64, parent: BlockHash) -> SealedBlockWithSenders { - let block = - random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() }); - block.seal_with_senders().unwrap() - } - - /// Assert that all buffer collections have the same data length. - fn assert_buffer_lengths(buffer: &BlockBuffer, expected: usize) { - assert_eq!(buffer.blocks.len(), expected); - assert_eq!(buffer.lru.len(), expected); - assert_eq!( - buffer.parent_to_child.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()), - expected - ); - assert_eq!( - buffer.earliest_blocks.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()), - expected - ); - } - - /// Assert that the block was removed from all buffer collections. - fn assert_block_removal(buffer: &BlockBuffer, block: &SealedBlockWithSenders) { - assert!(!buffer.blocks.contains_key(&block.hash())); - assert!(buffer - .parent_to_child - .get(&block.parent_hash) - .and_then(|p| p.get(&block.hash())) - .is_none()); - assert!(buffer - .earliest_blocks - .get(&block.number) - .and_then(|hashes| hashes.get(&block.hash())) - .is_none()); - } - - #[test] - fn simple_insertion() { - let mut rng = generators::rng(); - let parent = rng.gen(); - let block1 = create_block(&mut rng, 10, parent); - let mut buffer = BlockBuffer::new(3); - - buffer.insert_block(block1.clone()); - assert_buffer_lengths(&buffer, 1); - assert_eq!(buffer.block(&block1.hash()), Some(&block1)); - } - - #[test] - fn take_entire_chain_of_children() { - let mut rng = generators::rng(); - - let main_parent_hash = rng.gen(); - let block1 = create_block(&mut rng, 10, main_parent_hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 12, block2.hash()); - let parent4 = rng.gen(); - let block4 = create_block(&mut rng, 14, parent4); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - buffer.insert_block(block4.clone()); - - assert_buffer_lengths(&buffer, 4); - assert_eq!(buffer.block(&block4.hash()), Some(&block4)); - assert_eq!(buffer.block(&block2.hash()), Some(&block2)); - assert_eq!(buffer.block(&main_parent_hash), None); - - assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4)); - assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1)); - assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1)); - assert_eq!( - buffer.remove_block_with_children(&main_parent_hash), - vec![block1, block2, block3] - ); - assert_buffer_lengths(&buffer, 1); - } - - #[test] - fn take_all_multi_level_children() { - 
let mut rng = generators::rng(); - - let main_parent_hash = rng.gen(); - let block1 = create_block(&mut rng, 10, main_parent_hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 11, block1.hash()); - let block4 = create_block(&mut rng, 12, block2.hash()); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - buffer.insert_block(block4.clone()); - - assert_buffer_lengths(&buffer, 4); - assert_eq!( - buffer - .remove_block_with_children(&main_parent_hash) - .into_iter() - .map(|b| (b.hash(), b)) - .collect::>(), - HashMap::from([ - (block1.hash(), block1), - (block2.hash(), block2), - (block3.hash(), block3), - (block4.hash(), block4) - ]) - ); - assert_buffer_lengths(&buffer, 0); - } - - #[test] - fn take_block_with_children() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 11, block1.hash()); - let block4 = create_block(&mut rng, 12, block2.hash()); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - buffer.insert_block(block4.clone()); - - assert_buffer_lengths(&buffer, 4); - assert_eq!( - buffer - .remove_block_with_children(&block1.hash()) - .into_iter() - .map(|b| (b.hash(), b)) - .collect::>(), - HashMap::from([ - (block1.hash(), block1), - (block2.hash(), block2), - (block3.hash(), block3), - (block4.hash(), block4) - ]) - ); - assert_buffer_lengths(&buffer, 0); - } - - #[test] - fn remove_chain_of_children() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 12, block2.hash()); - let parent4 = rng.gen(); - let block4 = create_block(&mut rng, 14, parent4); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2); - buffer.insert_block(block3); - buffer.insert_block(block4); - - assert_buffer_lengths(&buffer, 4); - buffer.remove_old_blocks(block1.number); - assert_buffer_lengths(&buffer, 1); - } - - #[test] - fn remove_all_multi_level_children() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 11, block1.hash()); - let block4 = create_block(&mut rng, 12, block2.hash()); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2); - buffer.insert_block(block3); - buffer.insert_block(block4); - - assert_buffer_lengths(&buffer, 4); - buffer.remove_old_blocks(block1.number); - assert_buffer_lengths(&buffer, 0); - } - - #[test] - fn remove_multi_chains() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block1a = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block2a = create_block(&mut rng, 11, block1.hash()); - let random_parent1 = rng.gen(); - let 
random_block1 = create_block(&mut rng, 10, random_parent1); - let random_parent2 = rng.gen(); - let random_block2 = create_block(&mut rng, 11, random_parent2); - let random_parent3 = rng.gen(); - let random_block3 = create_block(&mut rng, 12, random_parent3); - - let mut buffer = BlockBuffer::new(10); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block1a.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block2a.clone()); - buffer.insert_block(random_block1.clone()); - buffer.insert_block(random_block2.clone()); - buffer.insert_block(random_block3.clone()); - - // check that random blocks are their own ancestor, and that chains have proper ancestors - assert_eq!(buffer.lowest_ancestor(&random_block1.hash()), Some(&random_block1)); - assert_eq!(buffer.lowest_ancestor(&random_block2.hash()), Some(&random_block2)); - assert_eq!(buffer.lowest_ancestor(&random_block3.hash()), Some(&random_block3)); - - // descendants have ancestors - assert_eq!(buffer.lowest_ancestor(&block2a.hash()), Some(&block1)); - assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1)); - - // roots are themselves - assert_eq!(buffer.lowest_ancestor(&block1a.hash()), Some(&block1a)); - assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1)); - - assert_buffer_lengths(&buffer, 7); - buffer.remove_old_blocks(10); - assert_buffer_lengths(&buffer, 2); - } - - #[test] - fn evict_with_gap() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 12, block2.hash()); - let parent4 = rng.gen(); - let block4 = create_block(&mut rng, 13, parent4); - - let mut buffer = BlockBuffer::new(3); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - - // pre-eviction block1 is the root - assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1)); - assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1)); - assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1)); - - buffer.insert_block(block4.clone()); - - assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4)); - - // block1 gets evicted - assert_block_removal(&buffer, &block1); - - // check lowest ancestor results post eviction - assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block2)); - assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block2)); - assert_eq!(buffer.lowest_ancestor(&block1.hash()), None); - - assert_buffer_lengths(&buffer, 3); - } - - #[test] - fn simple_eviction() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 12, block2.hash()); - let parent4 = rng.gen(); - let block4 = create_block(&mut rng, 13, parent4); - - let mut buffer = BlockBuffer::new(3); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2); - buffer.insert_block(block3); - buffer.insert_block(block4); - - // block3 gets evicted - assert_block_removal(&buffer, &block1); - - assert_buffer_lengths(&buffer, 3); - } -} diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs deleted file mode 100644 index 26a676f4d36c..000000000000 --- a/crates/blockchain-tree/src/block_indices.rs +++ 
/dev/null @@ -1,620 +0,0 @@ -//! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`] - -use super::state::SidechainId; -use crate::canonical_chain::CanonicalChain; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use linked_hash_set::LinkedHashSet; -use reth_execution_types::Chain; -use reth_primitives::SealedBlockWithSenders; -use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; - -/// Internal indices of the blocks and chains. -/// -/// This is main connection between blocks, chains and canonical chain. -/// -/// It contains a list of canonical block hashes, forks to child blocks, and a mapping of block hash -/// to chain ID. -#[derive(Debug, Clone)] -pub struct BlockIndices { - /// Last finalized block. - last_finalized_block: BlockNumber, - /// Non-finalized canonical chain. Contains N number (depends on `finalization_depth`) of - /// blocks. These blocks are found in `fork_to_child` but not inside `blocks_to_chain` or - /// `number_to_block` as those are sidechain specific indices. - canonical_chain: CanonicalChain, - /// Index needed when discarding the chain, so we can remove connected chains from tree. - /// - /// This maintains insertion order for all child blocks, so - /// [`BlockIndices::pending_block_num_hash`] returns always the same block: the first child - /// block we inserted. - /// - /// NOTE: It contains just blocks that are forks as a key and not all blocks. - fork_to_child: HashMap>, - /// Utility index for Block number to block hash(s). - /// - /// This maps all blocks with same block number to their hash. - /// - /// Can be used for RPC fetch block(s) in chain by its number. - /// - /// Note: This is a bijection: at all times `blocks_to_chain` and this map contain the block - /// hashes. - block_number_to_block_hashes: BTreeMap>, - /// Block hashes to the sidechain IDs they belong to. - blocks_to_chain: HashMap, -} - -impl BlockIndices { - /// Create new block indices structure - pub fn new( - last_finalized_block: BlockNumber, - canonical_chain: BTreeMap, - ) -> Self { - Self { - last_finalized_block, - canonical_chain: CanonicalChain::new(canonical_chain), - fork_to_child: Default::default(), - blocks_to_chain: Default::default(), - block_number_to_block_hashes: Default::default(), - } - } - - /// Return fork to child indices - pub const fn fork_to_child(&self) -> &HashMap> { - &self.fork_to_child - } - - /// Return block to sidechain id - #[allow(dead_code)] - pub(crate) const fn blocks_to_chain(&self) -> &HashMap { - &self.blocks_to_chain - } - - /// Returns the hash and number of the pending block. - /// - /// It is possible that multiple child blocks for the canonical tip exist. - /// This will always return the _first_ child we recorded for the canonical tip. - pub(crate) fn pending_block_num_hash(&self) -> Option { - let canonical_tip = self.canonical_tip(); - let hash = self.fork_to_child.get(&canonical_tip.hash)?.front().copied()?; - Some(BlockNumHash { number: canonical_tip.number + 1, hash }) - } - - /// Returns all pending block hashes. - /// - /// Pending blocks are considered blocks that are extending the canonical tip by one block - /// number and have their parent hash set to the canonical tip. 
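`fork_to_child` above is deliberately an insertion-ordered set, which is what makes `pending_block_num_hash` deterministic: the first child recorded for the canonical tip stays the pending block. A std-only sketch of the same guarantee, using a `Vec` with an insert-if-absent check in place of `linked_hash_set`:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct ForkIndex {
    /// parent hash -> children in arrival order (insert-if-absent keeps it a set).
    fork_to_child: HashMap<u64, Vec<u64>>,
}

impl ForkIndex {
    fn record_child(&mut self, parent: u64, child: u64) {
        let children = self.fork_to_child.entry(parent).or_default();
        if !children.contains(&child) {
            children.push(child);
        }
    }

    /// The pending block is always the *first* recorded child of the tip.
    fn pending_block(&self, canonical_tip: u64) -> Option<u64> {
        self.fork_to_child.get(&canonical_tip)?.first().copied()
    }
}

fn main() {
    let mut index = ForkIndex::default();
    index.record_child(5, 100);
    index.record_child(5, 200);
    index.record_child(5, 100); // duplicate, ignored
    // Deterministic: the first-seen child wins, regardless of later forks.
    assert_eq!(index.pending_block(5), Some(100));
}
```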
- pub fn pending_blocks(&self) -> (BlockNumber, Vec) { - let canonical_tip = self.canonical_tip(); - let pending_blocks = self - .fork_to_child - .get(&canonical_tip.hash) - .cloned() - .unwrap_or_default() - .into_iter() - .collect(); - (canonical_tip.number + 1, pending_blocks) - } - - /// Last finalized block - pub const fn last_finalized_block(&self) -> BlockNumber { - self.last_finalized_block - } - - /// Insert non fork block. - pub(crate) fn insert_non_fork_block( - &mut self, - block_number: BlockNumber, - block_hash: BlockHash, - chain_id: SidechainId, - ) { - self.block_number_to_block_hashes.entry(block_number).or_default().insert(block_hash); - self.blocks_to_chain.insert(block_hash, chain_id); - } - - /// Insert block to chain and fork child indices of the new chain - pub(crate) fn insert_chain(&mut self, chain_id: SidechainId, chain: &Chain) { - for (number, block) in chain.blocks() { - // add block -> chain_id index - self.blocks_to_chain.insert(block.hash(), chain_id); - // add number -> block - self.block_number_to_block_hashes.entry(*number).or_default().insert(block.hash()); - } - let first = chain.first(); - // add parent block -> block index - self.fork_to_child.entry(first.parent_hash).or_default().insert_if_absent(first.hash()); - } - - /// Get the [`SidechainId`] for the given block hash if it exists. - pub(crate) fn get_side_chain_id(&self, block: &BlockHash) -> Option { - self.blocks_to_chain.get(block).copied() - } - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them and return all chains that needs to be - /// removed. - pub(crate) fn update_block_hashes( - &mut self, - hashes: BTreeMap, - ) -> (BTreeSet, Vec) { - // set new canonical hashes. - self.canonical_chain.replace(hashes.clone()); - - let mut new_hashes = hashes.into_iter(); - let mut old_hashes = self.canonical_chain().clone().into_iter(); - - let mut removed = Vec::new(); - let mut added = Vec::new(); - - let mut new_hash = new_hashes.next(); - let mut old_hash = old_hashes.next(); - - loop { - let Some(old_block_value) = old_hash else { - // end of old_hashes canonical chain. New chain has more blocks than old chain. - while let Some(new) = new_hash { - // add new blocks to added list. - added.push(new.into()); - new_hash = new_hashes.next(); - } - break - }; - let Some(new_block_value) = new_hash else { - // Old canonical chain had more block than new chain. - // remove all present block. - // this is mostly not going to happen as reorg should make new chain in Tree. 
- while let Some(rem) = old_hash { - removed.push(rem); - old_hash = old_hashes.next(); - } - break - }; - // compare old and new canonical block number - match new_block_value.0.cmp(&old_block_value.0) { - std::cmp::Ordering::Less => { - // new chain has more past blocks than old chain - added.push(new_block_value.into()); - new_hash = new_hashes.next(); - } - std::cmp::Ordering::Equal => { - if new_block_value.1 != old_block_value.1 { - // remove block hash as it is different - removed.push(old_block_value); - added.push(new_block_value.into()) - } - new_hash = new_hashes.next(); - old_hash = old_hashes.next(); - } - std::cmp::Ordering::Greater => { - // old chain has more past blocks than new chain - removed.push(old_block_value); - old_hash = old_hashes.next() - } - } - } - - // remove children of removed blocks - ( - removed.into_iter().fold(BTreeSet::new(), |mut fold, (number, hash)| { - fold.extend(self.remove_block(number, hash)); - fold - }), - added, - ) - } - - /// Remove chain from indices and return dependent chains that need to be removed. - /// Does the cleaning of the tree and removing blocks from the chain. - pub(crate) fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { - chain - .blocks() - .iter() - .flat_map(|(block_number, block)| { - let block_hash = block.hash(); - self.remove_block(*block_number, block_hash) - }) - .collect() - } - - /// Remove Blocks from indices. - fn remove_block( - &mut self, - block_number: BlockNumber, - block_hash: BlockHash, - ) -> BTreeSet { - // rm number -> block - if let btree_map::Entry::Occupied(mut entry) = - self.block_number_to_block_hashes.entry(block_number) - { - let set = entry.get_mut(); - set.remove(&block_hash); - // remove set if empty - if set.is_empty() { - entry.remove(); - } - } - - // rm block -> chain_id - self.blocks_to_chain.remove(&block_hash); - - // rm fork -> child - let removed_fork = self.fork_to_child.remove(&block_hash); - removed_fork - .map(|fork_blocks| { - fork_blocks - .into_iter() - .filter_map(|fork_child| self.blocks_to_chain.remove(&fork_child)) - .collect() - }) - .unwrap_or_default() - } - - /// Remove all blocks from canonical list and insert new blocks to it. - /// - /// It is assumed that blocks are interconnected and that they connect to canonical chain - pub fn canonicalize_blocks(&mut self, blocks: &BTreeMap) { - if blocks.is_empty() { - return - } - - // Remove all blocks from canonical chain - let first_number = *blocks.first_key_value().unwrap().0; - - // this will remove all blocks numbers that are going to be replaced. 
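`update_block_hashes` above is a two-pointer merge over two number-ordered `(number, hash)` sequences: a number present on only one side is a pure addition or removal, while equal numbers with differing hashes mark a re-org. The same shape in miniature, std-only with integer hashes:

```rust
use std::collections::BTreeMap;

/// Diff an old and a new canonical mapping (block number -> hash), both
/// ordered by number, into removed and added entries.
fn diff_canonical(
    old: &BTreeMap<u64, u64>,
    new: &BTreeMap<u64, u64>,
) -> (Vec<(u64, u64)>, Vec<(u64, u64)>) {
    let mut old_iter = old.iter().map(|(n, h)| (*n, *h));
    let mut new_iter = new.iter().map(|(n, h)| (*n, *h));
    let (mut removed, mut added) = (Vec::new(), Vec::new());
    let (mut old_cur, mut new_cur) = (old_iter.next(), new_iter.next());
    loop {
        match (old_cur, new_cur) {
            (None, None) => break,
            // Old side exhausted: the rest of the new chain is all additions.
            (None, Some(n)) => {
                added.push(n);
                new_cur = new_iter.next();
            }
            // New side exhausted: the rest of the old chain was removed.
            (Some(o), None) => {
                removed.push(o);
                old_cur = old_iter.next();
            }
            (Some(o), Some(n)) => match n.0.cmp(&o.0) {
                // New chain reaches further back: a pure addition.
                std::cmp::Ordering::Less => {
                    added.push(n);
                    new_cur = new_iter.next();
                }
                // Same height: differing hashes mean a re-org at this block.
                std::cmp::Ordering::Equal => {
                    if n.1 != o.1 {
                        removed.push(o);
                        added.push(n);
                    }
                    old_cur = old_iter.next();
                    new_cur = new_iter.next();
                }
                // Old chain reaches further back: a pure removal.
                std::cmp::Ordering::Greater => {
                    removed.push(o);
                    old_cur = old_iter.next();
                }
            },
        }
    }
    (removed, added)
}

fn main() {
    let old = BTreeMap::from([(1, 10), (2, 20), (3, 30)]);
    let new = BTreeMap::from([(2, 21), (3, 31), (4, 41)]);
    let (removed, added) = diff_canonical(&old, &new);
    assert_eq!(removed, vec![(1, 10), (2, 20), (3, 30)]);
    assert_eq!(added, vec![(2, 21), (3, 31), (4, 41)]);
}
```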
-
-    /// Remove all blocks from the canonical list and insert new blocks into it.
-    ///
-    /// It is assumed that the blocks are interconnected and that they connect to the canonical
-    /// chain.
-    pub fn canonicalize_blocks(&mut self, blocks: &BTreeMap<BlockNumber, SealedBlockWithSenders>) {
-        if blocks.is_empty() {
-            return
-        }
-
-        // Remove all blocks from canonical chain
-        let first_number = *blocks.first_key_value().unwrap().0;
-
-        // this will remove all block numbers that are going to be replaced.
-        self.canonical_chain.retain(|&number, _| number < first_number);
-
-        // remove them from block to chain_id index
-        blocks.iter().map(|(_, b)| (b.number, b.hash(), b.parent_hash)).for_each(
-            |(number, hash, parent_hash)| {
-                // rm block -> chain_id
-                self.blocks_to_chain.remove(&hash);
-
-                // rm number -> block
-                if let btree_map::Entry::Occupied(mut entry) =
-                    self.block_number_to_block_hashes.entry(number)
-                {
-                    let set = entry.get_mut();
-                    set.remove(&hash);
-                    // remove set if empty
-                    if set.is_empty() {
-                        entry.remove();
-                    }
-                }
-                // rm fork block -> hash
-                if let hash_map::Entry::Occupied(mut entry) = self.fork_to_child.entry(parent_hash)
-                {
-                    let set = entry.get_mut();
-                    set.remove(&hash);
-                    // remove set if empty
-                    if set.is_empty() {
-                        entry.remove();
-                    }
-                }
-            },
-        );
-
-        // insert new canonical
-        self.canonical_chain.extend(blocks.iter().map(|(number, block)| (*number, block.hash())))
-    }
-
-    /// Removes the last `N` canonical hashes, unwinding the canonical chain to `unwind_to`.
-    ///
-    /// NOTE: This is not safe standalone, as it will not disconnect blocks that depend on the
-    /// unwound canonical chain, and should only be used when the canonical chain is reinserted
-    /// inside the tree.
-    pub(crate) fn unwind_canonical_chain(&mut self, unwind_to: BlockNumber) {
-        // this will remove all block numbers that are going to be replaced.
-        self.canonical_chain.retain(|num, _| *num <= unwind_to);
-    }
-
-    /// Used for finalization of blocks.
-    ///
-    /// Returns the list of chains for removal that depend on the finalized canonical chain.
-    pub(crate) fn finalize_canonical_blocks(
-        &mut self,
-        finalized_block: BlockNumber,
-        num_of_additional_canonical_hashes_to_retain: u64,
-    ) -> BTreeSet<SidechainId> {
-        // get finalized chains. blocks between [self.last_finalized, finalized_block).
-        // Don't remove finalized_block, as a sidechain can point to it.
-        let finalized_blocks: Vec<BlockHash> = self
-            .canonical_chain
-            .iter()
-            .filter(|(number, _)| *number >= self.last_finalized_block && *number < finalized_block)
-            .map(|(_, hash)| hash)
-            .collect();
-
-        // remove unneeded canonical hashes.
-        let remove_until =
-            finalized_block.saturating_sub(num_of_additional_canonical_hashes_to_retain);
-        self.canonical_chain.retain(|&number, _| number >= remove_until);
-
-        let mut lose_chains = BTreeSet::new();
-
-        for block_hash in finalized_blocks {
-            // there is a fork block.
-            if let Some(fork_blocks) = self.fork_to_child.remove(&block_hash) {
-                lose_chains = fork_blocks.into_iter().fold(lose_chains, |mut fold, fork_child| {
-                    if let Some(lose_chain) = self.blocks_to_chain.remove(&fork_child) {
-                        fold.insert(lose_chain);
-                    }
-                    fold
-                });
-            }
-        }
-
-        // set last finalized block.
-        self.last_finalized_block = finalized_block;
-
-        lose_chains
-    }
-
-    /// Returns the block hash of the canonical block with the given number.
-    #[inline]
-    pub fn canonical_hash(&self, block_number: &BlockNumber) -> Option<BlockHash> {
-        self.canonical_chain.canonical_hash(block_number)
-    }
-
-    /// Returns the block number of the canonical block with the given hash.
-    #[inline]
-    pub fn canonical_number(&self, block_hash: &BlockHash) -> Option<BlockNumber> {
-        self.canonical_chain.canonical_number(block_hash)
-    }
-
-    /// Get the canonical tip.
-    #[inline]
-    pub fn canonical_tip(&self) -> BlockNumHash {
-        self.canonical_chain.tip()
-    }
-
-    /// Canonical chain needed for execution of EVM. It should contain the last 256 block hashes.
- #[inline] - pub(crate) const fn canonical_chain(&self) -> &CanonicalChain { - &self.canonical_chain - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use alloy_primitives::B256; - use reth_primitives::{SealedBlock, SealedHeader}; - - #[test] - fn pending_block_num_hash_returns_none_if_no_fork() { - // Create a new canonical chain with a single block (represented by its number and hash). - let canonical_chain = BTreeMap::from([(0, B256::from_slice(&[1; 32]))]); - - let block_indices = BlockIndices::new(0, canonical_chain); - - // No fork to child blocks, so there is no pending block. - assert_eq!(block_indices.pending_block_num_hash(), None); - } - - #[test] - fn pending_block_num_hash_works() { - // Create a canonical chain with multiple blocks at heights 1, 2, and 3. - let canonical_chain = BTreeMap::from([ - (1, B256::from_slice(&[1; 32])), - (2, B256::from_slice(&[2; 32])), - (3, B256::from_slice(&[3; 32])), - ]); - - let mut block_indices = BlockIndices::new(3, canonical_chain); - - // Define the hash of the parent block (the block at height 3 in the canonical chain). - let parent_hash = B256::from_slice(&[3; 32]); - - // Define the hashes of two child blocks that extend the canonical chain. - let child_hash_1 = B256::from_slice(&[2; 32]); - let child_hash_2 = B256::from_slice(&[3; 32]); - - // Create a set to store both child block hashes. - let mut child_set = LinkedHashSet::new(); - child_set.insert(child_hash_1); - child_set.insert(child_hash_2); - - // Associate the parent block hash with its children in the fork_to_child mapping. - block_indices.fork_to_child.insert(parent_hash, child_set); - - // Pending block should be the first child block. - assert_eq!( - block_indices.pending_block_num_hash(), - Some(BlockNumHash { number: 4, hash: child_hash_1 }) - ); - } - - #[test] - fn pending_blocks_returns_empty_if_no_fork() { - // Create a canonical chain with a single block at height 10. - let canonical_chain = BTreeMap::from([(10, B256::from_slice(&[1; 32]))]); - let block_indices = BlockIndices::new(0, canonical_chain); - - // No child blocks are associated with the canonical tip. - assert_eq!(block_indices.pending_blocks(), (11, Vec::new())); - } - - #[test] - fn pending_blocks_returns_multiple_children() { - // Define the hash of the parent block (the block at height 5 in the canonical chain). - let parent_hash = B256::from_slice(&[3; 32]); - - // Create a canonical chain with a block at height 5. - let canonical_chain = BTreeMap::from([(5, parent_hash)]); - let mut block_indices = BlockIndices::new(0, canonical_chain); - - // Define the hashes of two child blocks. - let child_hash_1 = B256::from_slice(&[4; 32]); - let child_hash_2 = B256::from_slice(&[5; 32]); - - // Create a set to store both child block hashes. - let mut child_set = LinkedHashSet::new(); - child_set.insert(child_hash_1); - child_set.insert(child_hash_2); - - // Associate the parent block hash with its children. - block_indices.fork_to_child.insert(parent_hash, child_set); - - // Pending blocks should be the two child blocks. - assert_eq!(block_indices.pending_blocks(), (6, vec![child_hash_1, child_hash_2])); - } - - #[test] - fn pending_blocks_with_multiple_forked_chains() { - // Define hashes for parent blocks and child blocks. - let parent_hash_1 = B256::from_slice(&[6; 32]); - let parent_hash_2 = B256::from_slice(&[7; 32]); - - // Create a canonical chain with blocks at heights 1 and 2. 
- let canonical_chain = BTreeMap::from([(1, parent_hash_1), (2, parent_hash_2)]); - - let mut block_indices = BlockIndices::new(2, canonical_chain); - - // Define hashes for child blocks. - let child_hash_1 = B256::from_slice(&[8; 32]); - let child_hash_2 = B256::from_slice(&[9; 32]); - - // Create sets to store child blocks for each parent block. - let mut child_set_1 = LinkedHashSet::new(); - let mut child_set_2 = LinkedHashSet::new(); - child_set_1.insert(child_hash_1); - child_set_2.insert(child_hash_2); - - // Associate parent block hashes with their child blocks. - block_indices.fork_to_child.insert(parent_hash_1, child_set_1); - block_indices.fork_to_child.insert(parent_hash_2, child_set_2); - - // Check that the pending blocks are only those extending the canonical tip. - assert_eq!(block_indices.pending_blocks(), (3, vec![child_hash_2])); - } - - #[test] - fn insert_non_fork_block_adds_block_correctly() { - // Create a new BlockIndices instance with an empty state. - let mut block_indices = BlockIndices::new(0, BTreeMap::new()); - - // Define test parameters. - let block_number = 1; - let block_hash = B256::from_slice(&[1; 32]); - let chain_id = SidechainId::from(42); - - // Insert the block into the BlockIndices instance. - block_indices.insert_non_fork_block(block_number, block_hash, chain_id); - - // Check that the block number to block hashes mapping includes the new block hash. - assert_eq!( - block_indices.block_number_to_block_hashes.get(&block_number), - Some(&HashSet::from([block_hash])) - ); - - // Check that the block hash to chain ID mapping includes the new entry. - assert_eq!(block_indices.blocks_to_chain.get(&block_hash), Some(&chain_id)); - } - - #[test] - fn insert_non_fork_block_combined_tests() { - // Create a new BlockIndices instance with an empty state. - let mut block_indices = BlockIndices::new(0, BTreeMap::new()); - - // Define test parameters. - let block_number_1 = 2; - let block_hash_1 = B256::from_slice(&[1; 32]); - let block_hash_2 = B256::from_slice(&[2; 32]); - let chain_id_1 = SidechainId::from(84); - - let block_number_2 = 4; - let block_hash_3 = B256::from_slice(&[3; 32]); - let chain_id_2 = SidechainId::from(200); - - // Insert multiple hashes for the same block number. - block_indices.insert_non_fork_block(block_number_1, block_hash_1, chain_id_1); - block_indices.insert_non_fork_block(block_number_1, block_hash_2, chain_id_1); - - // Insert blocks with different numbers. - block_indices.insert_non_fork_block(block_number_2, block_hash_3, chain_id_2); - - // Block number 1 should have two block hashes associated with it. - let mut expected_hashes_for_block_1 = HashSet::default(); - expected_hashes_for_block_1.insert(block_hash_1); - expected_hashes_for_block_1.insert(block_hash_2); - assert_eq!( - block_indices.block_number_to_block_hashes.get(&block_number_1), - Some(&expected_hashes_for_block_1) - ); - - // Check that the block hashes for block_number_1 are associated with the correct chain ID. - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id_1)); - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id_1)); - - // Block number 2 should have a single block hash associated with it. - assert_eq!( - block_indices.block_number_to_block_hashes.get(&block_number_2), - Some(&HashSet::from([block_hash_3])) - ); - - // Block hash 3 should be associated with the correct chain ID. 
- assert_eq!(block_indices.blocks_to_chain.get(&block_hash_3), Some(&chain_id_2)); - } - - #[test] - fn insert_chain_validates_insertion() { - // Create a new BlockIndices instance with an empty state. - let mut block_indices = BlockIndices::new(0, BTreeMap::new()); - - // Define test parameters. - let chain_id = SidechainId::from(42); - - // Define some example blocks and their hashes. - let block_hash_1 = B256::from_slice(&[1; 32]); - let block_hash_2 = B256::from_slice(&[2; 32]); - let parent_hash = B256::from_slice(&[0; 32]); - - // Define blocks with their numbers and parent hashes. - let block_1 = SealedBlockWithSenders { - block: SealedBlock::new( - SealedHeader::new( - Header { parent_hash, number: 1, ..Default::default() }, - block_hash_1, - ), - Default::default(), - ), - ..Default::default() - }; - let block_2 = SealedBlockWithSenders { - block: SealedBlock::new( - SealedHeader::new( - Header { parent_hash: block_hash_1, number: 2, ..Default::default() }, - block_hash_2, - ), - Default::default(), - ), - ..Default::default() - }; - - // Define a chain containing the blocks. - let chain = Chain::new(vec![block_1, block_2], Default::default(), Default::default()); - - // Insert the chain into the BlockIndices. - block_indices.insert_chain(chain_id, &chain); - - // Check that the blocks are correctly mapped to the chain ID. - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id)); - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id)); - - // Check that block numbers map to their respective hashes. - let mut expected_hashes_1 = HashSet::default(); - expected_hashes_1.insert(block_hash_1); - assert_eq!(block_indices.block_number_to_block_hashes.get(&1), Some(&expected_hashes_1)); - - let mut expected_hashes_2 = HashSet::default(); - expected_hashes_2.insert(block_hash_2); - assert_eq!(block_indices.block_number_to_block_hashes.get(&2), Some(&expected_hashes_2)); - - // Check that the fork_to_child mapping contains the correct parent-child relationship. - // We take the first block of the chain. - let mut expected_children = LinkedHashSet::new(); - expected_children.insert(block_hash_1); - assert_eq!(block_indices.fork_to_child.get(&parent_hash), Some(&expected_children)); - } -} diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs deleted file mode 100644 index 4846619d42cb..000000000000 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ /dev/null @@ -1,2441 +0,0 @@ -//! 
Implementation of [`BlockchainTree`]
-
-use crate::{
-    externals::TreeNodeTypes,
-    metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics},
-    state::{SidechainId, TreeState},
-    AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals,
-};
-use alloy_eips::{BlockNumHash, ForkBlock};
-use alloy_primitives::{BlockHash, BlockNumber, B256, U256};
-use reth_blockchain_tree_api::{
-    error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind},
-    BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk,
-};
-use reth_consensus::{Consensus, ConsensusError};
-use reth_evm::execute::BlockExecutorProvider;
-use reth_execution_errors::{BlockExecutionError, BlockValidationError};
-use reth_execution_types::{Chain, ExecutionOutcome};
-use reth_node_types::NodeTypesWithDB;
-use reth_primitives::{
-    EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders,
-    SealedHeader, StaticFileSegment,
-};
-use reth_provider::{
-    BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification,
-    CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit,
-    ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider,
-    ProviderError, StaticFileProviderFactory, StorageLocation,
-};
-use reth_stages_api::{MetricEvent, MetricEventsSender};
-use reth_storage_errors::provider::{ProviderResult, RootMismatch};
-use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot};
-use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot};
-use std::{
-    collections::{btree_map::Entry, BTreeMap, HashSet},
-    sync::Arc,
-};
-use tracing::{debug, error, info, instrument, trace, warn};
-
-#[cfg_attr(doc, aquamarine::aquamarine)]
-/// A Tree of chains.
-///
-/// The flowchart represents all the states a block can have inside the tree.
-///
-/// - Green blocks belong to the canonical chain and are saved inside the database.
-/// - Pending blocks and sidechains are found in-memory inside [`BlockchainTree`].
-///
-/// Both pending chains and sidechains have the same mechanisms, the only difference is when they
-/// get committed to the database.
-///
-/// For pending, it is an append operation, but for sidechains they need to move the current
-/// canonical blocks to the tree (by removing them from the database), and commit the sidechain
-/// blocks to the database to become the canonical chain (reorg).
-///
-/// `include_mmd!("docs/mermaid/tree.mmd")`
-///
-/// # Main functions
-/// * [`BlockchainTree::insert_block`]: Connect a block to a chain, execute it, and if valid,
-///   insert the block into the tree.
-/// * [`BlockchainTree::finalize_block`]: Remove chains that branch off of the now finalized block.
-/// * [`BlockchainTree::make_canonical`]: Check if we have the hash of a block that is the current
-///   canonical head and commit it to db.
-#[derive(Debug)]
-pub struct BlockchainTree<N: NodeTypesWithDB, E> {
-    /// The state of the tree
-    ///
-    /// Tracks all the chains, the block indices, and the block buffer.
-    state: TreeState,
-    /// External components (the database, consensus engine etc.)
-    externals: TreeExternals<N, E>,
-    /// Tree configuration
-    config: BlockchainTreeConfig,
-    /// Broadcast channel for canon state changes notifications.
-    canon_state_notification_sender: CanonStateNotificationSender,
-    /// Metrics for sync stages.
-    sync_metrics_tx: Option<MetricEventsSender>,
-    /// Metrics for the blockchain tree.
-    metrics: TreeMetrics,
-}
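The `canon_state_notification_sender` field is a tokio broadcast channel; the constructor below sizes it at twice the maximum reorg depth so a maximum-depth reorg can be delivered at once. A minimal, self-contained sketch of that subscription model, assuming a `tokio` dependency with the `sync`, `macros`, and `rt` features (the capacity value is illustrative):

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    // Capacity mirrors the tree's choice of 2 * max_reorg_depth; the
    // concrete number here is only an example.
    let max_reorg_depth = 64usize;
    let (tx, mut rx) = broadcast::channel::<u64>(max_reorg_depth * 2);

    // Additional subscribers are handed out the same way
    // `subscribe_canon_state` clones them from the sender.
    let mut rx2 = tx.subscribe();

    tx.send(1).unwrap();
    // Every live subscriber observes the same notification.
    assert_eq!(rx.recv().await.unwrap(), 1);
    assert_eq!(rx2.recv().await.unwrap(), 1);
}
```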
-
-impl<N: NodeTypesWithDB, E> BlockchainTree<N, E> {
-    /// Subscribe to new blocks events.
-    ///
-    /// Note: Only canonical blocks are emitted by the tree.
-    pub fn subscribe_canon_state(&self) -> CanonStateNotifications {
-        self.canon_state_notification_sender.subscribe()
-    }
-
-    /// Returns a clone of the sender for the canonical state notifications.
-    pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender {
-        self.canon_state_notification_sender.clone()
-    }
-}
-
-impl<N, E> BlockchainTree<N, E>
-where
-    N: TreeNodeTypes,
-    E: BlockExecutorProvider,
-{
-    /// Builds the blockchain tree for the node.
-    ///
-    /// This method configures the blockchain tree, which is a critical component of the node,
-    /// responsible for managing the blockchain state, including blocks, transactions, and
-    /// receipts. It integrates with the consensus mechanism and the EVM for executing
-    /// transactions.
-    ///
-    /// # Parameters
-    /// - `externals`: External components required by the blockchain tree:
-    ///     - `provider_factory`: A factory for creating various blockchain-related providers,
-    ///       such as for accessing the database or static files.
-    ///     - `consensus`: The consensus configuration, which defines how the node reaches
-    ///       agreement on the blockchain state with other nodes.
-    ///     - `evm_config`: The EVM (Ethereum Virtual Machine) configuration, which affects how
-    ///       smart contracts and transactions are executed. Proper validation of this
-    ///       configuration is crucial for the correct execution of transactions.
-    /// - `tree_config`: Configuration for the blockchain tree, including any parameters that
-    ///   affect its structure or performance.
-    pub fn new(
-        externals: TreeExternals<N, E>,
-        config: BlockchainTreeConfig,
-    ) -> ProviderResult<Self> {
-        let max_reorg_depth = config.max_reorg_depth() as usize;
-        // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg
-        // depth at least N blocks must be sent at once.
-        let (canon_state_notification_sender, _receiver) =
-            tokio::sync::broadcast::channel(max_reorg_depth * 2);
-
-        let last_canonical_hashes =
-            externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?;
-
-        // If we haven't written the finalized block, assume it's zero
-        let last_finalized_block_number =
-            externals.fetch_latest_finalized_block_number()?.unwrap_or_default();
-
-        Ok(Self {
-            externals,
-            state: TreeState::new(
-                last_finalized_block_number,
-                last_canonical_hashes,
-                config.max_unconnected_blocks(),
-            ),
-            config,
-            canon_state_notification_sender,
-            sync_metrics_tx: None,
-            metrics: Default::default(),
-        })
-    }
-
-    /// Replaces the canon state notification sender.
-    ///
-    /// Caution: this will close any existing subscriptions to the previous sender.
-    #[doc(hidden)]
-    pub fn with_canon_state_notification_sender(
-        mut self,
-        canon_state_notification_sender: CanonStateNotificationSender,
-    ) -> Self {
-        self.canon_state_notification_sender = canon_state_notification_sender;
-        self
-    }
-
-    /// Set the sync metric events sender.
-    ///
-    /// A transmitter for sending synchronization metrics. This is used for monitoring the node's
-    /// synchronization process with the blockchain network.
-    pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self {
-        self.sync_metrics_tx = Some(metrics_tx);
-        self
-    }
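The two `with_*` methods above follow the consuming-builder pattern: each takes `self` by value and returns it, so configuration calls chain. A free-standing illustration with hypothetical stand-in types:

```rust
#[derive(Default)]
struct Tree {
    // stand-in for the tree's Option<MetricEventsSender> field
    metrics_tx: Option<String>,
}

impl Tree {
    /// Consuming setter: takes ownership and returns the updated value.
    fn with_sync_metrics_tx(mut self, tx: String) -> Self {
        self.metrics_tx = Some(tx);
        self
    }
}

fn main() {
    let tree = Tree::default().with_sync_metrics_tx("metrics".to_string());
    assert!(tree.metrics_tx.is_some());
}
```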
-
-    /// Check if the block is known to the blockchain tree or database and return its status.
-    ///
-    /// Function will check:
-    /// * if block is inside database returns [`BlockStatus::Valid`].
-    /// * if block is inside buffer returns [`BlockStatus::Disconnected`].
-    /// * if block is part of the canonical chain returns [`BlockStatus::Valid`].
-    ///
-    /// Returns an error if
-    /// - an error occurred while reading from the database.
-    /// - the block is already finalized
-    pub(crate) fn is_block_known(
-        &self,
-        block: BlockNumHash,
-    ) -> Result<Option<BlockStatus>, InsertBlockErrorKind> {
-        // check if block is canonical
-        if self.is_block_hash_canonical(&block.hash)? {
-            return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical)));
-        }
-
-        let last_finalized_block = self.block_indices().last_finalized_block();
-        // check db if block is finalized.
-        if block.number <= last_finalized_block {
-            // check if block is inside database
-            if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() {
-                return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical)));
-            }
-
-            return Err(BlockchainTreeError::PendingBlockIsFinalized {
-                last_finalized: last_finalized_block,
-            }
-            .into())
-        }
-
-        // is block inside chain
-        if let Some(attachment) = self.is_block_inside_sidechain(&block) {
-            return Ok(Some(BlockStatus::Valid(attachment)));
-        }
-
-        // check if block is disconnected
-        if let Some(block) = self.state.buffered_blocks.block(&block.hash) {
-            return Ok(Some(BlockStatus::Disconnected {
-                head: self.state.block_indices.canonical_tip(),
-                missing_ancestor: block.parent_num_hash(),
-            }))
-        }
-
-        Ok(None)
-    }
-
-    /// Expose internal indices of the `BlockchainTree`.
-    #[inline]
-    pub const fn block_indices(&self) -> &BlockIndices {
-        self.state.block_indices()
-    }
-
-    /// Returns the block with matching hash from any side-chain.
-    ///
-    /// Caution: This will not return blocks from the canonical chain.
-    #[inline]
-    pub fn sidechain_block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> {
-        self.state.block_by_hash(block_hash)
-    }
-
-    /// Returns the block with matching hash from any side-chain.
-    ///
-    /// Caution: This will not return blocks from the canonical chain.
-    #[inline]
-    pub fn block_with_senders_by_hash(
-        &self,
-        block_hash: BlockHash,
-    ) -> Option<&SealedBlockWithSenders> {
-        self.state.block_with_senders_by_hash(block_hash)
-    }
-
-    /// Returns the block's receipts with matching hash from any side-chain.
-    ///
-    /// Caution: This will not return blocks from the canonical chain.
-    pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<Vec<&Receipt>> {
-        self.state.receipts_by_block_hash(block_hash)
-    }
-
-    /// Returns the block that's considered the `Pending` block, if it exists.
-    pub fn pending_block(&self) -> Option<&SealedBlock> {
-        let b = self.block_indices().pending_block_num_hash()?;
-        self.sidechain_block_by_hash(b.hash)
-    }
-
-    /// Return items needed to execute on the pending state.
-    /// This includes:
-    /// * `BlockHash` of the canonical block that the chain connects to. Needed for creating a
-    ///   database provider for the rest of the state.
-    /// * `BundleState` changes that happened at the asked `block_hash`.
-    /// * `BTreeMap<BlockNumber, BlockHash>` list of past pending and canonical hashes that are
-    ///   needed for the EVM `BLOCKHASH` opcode.
-    ///
-    /// Returns `None` if:
-    /// * the block is unknown.
-    /// * the `chain_id` is not present in state.
-    /// * there are no parent hashes stored.
- pub fn post_state_data(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); - - let canonical_chain = self.state.block_indices.canonical_chain(); - - // if it is part of the chain - if let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) { - trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain"); - // get block state - let Some(chain) = self.state.chains.get(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain with ID not present"); - return None; - }; - let block_number = chain.block_number(block_hash)?; - let execution_outcome = chain.execution_outcome_at_block(block_number)?; - - // get parent hashes - let mut parent_block_hashes = self.all_chain_hashes(chain_id); - let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value() - else { - debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored"); - return None; - }; - let canonical_chain = canonical_chain - .iter() - .filter(|&(key, _)| &key < first_pending_block_number) - .collect::>(); - parent_block_hashes.extend(canonical_chain); - - // get canonical fork. - let canonical_fork = self.canonical_fork(chain_id)?; - return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork }); - } - - // check if there is canonical block - if let Some(canonical_number) = canonical_chain.canonical_number(&block_hash) { - trace!(target: "blockchain_tree", %block_hash, "Constructing post state data based on canonical chain"); - return Some(ExecutionData { - canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, - execution_outcome: ExecutionOutcome::default(), - parent_block_hashes: canonical_chain.inner().clone(), - }); - } - - None - } - - /// Try inserting a validated [Self::validate_block] block inside the tree. - /// - /// If the block's parent block is unknown, this returns [`BlockStatus::Disconnected`] and the - /// block will be buffered until the parent block is inserted and then attached to sidechain - #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()), target = "blockchain_tree", ret)] - fn try_insert_validated_block( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - debug_assert!(self.validate_block(&block).is_ok(), "Block must be validated"); - - let parent = block.parent_num_hash(); - - // check if block parent can be found in any side chain. - if let Some(chain_id) = self.block_indices().get_side_chain_id(&parent.hash) { - // found parent in side tree, try to insert there - return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind); - } - - // if not found, check if the parent can be found inside canonical chain. - if self.is_block_hash_canonical(&parent.hash)? { - return self.try_append_canonical_chain(block.clone(), block_validation_kind); - } - - // this is another check to ensure that if the block points to a canonical block its block - // is valid - if let Some(canonical_parent_number) = - self.block_indices().canonical_number(&block.parent_hash) - { - // we found the parent block in canonical chain - if canonical_parent_number != parent.number { - return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: canonical_parent_number, - block_number: block.number, - } - .into()) - } - } - - // if there is a parent inside the buffer, validate against it. 
- if let Some(buffered_parent) = self.state.buffered_blocks.block(&parent.hash) { - self.externals.consensus.validate_header_against_parent(&block, buffered_parent)?; - } - - // insert block inside unconnected block buffer. Delaying its execution. - self.state.buffered_blocks.insert_block(block.clone()); - - let block_hash = block.hash(); - // find the lowest ancestor of the block in the buffer to return as the missing parent - // this shouldn't return None because that only happens if the block was evicted, which - // shouldn't happen right after insertion - let lowest_ancestor = self - .state - .buffered_blocks - .lowest_ancestor(&block_hash) - .ok_or(BlockchainTreeError::BlockBufferingFailed { block_hash })?; - - Ok(BlockStatus::Disconnected { - head: self.state.block_indices.canonical_tip(), - missing_ancestor: lowest_ancestor.parent_num_hash(), - }) - } - - /// This tries to append the given block to the canonical chain. - /// - /// WARNING: this expects that the block extends the canonical chain: The block's parent is - /// part of the canonical chain (e.g. the block's parent is the latest canonical hash). See also - /// [Self::is_block_hash_canonical]. - #[instrument(level = "trace", skip_all, target = "blockchain_tree")] - fn try_append_canonical_chain( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - let parent = block.parent_num_hash(); - let block_num_hash = block.num_hash(); - debug!(target: "blockchain_tree", head = ?block_num_hash.hash, ?parent, "Appending block to canonical chain"); - - let provider = self.externals.provider_factory.provider()?; - - // Validate that the block is post merge - let parent_td = provider - .header_td(&block.parent_hash)? - .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; - - if !self - .externals - .provider_factory - .chain_spec() - .fork(EthereumHardfork::Paris) - .active_at_ttd(parent_td, U256::ZERO) - { - return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { - hash: block.hash(), - }) - .into()) - } - - let parent_header = provider - .header(&block.parent_hash)? - .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; - - let parent_sealed_header = SealedHeader::new(parent_header, block.parent_hash); - - let canonical_chain = self.state.block_indices.canonical_chain(); - - let block_attachment = if block.parent_hash == canonical_chain.tip().hash { - BlockAttachment::Canonical - } else { - BlockAttachment::HistoricalFork - }; - - let chain = AppendableChain::new_canonical_fork( - block, - &parent_sealed_header, - canonical_chain.inner(), - parent, - &self.externals, - block_attachment, - block_validation_kind, - )?; - - self.insert_chain(chain); - self.try_connect_buffered_blocks(block_num_hash); - - Ok(BlockStatus::Valid(block_attachment)) - } - - /// Try inserting a block into the given side chain. 
-    ///
-    /// WARNING: This expects a valid side chain id, see [`BlockIndices::get_side_chain_id`]
-    #[instrument(level = "trace", skip_all, target = "blockchain_tree")]
-    fn try_insert_block_into_side_chain(
-        &mut self,
-        block: SealedBlockWithSenders,
-        chain_id: SidechainId,
-        block_validation_kind: BlockValidationKind,
-    ) -> Result<BlockStatus, InsertBlockErrorKind> {
-        let block_num_hash = block.num_hash();
-        debug!(target: "blockchain_tree", ?block_num_hash, ?chain_id, "Inserting block into side chain");
-        // Create a new sidechain by forking the given chain, or append the block if the parent
-        // block is the top of the given chain.
-        let block_hashes = self.all_chain_hashes(chain_id);
-
-        // get canonical fork.
-        let canonical_fork = self.canonical_fork(chain_id).ok_or_else(|| {
-            BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }
-        })?;
-
-        // get chain that block needs to join to.
-        let parent_chain = self.state.chains.get_mut(&chain_id).ok_or_else(|| {
-            BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }
-        })?;
-
-        let chain_tip = parent_chain.tip().hash();
-        let canonical_chain = self.state.block_indices.canonical_chain();
-
-        // append the block if it is continuing the side chain.
-        let block_attachment = if chain_tip == block.parent_hash {
-            // check if the chain extends the currently tracked canonical head
-            let block_attachment = if canonical_fork.hash == canonical_chain.tip().hash {
-                BlockAttachment::Canonical
-            } else {
-                BlockAttachment::HistoricalFork
-            };
-
-            let block_hash = block.hash();
-            let block_number = block.number;
-            debug!(target: "blockchain_tree", ?block_hash, ?block_number, "Appending block to side chain");
-            parent_chain.append_block(
-                block,
-                block_hashes,
-                canonical_chain.inner(),
-                &self.externals,
-                canonical_fork,
-                block_attachment,
-                block_validation_kind,
-            )?;
-
-            self.state.block_indices.insert_non_fork_block(block_number, block_hash, chain_id);
-            block_attachment
-        } else {
-            debug!(target: "blockchain_tree", ?canonical_fork, "Starting new fork from side chain");
-            // the block starts a new fork
-            let chain = parent_chain.new_chain_fork(
-                block,
-                block_hashes,
-                canonical_chain.inner(),
-                canonical_fork,
-                &self.externals,
-                block_validation_kind,
-            )?;
-            self.insert_chain(chain);
-            BlockAttachment::HistoricalFork
-        };
-
-        // After we inserted the block, we try to connect any buffered blocks
-        self.try_connect_buffered_blocks(block_num_hash);
-
-        Ok(BlockStatus::Valid(block_attachment))
-    }
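The fork-walk that `all_chain_hashes` and `canonical_fork` below perform is pointer chasing through a map of sidechains until a block is reached that no sidechain owns. A minimal sketch of the same loop, with `u64` ids and a caller-supplied canonicality check standing in for reth's types:

```rust
use std::collections::HashMap;

type Hash = u64;
type ChainId = u64;

struct Side {
    /// the block this sidechain forked off (its parent on some other chain)
    fork_block: Hash,
}

/// Walk parent chains until the fork block is no longer inside any sidechain,
/// mirroring the loop in `canonical_fork`: that block must then be canonical,
/// otherwise the chain is dangling and the caller gets `None`.
fn canonical_fork(
    mut chain_id: ChainId,
    chains: &HashMap<ChainId, Side>,
    block_to_chain: &HashMap<Hash, ChainId>,
    is_canonical: impl Fn(Hash) -> bool,
) -> Option<Hash> {
    loop {
        let fork = chains.get(&chain_id)?.fork_block;
        match block_to_chain.get(&fork) {
            // the fork block lives on another sidechain: keep walking
            Some(&parent) => chain_id = parent,
            None => return is_canonical(fork).then_some(fork),
        }
    }
}

fn main() {
    // chain 1 forks off block 100 (canonical); chain 2 forks off block 5 on chain 1.
    let chains = HashMap::from([(1, Side { fork_block: 100 }), (2, Side { fork_block: 5 })]);
    let block_to_chain = HashMap::from([(5, 1)]);
    let canon = |h: Hash| h == 100;
    assert_eq!(canonical_fork(2, &chains, &block_to_chain, canon), Some(100));
}
```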
-    /// Get all block hashes from a sidechain that are not part of the canonical chain.
-    /// This is a one-time operation per block.
-    ///
-    /// # Note
-    ///
-    /// This is not cached in order to save memory.
-    fn all_chain_hashes(&self, chain_id: SidechainId) -> BTreeMap<BlockNumber, BlockHash> {
-        let mut chain_id = chain_id;
-        let mut hashes = BTreeMap::new();
-        loop {
-            let Some(chain) = self.state.chains.get(&chain_id) else { return hashes };
-
-            // The parent chains might contain blocks with overlapping numbers or numbers greater
-            // than original chain tip. Insert the block hash only if it's not present
-            // for the given block number and the block number does not exceed the
-            // original chain tip.
-            let latest_block_number = hashes
-                .last_key_value()
-                .map(|(number, _)| *number)
-                .unwrap_or_else(|| chain.tip().number);
-            for block in chain.blocks().values().filter(|b| b.number <= latest_block_number) {
-                if let Entry::Vacant(e) = hashes.entry(block.number) {
-                    e.insert(block.hash());
-                }
-            }
-
-            let fork_block = chain.fork_block();
-            if let Some(next_chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) {
-                chain_id = next_chain_id;
-            } else {
-                // if the fork block does not point to another chain, break the loop:
-                // it means that this fork joins the canonical chain.
-                break
-            }
-        }
-        hashes
-    }
-
-    /// Get the block at which the given chain forks off the current canonical chain.
-    ///
-    /// This is used to figure out what kind of state provider the executor should use to execute
-    /// the block on.
-    ///
-    /// Returns `None` if the chain is unknown.
-    fn canonical_fork(&self, chain_id: SidechainId) -> Option<ForkBlock> {
-        let mut chain_id = chain_id;
-        let mut fork;
-        loop {
-            // chain fork block
-            fork = self.state.chains.get(&chain_id)?.fork_block();
-            // get fork block chain
-            if let Some(fork_chain_id) = self.block_indices().get_side_chain_id(&fork.hash) {
-                chain_id = fork_chain_id;
-                continue
-            }
-            break
-        }
-        (self.block_indices().canonical_hash(&fork.number) == Some(fork.hash)).then_some(fork)
-    }
-
-    /// Insert a chain into the tree.
-    ///
-    /// Inserts a chain into the tree and builds the block indices.
-    fn insert_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
-        self.state.insert_chain(chain)
-    }
-
-    /// Iterate over all child chains that depend on this block and return
-    /// their ids.
-    fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet<SidechainId> {
-        // Find all forks of given block.
-        let mut dependent_block =
-            self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default();
-        let mut dependent_chains = HashSet::default();
-
-        while let Some(block) = dependent_block.pop_back() {
-            // Get chain of dependent block.
-            let Some(chain_id) = self.block_indices().get_side_chain_id(&block) else {
-                debug!(target: "blockchain_tree", ?block, "Block not in tree");
-                return Default::default();
-            };
-
-            // Find all blocks that fork from this chain.
-            let Some(chain) = self.state.chains.get(&chain_id) else {
-                debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree");
-                return Default::default();
-            };
-            for chain_block in chain.blocks().values() {
-                if let Some(forks) = self.block_indices().fork_to_child().get(&chain_block.hash()) {
-                    // If there are sub forks append them for processing.
-                    dependent_block.extend(forks);
-                }
-            }
-            // Insert dependent chain id.
-            dependent_chains.insert(chain_id);
-        }
-        dependent_chains
-    }
-
-    /// Inserts unwound chain back into the tree and updates any dependent chains.
-    ///
-    /// This method searches for any chain that depended on this block being part of the canonical
-    /// chain. Each dependent chain's state is then updated with state entries removed from the
-    /// plain state during the unwind.
-    /// Returns the result of inserting the chain, or `None` if any of the dependent chains is not
-    /// in the tree.
-    fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
-        // iterate over all blocks in chain and find any fork blocks that are in tree.
-        for (number, block) in chain.blocks() {
-            let hash = block.hash();
-
-            // find all chains that fork from this block.
- let chains_to_bump = self.find_all_dependent_chains(&hash); - if !chains_to_bump.is_empty() { - // if there is such chain, revert state to this block. - let mut cloned_execution_outcome = chain.execution_outcome().clone(); - cloned_execution_outcome.revert_to(*number); - - // prepend state to all chains that fork from this block. - for chain_id in chains_to_bump { - let Some(chain) = self.state.chains.get_mut(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); - return None; - }; - - debug!(target: "blockchain_tree", - unwound_block= ?block.num_hash(), - chain_id = ?chain_id, - chain_tip = ?chain.tip().num_hash(), - "Prepend unwound block state to blockchain tree chain"); - - chain.prepend_state(cloned_execution_outcome.state().clone()) - } - } - } - // Insert unwound chain to the tree. - self.insert_chain(chain) - } - - /// Checks the block buffer for the given block. - pub fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.state.get_buffered_block(hash) - } - - /// Gets the lowest ancestor for the given block in the block buffer. - pub fn lowest_buffered_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.state.lowest_buffered_ancestor(hash) - } - - /// Insert a new block into the tree. - /// - /// # Note - /// - /// This recovers transaction signers (unlike [`BlockchainTree::insert_block`]). - pub fn insert_block_without_senders( - &mut self, - block: SealedBlock, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block, BlockValidationKind::Exhaustive), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Insert block for future execution. - /// - /// Returns an error if the block is invalid. - pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - // validate block consensus rules - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)); - } - - self.state.buffered_blocks.insert_block(block); - Ok(()) - } - - /// Validate if block is correct and satisfies all the consensus rules that concern the header - /// and block body itself. - fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { - if let Err(e) = - self.externals.consensus.validate_header_with_total_difficulty(block, U256::MAX) - { - error!(?block, "Failed to validate total difficulty for block {}: {e}", block.hash()); - return Err(e); - } - - if let Err(e) = self.externals.consensus.validate_header(block) { - error!(?block, "Failed to validate header {}: {e}", block.hash()); - return Err(e); - } - - if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { - error!(?block, "Failed to validate block {}: {e}", block.hash()); - return Err(e); - } - - Ok(()) - } - - /// Check if block is found inside a sidechain and its attachment. 
- /// - /// if it is canonical or extends the canonical chain, return [`BlockAttachment::Canonical`] - /// if it does not extend the canonical chain, return [`BlockAttachment::HistoricalFork`] - /// if the block is not in the tree or its chain id is not valid, return None - #[track_caller] - fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option { - // check if block known and is already in the tree - if let Some(chain_id) = self.block_indices().get_side_chain_id(&block.hash) { - // find the canonical fork of this chain - let Some(canonical_fork) = self.canonical_fork(chain_id) else { - debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid"); - return None; - }; - // if the block's chain extends canonical chain - return if canonical_fork == self.block_indices().canonical_tip() { - Some(BlockAttachment::Canonical) - } else { - Some(BlockAttachment::HistoricalFork) - }; - } - None - } - - /// Insert a block (with recovered senders) into the tree. - /// - /// Returns the [`BlockStatus`] on success: - /// - /// - The block is already part of a sidechain in the tree, or - /// - The block is already part of the canonical chain, or - /// - The parent is part of a sidechain in the tree, and we can fork at this block, or - /// - The parent is part of the canonical chain, and we can fork at this block - /// - /// Otherwise, an error is returned, indicating that neither the block nor its parent are part - /// of the chain or any sidechains. - /// - /// This means that if the block becomes canonical, we need to fetch the missing blocks over - /// P2P. - /// - /// If the [`BlockValidationKind::SkipStateRootValidation`] variant is provided the state root - /// is not validated. - /// - /// # Note - /// - /// If the senders have not already been recovered, call - /// [`BlockchainTree::insert_block_without_senders`] instead. - pub fn insert_block( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - // check if we already have this block - match self.is_block_known(block.num_hash()) { - Ok(Some(status)) => return Ok(InsertPayloadOk::AlreadySeen(status)), - Err(err) => return Err(InsertBlockError::new(block.block, err)), - _ => {} - } - - // validate block consensus rules - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)); - } - - let status = self - .try_insert_validated_block(block.clone(), block_validation_kind) - .map_err(|kind| InsertBlockError::new(block.block, kind))?; - Ok(InsertPayloadOk::Inserted(status)) - } - - /// Discard all blocks that precede block number from the buffer. - pub fn remove_old_blocks(&mut self, block: BlockNumber) { - self.state.buffered_blocks.remove_old_blocks(block); - } - - /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. - pub fn finalize_block(&mut self, finalized_block: BlockNumber) -> ProviderResult<()> { - // remove blocks - let mut remove_chains = self.state.block_indices.finalize_canonical_blocks( - finalized_block, - self.config.num_of_additional_canonical_block_hashes(), - ); - // remove chains of removed blocks - while let Some(chain_id) = remove_chains.pop_first() { - if let Some(chain) = self.state.chains.remove(&chain_id) { - remove_chains.extend(self.state.block_indices.remove_chain(&chain)); - } - } - // clean block buffer. - self.remove_old_blocks(finalized_block); - - // save finalized block in db. 
- self.externals.save_finalized_block_number(finalized_block)?; - - Ok(()) - } - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - /// - /// # Note - /// - /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using - /// [`BlockchainTree::finalize_block`]). - pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &mut self, - last_finalized_block: BlockNumber, - ) -> ProviderResult<()> { - self.finalize_block(last_finalized_block)?; - - let last_canonical_hashes = self.update_block_hashes()?; - - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; - - Ok(()) - } - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them and removes all chains. - pub fn update_block_hashes(&mut self) -> ProviderResult> { - let last_canonical_hashes = self - .externals - .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; - - let (mut remove_chains, _) = - self.state.block_indices.update_block_hashes(last_canonical_hashes.clone()); - - // remove all chains that got discarded - while let Some(chain_id) = remove_chains.first() { - if let Some(chain) = self.state.chains.remove(chain_id) { - remove_chains.extend(self.state.block_indices.remove_chain(&chain)); - } - } - - Ok(last_canonical_hashes) - } - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered - /// blocks before the tip. - pub fn update_block_hashes_and_clear_buffered( - &mut self, - ) -> ProviderResult> { - let chain = self.update_block_hashes()?; - - if let Some((block, _)) = chain.last_key_value() { - self.remove_old_blocks(*block); - } - - Ok(chain) - } - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> { - let last_canonical_hashes = self - .externals - .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; - - Ok(()) - } - - fn connect_buffered_blocks_to_hashes( - &mut self, - hashes: impl IntoIterator>, - ) -> ProviderResult<()> { - // check unconnected block buffer for children of the canonical hashes - for added_block in hashes { - self.try_connect_buffered_blocks(added_block.into()) - } - - // check unconnected block buffer for children of the chains - let mut all_chain_blocks = Vec::new(); - for chain in self.state.chains.values() { - all_chain_blocks.reserve_exact(chain.blocks().len()); - for (&number, block) in chain.blocks() { - all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) - } - } - for block in all_chain_blocks { - self.try_connect_buffered_blocks(block) - } - - Ok(()) - } - - /// Connect unconnected (buffered) blocks if the new block closes a gap. 
- /// - /// This will try to insert all children of the new block, extending its chain. - /// - /// If all children are valid, then this essentially appends all child blocks to the - /// new block's chain. - fn try_connect_buffered_blocks(&mut self, new_block: BlockNumHash) { - trace!(target: "blockchain_tree", ?new_block, "try_connect_buffered_blocks"); - - // first remove all the children of the new block from the buffer - let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash); - // then try to reinsert them into the tree - for block in include_blocks { - // don't fail on error, just ignore the block. - let _ = self - .try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation) - .map_err(|err| { - debug!(target: "blockchain_tree", %err, "Failed to insert buffered block"); - err - }); - } - } - - /// Removes chain corresponding to provided chain id from block indices, - /// splits it at split target, and returns the canonical part of it. - /// Returns [None] if chain is missing. - /// - /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. - fn remove_and_split_chain( - &mut self, - chain_id: SidechainId, - split_at: ChainSplitTarget, - ) -> Option { - let chain = self.state.chains.remove(&chain_id)?; - match chain.into_inner().split(split_at) { - ChainSplit::Split { canonical, pending } => { - trace!(target: "blockchain_tree", ?canonical, ?pending, "Split chain"); - // rest of split chain is inserted back with same chain_id. - self.state.block_indices.insert_chain(chain_id, &pending); - self.state.chains.insert(chain_id, AppendableChain::new(pending)); - Some(canonical) - } - ChainSplit::NoSplitCanonical(canonical) => { - trace!(target: "blockchain_tree", "No split on canonical chain"); - Some(canonical) - } - ChainSplit::NoSplitPending(_) => { - unreachable!("Should not happen as block indices guarantee structure of blocks") - } - } - } - - /// Attempts to find the header for the given block hash if it is canonical. - /// - /// Returns `Ok(None)` if the block hash is not canonical (block hash does not exist, or is - /// included in a sidechain). - /// - /// Note: this does not distinguish between a block that is finalized and a block that is not - /// finalized yet, only whether it is part of the canonical chain or not. - pub fn find_canonical_header( - &self, - hash: &BlockHash, - ) -> Result, ProviderError> { - // if the indices show that the block hash is not canonical, it's either in a sidechain or - // canonical, but in the db. If it is in a sidechain, it is not canonical. If it is missing - // in the db, then it is also not canonical. - - let provider = self.externals.provider_factory.provider()?; - - let mut header = None; - if let Some(num) = self.block_indices().canonical_number(hash) { - header = provider.header_by_number(num)?; - } - - if header.is_none() && self.sidechain_block_by_hash(*hash).is_some() { - return Ok(None) - } - - if header.is_none() { - header = provider.header(hash)? - } - - Ok(header.map(|header| SealedHeader::new(header, *hash))) - } - - /// Determines whether or not a block is canonical, checking the db if necessary. - /// - /// Note: this does not distinguish between a block that is finalized and a block that is not - /// finalized yet, only whether it is part of the canonical chain or not. 
- pub fn is_block_hash_canonical(&self, hash: &BlockHash) -> Result { - self.find_canonical_header(hash).map(|header| header.is_some()) - } - - /// Make a block and its parent(s) part of the canonical chain and commit them to the database - /// - /// # Note - /// - /// This unwinds the database if necessary, i.e. if parts of the canonical chain have been - /// reorged. - /// - /// # Returns - /// - /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. - #[track_caller] - #[instrument(level = "trace", skip(self), target = "blockchain_tree")] - pub fn make_canonical( - &mut self, - block_hash: BlockHash, - ) -> Result { - let mut durations_recorder = MakeCanonicalDurationsRecorder::default(); - - let old_block_indices = self.block_indices().clone(); - let old_buffered_blocks = self.state.buffered_blocks.parent_to_child.clone(); - durations_recorder.record_relative(MakeCanonicalAction::CloneOldBlocks); - - // If block is already canonical don't return error. - let canonical_header = self.find_canonical_header(&block_hash)?; - durations_recorder.record_relative(MakeCanonicalAction::FindCanonicalHeader); - if let Some(header) = canonical_header { - info!(target: "blockchain_tree", %block_hash, "Block is already canonical, ignoring."); - // TODO: this could be fetched from the chainspec first - let td = - self.externals.provider_factory.provider()?.header_td(&block_hash)?.ok_or_else( - || { - CanonicalError::from(BlockValidationError::MissingTotalDifficulty { - hash: block_hash, - }) - }, - )?; - - if !self - .externals - .provider_factory - .chain_spec() - .fork(EthereumHardfork::Paris) - .active_at_ttd(td, U256::ZERO) - { - return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { - hash: block_hash, - })) - } - - let head = self.state.block_indices.canonical_tip(); - return Ok(CanonicalOutcome::AlreadyCanonical { header, head }); - } - - let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) else { - debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); - return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { - block_hash, - })) - }; - - // we are splitting chain at the block hash that we want to make canonical - let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else { - debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); - return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { - chain_id: chain_id.into(), - })) - }; - trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical"); - durations_recorder.record_relative(MakeCanonicalAction::SplitChain); - - let mut fork_block = canonical.fork_block(); - let mut chains_to_promote = vec![canonical]; - - // loop while fork blocks are found in Tree. - while let Some(chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) { - // canonical chain is lower part of the chain. 
- let Some(canonical) = - self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) - else { - debug!(target: "blockchain_tree", ?fork_block, ?chain_id, "Fork not present"); - return Err(CanonicalError::from( - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }, - )); - }; - fork_block = canonical.fork_block(); - chains_to_promote.push(canonical); - } - durations_recorder.record_relative(MakeCanonicalAction::SplitChainForks); - - let old_tip = self.block_indices().canonical_tip(); - // Merge all chains into one chain. - let Some(mut new_canon_chain) = chains_to_promote.pop() else { - debug!(target: "blockchain_tree", "No blocks in the chain to make canonical"); - return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { - block_hash: fork_block.hash, - })) - }; - trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); - let mut chain_appended = false; - for chain in chains_to_promote.into_iter().rev() { - trace!(target: "blockchain_tree", ?chain, "Appending chain"); - let block_hash = chain.fork_block().hash; - new_canon_chain.append_chain(chain).map_err(|_| { - CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }) - })?; - chain_appended = true; - } - durations_recorder.record_relative(MakeCanonicalAction::MergeAllChains); - - if chain_appended { - trace!(target: "blockchain_tree", ?new_canon_chain, "Canonical chain appended"); - } - // update canonical index - self.state.block_indices.canonicalize_blocks(new_canon_chain.blocks()); - durations_recorder.record_relative(MakeCanonicalAction::UpdateCanonicalIndex); - - debug!( - target: "blockchain_tree", - "Committing new canonical chain: {}", DisplayBlocksChain(new_canon_chain.blocks()) - ); - - // If chain extends the tip - let chain_notification = if new_canon_chain.fork_block().hash == old_tip.hash { - // Commit new canonical chain to database. - self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; - CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } - } else { - // It forks to canonical block that is not the tip. - let canon_fork: BlockNumHash = new_canon_chain.fork_block(); - // sanity check - if self.block_indices().canonical_hash(&canon_fork.number) != Some(canon_fork.hash) { - error!( - target: "blockchain_tree", - ?canon_fork, - block_indices=?self.block_indices(), - "All chains should point to canonical chain" - ); - unreachable!("all chains should point to canonical chain."); - } - - let old_canon_chain = - self.revert_canonical_from_database(canon_fork.number).inspect_err(|error| { - error!( - target: "blockchain_tree", - "Reverting canonical chain failed with error: {:?}\n\ - Old BlockIndices are:{:?}\n\ - New BlockIndices are: {:?}\n\ - Old BufferedBlocks are:{:?}", - error, old_block_indices, self.block_indices(), old_buffered_blocks - ); - })?; - durations_recorder - .record_relative(MakeCanonicalAction::RevertCanonicalChainFromDatabase); - - // Commit new canonical chain. - self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; - - if let Some(old_canon_chain) = old_canon_chain { - self.update_reorg_metrics(old_canon_chain.len() as f64); - - // Insert old canonical chain back into tree. 
- self.insert_unwound_chain(AppendableChain::new(old_canon_chain.clone())); - durations_recorder.record_relative(MakeCanonicalAction::InsertOldCanonicalChain); - - CanonStateNotification::Reorg { - old: Arc::new(old_canon_chain), - new: Arc::new(new_canon_chain), - } - } else { - // error here to confirm that we are reverting nothing from db. - error!(target: "blockchain_tree", %block_hash, "Nothing was removed from database"); - CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } - } - }; - - debug!( - target: "blockchain_tree", - actions = ?durations_recorder.actions, - "Canonicalization finished" - ); - - // clear trie updates for other children - self.block_indices() - .fork_to_child() - .get(&old_tip.hash) - .cloned() - .unwrap_or_default() - .into_iter() - .for_each(|child| { - if let Some(chain_id) = self.block_indices().get_side_chain_id(&child) { - if let Some(chain) = self.state.chains.get_mut(&chain_id) { - chain.clear_trie_updates(); - } - } - }); - - durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren); - - // Send notification about new canonical chain and return outcome of canonicalization. - let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() }; - let _ = self.canon_state_notification_sender.send(chain_notification); - Ok(outcome) - } - - /// Write the given chain to the database as canonical. - fn commit_canonical_to_database( - &self, - chain: Chain, - recorder: &mut MakeCanonicalDurationsRecorder, - ) -> Result<(), CanonicalError> { - let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); - let prefix_sets = hashed_state.construct_prefix_sets().freeze(); - let hashed_state_sorted = hashed_state.into_sorted(); - - // Compute state root or retrieve cached trie updates before opening write transaction. - let block_hash_numbers = - blocks.iter().map(|(number, b)| (number, b.hash())).collect::>(); - let trie_updates = match chain_trie_updates { - Some(updates) => { - debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Using cached trie updates"); - self.metrics.trie_updates_insert_cached.increment(1); - updates - } - None => { - debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Recomputing state root for insert"); - let provider = self - .externals - .provider_factory - .provider()? - // State root calculation can take a while, and we're sure no write transaction - // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/6168. 
-                    .disable_long_read_transaction_safety();
-                let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref())
-                    .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(
-                        DatabaseHashedCursorFactory::new(provider.tx_ref()),
-                        &hashed_state_sorted,
-                    ))
-                    .with_prefix_sets(prefix_sets)
-                    .root_with_updates()
-                    .map_err(BlockValidationError::from)?;
-                let tip = blocks.tip();
-                if state_root != tip.state_root {
-                    return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch {
-                        root: GotExpected { got: state_root, expected: tip.state_root },
-                        block_number: tip.number,
-                        block_hash: tip.hash(),
-                    }))
-                    .into())
-                }
-                self.metrics.trie_updates_insert_recomputed.increment(1);
-                trie_updates
-            }
-        };
-        recorder.record_relative(MakeCanonicalAction::RetrieveStateTrieUpdates);
-
-        let provider_rw = self.externals.provider_factory.provider_rw()?;
-        provider_rw
-            .append_blocks_with_state(
-                blocks.into_blocks().collect(),
-                state,
-                hashed_state_sorted,
-                trie_updates,
-            )
-            .map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?;
-
-        provider_rw.commit()?;
-        recorder.record_relative(MakeCanonicalAction::CommitCanonicalChainToDatabase);
-
-        Ok(())
-    }
-
-    /// Unwinds the database tables down to the given block and inserts the unwound chain
-    /// back into the tree state.
-    pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> {
-        // nothing to be done if unwind_to is higher than the tip
-        if self.block_indices().canonical_tip().number <= unwind_to {
-            return Ok(());
-        }
-        // revert `N` blocks from the current canonical chain and put them inside the
-        // BlockchainTree
-        let old_canon_chain = self.revert_canonical_from_database(unwind_to)?;
-
-        // check if there are blocks in the chain
-        if let Some(old_canon_chain) = old_canon_chain {
-            self.state.block_indices.unwind_canonical_chain(unwind_to);
-            // insert old canonical chain into the BlockchainTree.
-            self.insert_unwound_chain(AppendableChain::new(old_canon_chain));
-        }
-
-        Ok(())
-    }
-
-    /// Reverts the canonical chain down to the given block from the database and returns the
-    /// unwound chain.
-    ///
-    /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database.
-    fn revert_canonical_from_database(
-        &self,
-        revert_until: BlockNumber,
-    ) -> Result<Option<Chain>, CanonicalError> {
-        // This should only happen when an optimistic sync target was re-orged.
-        //
-        // Static files generally contain finalized data. The blockchain tree only deals
-        // with non-finalized data. The only scenario where canonical reverts go past the highest
-        // static file is when an optimistic sync occurred and non-finalized data was written to
-        // static files.
-        if self
-            .externals
-            .provider_factory
-            .static_file_provider()
-            .get_highest_static_file_block(StaticFileSegment::Headers)
-            .unwrap_or_default() >
-            revert_until
-        {
-            trace!(
-                target: "blockchain_tree",
-                "Reverting optimistic canonical chain to block {}",
-                revert_until
-            );
-            return Err(CanonicalError::OptimisticTargetRevert(revert_until));
-        }
-
-        // read data that is needed for the new sidechain
-        let provider_rw = self.externals.provider_factory.provider_rw()?;
-
-        let tip = provider_rw.last_block_number()?;
-        let revert_range = (revert_until + 1)..=tip;
-        info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range);
-        // Read the blocks and execution results from the database, and remove the blocks' traces
-        // from the tables.
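        // `take_block_and_execution_above` is exclusive of `revert_until`: the block at
        // `revert_until` itself stays in the database, matching the "non-inclusive"
        // wording in the doc comment above.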
-        let blocks_and_execution = provider_rw
-            .take_block_and_execution_above(revert_until, StorageLocation::Database)
-            .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?;
-
-        provider_rw.commit()?;
-
-        if blocks_and_execution.is_empty() {
-            Ok(None)
-        } else {
-            Ok(Some(blocks_and_execution))
-        }
-    }
-
-    fn update_reorg_metrics(&self, reorg_depth: f64) {
-        self.metrics.reorgs.increment(1);
-        self.metrics.latest_reorg_depth.set(reorg_depth);
-    }
-
-    /// Update blockchain tree chains (canonical and sidechains) and sync metrics.
-    ///
-    /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync
-    /// checkpoint metric will get overwritten. Buffered blocks metrics are updated in
-    /// [`BlockBuffer`](crate::block_buffer::BlockBuffer) during the pipeline sync.
-    pub(crate) fn update_chains_metrics(&mut self) {
-        let height = self.state.block_indices.canonical_tip().number;
-
-        let longest_sidechain_height =
-            self.state.chains.values().map(|chain| chain.tip().number).max();
-        if let Some(longest_sidechain_height) = longest_sidechain_height {
-            self.metrics.longest_sidechain_height.set(longest_sidechain_height as f64);
-        }
-
-        self.metrics.sidechains.set(self.state.chains.len() as f64);
-        self.metrics.canonical_chain_height.set(height as f64);
-        if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() {
-            let _ = metrics_tx.send(MetricEvent::SyncHeight { height });
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH};
-    use alloy_eips::{
-        eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, INITIAL_BASE_FEE},
-        eip4895::Withdrawals,
-    };
-    use alloy_genesis::{Genesis, GenesisAccount};
-    use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256};
-    use assert_matches::assert_matches;
-    use linked_hash_set::LinkedHashSet;
-    use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS};
-    use reth_consensus::test_utils::TestConsensus;
-    use reth_db::tables;
-    use reth_db_api::transaction::DbTxMut;
-    use reth_ethereum_evm::execute::EthExecutorProvider;
-    use reth_evm::test_utils::MockExecutorProvider;
-    use reth_node_types::FullNodePrimitives;
-    use reth_primitives::{
-        proofs::{calculate_receipt_root, calculate_transaction_root},
-        Account, BlockBody, RecoveredTx, Transaction, TransactionSigned,
-    };
-    use reth_provider::{
-        providers::ProviderNodeTypes,
-        test_utils::{
-            blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec,
-            MockNodeTypesWithDB,
-        },
-        ProviderFactory, StorageLocation,
-    };
-    use reth_stages_api::StageCheckpoint;
-    use reth_trie::{root::state_root_unhashed, StateRoot};
-    use std::collections::HashMap;
-
-    fn setup_externals(
-        exec_res: Vec<ExecutionOutcome>,
-    ) -> TreeExternals<MockNodeTypesWithDB, MockExecutorProvider> {
-        let chain_spec = Arc::new(
-            ChainSpecBuilder::default()
-                .chain(MAINNET.chain)
-                .genesis(MAINNET.genesis.clone())
-                .shanghai_activated()
-                .build(),
-        );
-        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec);
-        let consensus = Arc::new(TestConsensus::default());
-        let executor_factory = MockExecutorProvider::default();
-        executor_factory.extend(exec_res);
-
-        TreeExternals::new(provider_factory, consensus, executor_factory)
-    }
-
-    fn setup_genesis<
-        N: ProviderNodeTypes<
-            Primitives: FullNodePrimitives<
-                BlockBody = reth_primitives::BlockBody,
-                BlockHeader = reth_primitives::Header,
-            >,
-        >,
-    >(
-        factory: &ProviderFactory<N>,
-        mut genesis: SealedBlock,
-    ) {
-        // insert genesis to db.
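        // The genesis block is renumbered to 10 below so the tree starts at a non-zero
        // height, and blocks 0..10 are backfilled with placeholder canonical hashes.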
-
-        genesis.header.set_block_number(10);
-        genesis.header.set_state_root(EMPTY_ROOT_HASH);
-        let provider = factory.provider_rw().unwrap();
-
-        provider
-            .insert_historical_block(
-                genesis.try_seal_with_senders().expect("invalid tx signature in genesis"),
-            )
-            .unwrap();
-
-        // insert first 10 blocks
-        for i in 0..10 {
-            provider
-                .tx_ref()
-                .put::<tables::CanonicalHeaders>(i, B256::new([100 + i as u8; 32]))
-                .unwrap();
-        }
-        provider
-            .tx_ref()
-            .put::<tables::StageCheckpoints>("Finish".to_string(), StageCheckpoint::new(10))
-            .unwrap();
-        provider.commit().unwrap();
-    }
-
-    /// Test data structure that will check tree internals
-    #[derive(Default, Debug)]
-    struct TreeTester {
-        /// Number of chains
-        chain_num: Option<usize>,
-        /// Check block to chain index
-        block_to_chain: Option<HashMap<BlockHash, SidechainId>>,
-        /// Check fork to child index
-        fork_to_child: Option<HashMap<BlockHash, HashSet<BlockHash>>>,
-        /// Pending blocks
-        pending_blocks: Option<(BlockNumber, HashSet<BlockHash>)>,
-        /// Buffered blocks
-        buffered_blocks: Option<HashMap<BlockHash, SealedBlockWithSenders>>,
-    }
-
-    impl TreeTester {
-        const fn with_chain_num(mut self, chain_num: usize) -> Self {
-            self.chain_num = Some(chain_num);
-            self
-        }
-
-        fn with_block_to_chain(mut self, block_to_chain: HashMap<BlockHash, SidechainId>) -> Self {
-            self.block_to_chain = Some(block_to_chain);
-            self
-        }
-
-        fn with_fork_to_child(
-            mut self,
-            fork_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
-        ) -> Self {
-            self.fork_to_child = Some(fork_to_child);
-            self
-        }
-
-        fn with_buffered_blocks(
-            mut self,
-            buffered_blocks: HashMap<BlockHash, SealedBlockWithSenders>,
-        ) -> Self {
-            self.buffered_blocks = Some(buffered_blocks);
-            self
-        }
-
-        fn with_pending_blocks(
-            mut self,
-            pending_blocks: (BlockNumber, HashSet<BlockHash>),
-        ) -> Self {
-            self.pending_blocks = Some(pending_blocks);
-            self
-        }
-
-        fn assert<N: ProviderNodeTypes, E: BlockExecutorProvider>(self, tree: &BlockchainTree<N, E>) {
-            if let Some(chain_num) = self.chain_num {
-                assert_eq!(tree.state.chains.len(), chain_num);
-            }
-            if let Some(block_to_chain) = self.block_to_chain {
-                assert_eq!(*tree.state.block_indices.blocks_to_chain(), block_to_chain);
-            }
-            if let Some(fork_to_child) = self.fork_to_child {
-                let mut x: HashMap<BlockHash, LinkedHashSet<BlockHash>> =
-                    HashMap::with_capacity(fork_to_child.len());
-                for (key, hash_set) in fork_to_child {
-                    x.insert(key, hash_set.into_iter().collect());
-                }
-                assert_eq!(*tree.state.block_indices.fork_to_child(), x);
-            }
-            if let Some(pending_blocks) = self.pending_blocks {
-                let (num, hashes) = tree.state.block_indices.pending_blocks();
-                let hashes = hashes.into_iter().collect::<HashSet<_>>();
-                assert_eq!((num, hashes), pending_blocks);
-            }
-            if let Some(buffered_blocks) = self.buffered_blocks {
-                assert_eq!(*tree.state.buffered_blocks.blocks(), buffered_blocks);
-            }
-        }
-    }
-
-    #[test]
-    fn consecutive_reorgs() {
-        let signer = Address::random();
-        let initial_signer_balance = U256::from(10).pow(U256::from(18));
-        let chain_spec = Arc::new(
-            ChainSpecBuilder::default()
-                .chain(MAINNET.chain)
-                .genesis(Genesis {
-                    alloc: BTreeMap::from([(
-                        signer,
-                        GenesisAccount { balance: initial_signer_balance, ..Default::default() },
-                    )]),
-                    ..MAINNET.genesis.clone()
-                })
-                .shanghai_activated()
-                .build(),
-        );
-        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
-        let consensus = Arc::new(TestConsensus::default());
-        let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone());
-
-        {
-            let provider_rw = provider_factory.provider_rw().unwrap();
-            provider_rw
-                .insert_block(
-                    SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default())
-                        .try_seal_with_senders()
-                        .unwrap(),
-                    StorageLocation::Database,
-                )
-                .unwrap();
-            let account = Account { balance: initial_signer_balance, ..Default::default() };
-            provider_rw.tx_ref().put::<tables::PlainAccountState>(signer, account).unwrap();
-            provider_rw.tx_ref().put::<tables::HashedAccounts>(keccak256(signer), account).unwrap();
-            provider_rw.commit().unwrap();
-        }
-
-        let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS);
-        let mock_tx = |nonce: u64| -> RecoveredTx<_> {
-            TransactionSigned::new_unhashed(
-                Transaction::Eip1559(TxEip1559 {
-                    chain_id: chain_spec.chain.id(),
-                    nonce,
-                    gas_limit: MIN_TRANSACTION_GAS,
-                    to: Address::ZERO.into(),
-                    max_fee_per_gas: INITIAL_BASE_FEE as u128,
-                    ..Default::default()
-                }),
-                Signature::test_signature(),
-            )
-            .with_signer(signer)
-        };
-
-        let mock_block = |number: u64,
-                          parent: Option<B256>,
-                          body: Vec<RecoveredTx<TransactionSigned>>,
-                          num_of_signer_txs: u64|
-         -> SealedBlockWithSenders {
-            let signed_body =
-                body.clone().into_iter().map(|tx| tx.into_tx()).collect::<Vec<_>>();
-            let transactions_root = calculate_transaction_root(&signed_body);
-            let receipts = body
-                .iter()
-                .enumerate()
-                .map(|(idx, tx)| {
-                    Receipt {
-                        tx_type: tx.tx_type(),
-                        success: true,
-                        cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS,
-                        ..Default::default()
-                    }
-                    .with_bloom()
-                })
-                .collect::<Vec<_>>();
-
-            // receipts root computation is different for OP
-            let receipts_root = calculate_receipt_root(&receipts);
-
-            let header = Header {
-                number,
-                parent_hash: parent.unwrap_or_default(),
-                gas_used: body.len() as u64 * MIN_TRANSACTION_GAS,
-                gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
-                mix_hash: B256::random(),
-                base_fee_per_gas: Some(INITIAL_BASE_FEE),
-                transactions_root,
-                receipts_root,
-                state_root: state_root_unhashed(HashMap::from([(
-                    signer,
-                    Account {
-                        balance: initial_signer_balance -
-                            (single_tx_cost * U256::from(num_of_signer_txs)),
-                        nonce: num_of_signer_txs,
-                        ..Default::default()
-                    }
-                    .into_trie_account(EMPTY_ROOT_HASH),
-                )])),
-                ..Default::default()
-            };
-
-            SealedBlockWithSenders::new(
-                SealedBlock::new(
-                    SealedHeader::seal(header),
-                    BlockBody {
-                        transactions: signed_body,
-                        ommers: Vec::new(),
-                        withdrawals: Some(Withdrawals::default()),
-                    },
-                ),
-                body.iter().map(|tx| tx.signer()).collect(),
-            )
-            .unwrap()
-        };
-
-        let fork_block = mock_block(1, Some(chain_spec.genesis_hash()), Vec::from([mock_tx(0)]), 1);
-
-        let canonical_block_1 =
-            mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1), mock_tx(2)]), 3);
-        let canonical_block_2 = mock_block(3, Some(canonical_block_1.hash()), Vec::new(), 3);
-        let canonical_block_3 =
-            mock_block(4, Some(canonical_block_2.hash()), Vec::from([mock_tx(3)]), 4);
-
-        let sidechain_block_1 = mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1)]), 2);
-        let sidechain_block_2 =
-            mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3);
-
-        let mut tree = BlockchainTree::new(
-            TreeExternals::new(provider_factory, consensus, executor_provider),
-            BlockchainTreeConfig::default(),
-        )
-        .expect("failed to create tree");
-
-        tree.insert_block(fork_block.clone(), BlockValidationKind::Exhaustive).unwrap();
-
-        assert_eq!(
-            tree.make_canonical(fork_block.hash()).unwrap(),
-            CanonicalOutcome::Committed { head: fork_block.header.clone() }
-        );
-
-        assert_eq!(
-            tree.insert_block(canonical_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(),
-            InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
-        );
-
-        assert_eq!(
-            tree.make_canonical(canonical_block_1.hash()).unwrap(),
-            CanonicalOutcome::Committed { head: canonical_block_1.header.clone() }
-        );
-
-        assert_eq!(
-            tree.insert_block(canonical_block_2, BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(sidechain_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(sidechain_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_1.header.clone() } - ); - - assert_eq!( - tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } - ); - - assert_eq!( - tree.insert_block(sidechain_block_2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(sidechain_block_2.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_2.header.clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(canonical_block_3.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_3.header.clone() } - ); - } - - #[test] - fn sidechain_block_hashes() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let (block4, exec4) = data.blocks[3].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = - setup_externals(vec![exec3.clone(), exec2.clone(), exec4, exec3, exec2, exec1]); - - // last finalized block would be number 9. 
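        // The arguments to `BlockchainTreeConfig::new(1, 2, 3, 2)` below are
        // (max_reorg_depth, max_blocks_in_chain, num_of_additional_canonical_block_hashes,
        // max_unconnected_blocks); the tiny windows keep reorg behavior easy to exercise.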
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block4, BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - let mut block2a = block2; - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - let mut block3a = block3; - let block3a_hash = B256::new([0x35; 32]); - block3a.set_hash(block3a_hash); - block3a.set_parent_hash(block2a.hash()); - - assert_eq!( - tree.insert_block(block3a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) /* TODO: this is incorrect, figure out why */ - ); - - let block3a_chain_id = tree.state.block_indices.get_side_chain_id(&block3a.hash()).unwrap(); - assert_eq!( - tree.all_chain_hashes(block3a_chain_id), - BTreeMap::from([ - (block1.number, block1.hash()), - (block2a.number, block2a.hash()), - (block3a.number, block3a.hash()), - ]) - ); - } - - #[test] - fn cached_trie_updates() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let (block4, exec4) = data.blocks[3].clone(); - let (block5, exec5) = data.blocks[4].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec5.clone(), exec4, exec3, exec2, exec1]); - - // last finalized block would be number 9. 
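        // Trie updates are only cached for a single-block chain forking directly off
        // the canonical tip; appending another block to a sidechain discards them (see
        // `AppendableChain::append_block`, which drops the updates it computes). The
        // `trie_updates().is_some()` / `is_none()` assertions below rely on this.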
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block1_chain_id = tree.state.block_indices.get_side_chain_id(&block1.hash()).unwrap(); - let block1_chain = tree.state.chains.get(&block1_chain_id).unwrap(); - assert!(block1_chain.trie_updates().is_some()); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block2_chain_id = tree.state.block_indices.get_side_chain_id(&block2.hash()).unwrap(); - let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap(); - assert!(block2_chain.trie_updates().is_none()); - - assert_eq!( - tree.make_canonical(block2.hash()).unwrap(), - CanonicalOutcome::Committed { head: block2.header.clone() } - ); - - assert_eq!( - tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block3_chain_id = tree.state.block_indices.get_side_chain_id(&block3.hash()).unwrap(); - let block3_chain = tree.state.chains.get(&block3_chain_id).unwrap(); - assert!(block3_chain.trie_updates().is_some()); - - assert_eq!( - tree.make_canonical(block3.hash()).unwrap(), - CanonicalOutcome::Committed { head: block3.header.clone() } - ); - - assert_eq!( - tree.insert_block(block4.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block4_chain_id = tree.state.block_indices.get_side_chain_id(&block4.hash()).unwrap(); - let block4_chain = tree.state.chains.get(&block4_chain_id).unwrap(); - assert!(block4_chain.trie_updates().is_some()); - - assert_eq!( - tree.insert_block(block5.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - let block5_chain_id = tree.state.block_indices.get_side_chain_id(&block5.hash()).unwrap(); - let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap(); - assert!(block5_chain.trie_updates().is_none()); - - assert_eq!( - tree.make_canonical(block5.hash()).unwrap(), - CanonicalOutcome::Committed { head: block5.header.clone() } - ); - - let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = tree - .externals - .provider_factory - .hashed_post_state(exec5.state()) - .construct_prefix_sets() - .freeze(); - let state_root = - StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); - assert_eq!(state_root, block5.state_root); - } - - #[test] - fn test_side_chain_fork() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec2.clone(), exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // we have one chain that has two blocks. - // Trie state: - // b2 (pending block) - // | - // | - // b1 (pending block) - // / - // / - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1.hash()]), - )])) - .assert(&tree); - - let mut block2a = block2.clone(); - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - // fork chain. - // Trie state: - // b2 b2a (pending blocks in tree) - // | / - // | / - // b1 - // / - // / - // g1 (canonical blocks) - // | - - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - (block2a.hash(), 1.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1.hash()])), - (block2a.parent_hash, HashSet::from([block2a.hash()])), - ])) - .assert(&tree); - // chain 0 has two blocks so receipts and reverts len is 2 - let chain0 = tree.state.chains.get(&0.into()).unwrap().execution_outcome(); - assert_eq!(chain0.receipts().len(), 2); - assert_eq!(chain0.state().reverts.len(), 2); - assert_eq!(chain0.first_block(), block1.number); - // chain 1 has one block so receipts and reverts len is 1 - let chain1 = tree.state.chains.get(&1.into()).unwrap().execution_outcome(); - assert_eq!(chain1.receipts().len(), 1); - assert_eq!(chain1.state().reverts.len(), 1); - assert_eq!(chain1.first_block(), block2.number); - } - - #[test] - fn sanity_path() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - - let mut canon_notif = tree.subscribe_canon_state(); - // genesis block 10 is already canonical - let head = BlockNumHash::new(10, B256::ZERO); - tree.make_canonical(head.hash).unwrap(); - - // make sure is_block_hash_canonical returns true for genesis block - tree.is_block_hash_canonical(&B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(head.number).unwrap(); - - // block 2 parent is not known, block2 is buffered. - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head, - missing_ancestor: block2.parent_num_hash() - }) - ); - - // Buffered block: [block2] - // Trie state: - // | - // g1 (canonical blocks) - // | - - TreeTester::default() - .with_buffered_blocks(HashMap::from([(block2.hash(), block2.clone())])) - .assert(&tree); - - assert_eq!( - tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Disconnected { head, missing_ancestor: block2.parent_num_hash() }) - ); - - // check if random block is known - let old_block = BlockNumHash::new(1, B256::new([32; 32])); - let err = BlockchainTreeError::PendingBlockIsFinalized { last_finalized: 10 }; - - assert_eq!(tree.is_block_known(old_block).unwrap_err().as_tree_error(), Some(err)); - - // insert block1 and buffered block2 is inserted - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // Buffered blocks: [] - // Trie state: - // b2 (pending block) - // | - // | - // b1 (pending block) - // / - // / - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1.hash()]), - )])) - .with_pending_blocks((block1.number, HashSet::from([block1.hash()]))) - .assert(&tree); - - // already inserted block will `InsertPayloadOk::AlreadySeen(_)` - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // block two is already inserted. - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // make block1 canonical - tree.make_canonical(block1.hash()).unwrap(); - // check notification - assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block1.number,block1.clone())])); - - // make block2 canonicals - tree.make_canonical(block2.hash()).unwrap(); - // check notification. 
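        // `make_canonical` sends `CanonStateNotification::Commit` when the new chain
        // extends the old canonical tip and `CanonStateNotification::Reorg` when it
        // forks below it, which is what the `assert_matches!` checks below distinguish.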
- assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); - - // Trie state: - // b2 (canonical block) - // | - // | - // b1 (canonical block) - // | - // | - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(0) - .with_block_to_chain(HashMap::from([])) - .with_fork_to_child(HashMap::from([])) - .assert(&tree); - - /**** INSERT SIDE BLOCKS *** */ - - let mut block1a = block1.clone(); - let block1a_hash = B256::new([0x33; 32]); - block1a.set_hash(block1a_hash); - let mut block2a = block2.clone(); - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - // reinsert two blocks that point to canonical chain - assert_eq!( - tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block1a_hash, 1.into())])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1a_hash]), - )])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 1.into()), - (block2a_hash, 2.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2a_hash])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - // make b2a canonical - assert!(tree.make_canonical(block2a_hash).is_ok()); - // check notification. - assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block2.number,block2.clone())]) - && *new.blocks() == BTreeMap::from([(block2a.number,block2a.clone())])); - - // Trie state: - // b2a b2 (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 1.into()), - (block2.hash(), 3.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2.hash()])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - assert_matches!(tree.make_canonical(block1a_hash), Ok(_)); - // Trie state: - // b2a b2 (side chain) - // | / - // | / - // b1a b1 (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 4.into()), - (block2a_hash, 4.into()), - (block2.hash(), 3.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1.hash()])), - (block1.hash(), HashSet::from([block2.hash()])), - ])) - .with_pending_blocks((block1a.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. 
- assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2a.number,block2a.clone())]) - && *new.blocks() == BTreeMap::from([(block1a.number,block1a.clone())])); - - // check that b2 and b1 are not canonical - assert!(!tree.is_block_hash_canonical(&block2.hash()).unwrap()); - assert!(!tree.is_block_hash_canonical(&block1.hash()).unwrap()); - - // ensure that b1a is canonical - assert!(tree.is_block_hash_canonical(&block1a.hash()).unwrap()); - - // make b2 canonical - tree.make_canonical(block2.hash()).unwrap(); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 5.into()), - (block2a_hash, 4.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2a_hash])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. - assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block1a.number,block1a.clone())]) - && *new.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2.number,block2.clone())])); - - // check that b2 is now canonical - assert!(tree.is_block_hash_canonical(&block2.hash()).unwrap()); - - // finalize b1 that would make b1a removed from tree - tree.finalize_block(11).unwrap(); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 (canon) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) - .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - // unwind canonical - assert!(tree.unwind(block1.number).is_ok()); - // Trie state: - // b2 b2a (pending block) - // / / - // / / - // / / - // b1 (canonical block) - // | - // | - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block2a_hash, 4.into()), - (block2.hash(), 6.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.hash(), - HashSet::from([block2a_hash, block2.hash()]), - )])) - .with_pending_blocks((block2.number, HashSet::from([block2.hash(), block2a.hash()]))) - .assert(&tree); - - // commit b2a - tree.make_canonical(block2.hash()).unwrap(); - - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 (finalized) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) - .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. 
- assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Commit{ new }) - if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); - - // insert unconnected block2b - let mut block2b = block2a.clone(); - block2b.set_hash(B256::new([0x99; 32])); - block2b.set_parent_hash(B256::new([0x88; 32])); - - assert_eq!( - tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head: block2.header.num_hash(), - missing_ancestor: block2b.parent_num_hash() - }) - ); - - TreeTester::default() - .with_buffered_blocks(HashMap::from([(block2b.hash(), block2b.clone())])) - .assert(&tree); - - // update canonical block to b2, this would make b2a be removed - assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); - - assert_eq!( - tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // Trie state: - // b2 (finalized) - // | - // b1 (finalized) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(0) - .with_block_to_chain(HashMap::default()) - .with_fork_to_child(HashMap::default()) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .with_buffered_blocks(HashMap::default()) - .assert(&tree); - } - - #[test] - fn last_finalized_block_initialization() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = - setup_externals(vec![exec3.clone(), exec2.clone(), exec1.clone(), exec3, exec2, exec1]); - let cloned_externals_1 = TreeExternals { - provider_factory: externals.provider_factory.clone(), - executor_factory: externals.executor_factory.clone(), - consensus: externals.consensus.clone(), - }; - let cloned_externals_2 = TreeExternals { - provider_factory: externals.provider_factory.clone(), - executor_factory: externals.executor_factory.clone(), - consensus: externals.consensus.clone(), - }; - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block3, BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - tree.make_canonical(block2.hash()).unwrap(); - - // restart - let mut tree = - BlockchainTree::new(cloned_externals_1, config).expect("failed to create tree"); - assert_eq!(tree.block_indices().last_finalized_block(), 0); - - let mut block1a = block1; - let block1a_hash = B256::new([0x33; 32]); - block1a.set_hash(block1a_hash); - - assert_eq!( - tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - tree.make_canonical(block1a.hash()).unwrap(); - tree.finalize_block(block1a.number).unwrap(); - - // restart - let tree = BlockchainTree::new(cloned_externals_2, config).expect("failed to create tree"); - - assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); - } -} diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs deleted file mode 100644 index ef9fc21670c8..000000000000 --- a/crates/blockchain-tree/src/bundle.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! [`ExecutionDataProvider`] implementations used by the tree. - -use alloy_eips::ForkBlock; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_provider::{BlockExecutionForkProvider, ExecutionDataProvider, ExecutionOutcome}; -use std::collections::BTreeMap; - -/// Structure that combines references of required data to be an [`ExecutionDataProvider`]. -#[derive(Clone, Debug)] -pub struct BundleStateDataRef<'a> { - /// The execution outcome after execution of one or more transactions and/or blocks. - pub execution_outcome: &'a ExecutionOutcome, - /// The blocks in the sidechain. - pub sidechain_block_hashes: &'a BTreeMap, - /// The blocks in the canonical chain. - pub canonical_block_hashes: &'a BTreeMap, - /// Canonical fork - pub canonical_fork: ForkBlock, -} - -impl ExecutionDataProvider for BundleStateDataRef<'_> { - fn execution_outcome(&self) -> &ExecutionOutcome { - self.execution_outcome - } - - fn block_hash(&self, block_number: BlockNumber) -> Option { - let block_hash = self.sidechain_block_hashes.get(&block_number).copied(); - if block_hash.is_some() { - return block_hash; - } - - self.canonical_block_hashes.get(&block_number).copied() - } -} - -impl BlockExecutionForkProvider for BundleStateDataRef<'_> { - fn canonical_fork(&self) -> ForkBlock { - self.canonical_fork - } -} - -/// Structure that owns the relevant data needs to be an [`ExecutionDataProvider`] -#[derive(Clone, Debug)] -pub struct ExecutionData { - /// Execution outcome. - pub execution_outcome: ExecutionOutcome, - /// Parent block hashes needs for evm BLOCKHASH opcode. - /// NOTE: it does not mean that all hashes are there but all until finalized are there. 
-    /// Other hashes can be obtained from the provider.
-    pub parent_block_hashes: BTreeMap<BlockNumber, BlockHash>,
-    /// Canonical block where state forked from.
-    pub canonical_fork: ForkBlock,
-}
-
-impl ExecutionDataProvider for ExecutionData {
-    fn execution_outcome(&self) -> &ExecutionOutcome {
-        &self.execution_outcome
-    }
-
-    fn block_hash(&self, block_number: BlockNumber) -> Option<BlockHash> {
-        self.parent_block_hashes.get(&block_number).copied()
-    }
-}
-
-impl BlockExecutionForkProvider for ExecutionData {
-    fn canonical_fork(&self) -> ForkBlock {
-        self.canonical_fork
-    }
-}
diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs
deleted file mode 100644
index 253f799fe0f8..000000000000
--- a/crates/blockchain-tree/src/canonical_chain.rs
+++ /dev/null
@@ -1,241 +0,0 @@
-use alloy_eips::BlockNumHash;
-use alloy_primitives::{BlockHash, BlockNumber};
-use std::collections::BTreeMap;
-
-/// This keeps track of (non-finalized) blocks of the canonical chain.
-///
-/// This is a wrapper type around an ordered set of block numbers and hashes that belong to the
-/// canonical chain that is not yet finalized.
-#[derive(Debug, Clone, Default)]
-pub(crate) struct CanonicalChain {
-    /// All blocks of the canonical chain in order of their block number.
-    chain: BTreeMap<BlockNumber, BlockHash>,
-}
-
-impl CanonicalChain {
-    pub(crate) const fn new(chain: BTreeMap<BlockNumber, BlockHash>) -> Self {
-        Self { chain }
-    }
-
-    /// Replaces the current chain with the given one.
-    #[inline]
-    pub(crate) fn replace(&mut self, chain: BTreeMap<BlockNumber, BlockHash>) {
-        self.chain = chain;
-    }
-
-    /// Returns the block hash of the (non-finalized) canonical block with the given number.
-    #[inline]
-    pub(crate) fn canonical_hash(&self, number: &BlockNumber) -> Option<BlockHash> {
-        self.chain.get(number).copied()
-    }
-
-    /// Returns the block number of the (non-finalized) canonical block with the given hash.
-    #[inline]
-    pub(crate) fn canonical_number(&self, block_hash: &BlockHash) -> Option<BlockNumber> {
-        self.chain.iter().find_map(|(number, hash)| (hash == block_hash).then_some(*number))
-    }
-
-    /// Extends the chain with all items from the given iterator.
-    #[inline]
-    pub(crate) fn extend(&mut self, blocks: impl Iterator<Item = (BlockNumber, BlockHash)>) {
-        self.chain.extend(blocks)
-    }
-
-    /// Retains only the elements specified by the predicate.
-    #[inline]
-    pub(crate) fn retain<F>(&mut self, f: F)
-    where
-        F: FnMut(&BlockNumber, &mut BlockHash) -> bool,
-    {
-        self.chain.retain(f)
-    }
-
-    #[inline]
-    pub(crate) const fn inner(&self) -> &BTreeMap<BlockNumber, BlockHash> {
-        &self.chain
-    }
-
-    #[inline]
-    pub(crate) fn tip(&self) -> BlockNumHash {
-        self.chain
-            .last_key_value()
-            .map(|(&number, &hash)| BlockNumHash { number, hash })
-            .unwrap_or_default()
-    }
-
-    #[inline]
-    pub(crate) fn iter(&self) -> impl Iterator<Item = (BlockNumber, BlockHash)> + '_ {
-        self.chain.iter().map(|(&number, &hash)| (number, hash))
-    }
-
-    #[inline]
-    pub(crate) fn into_iter(self) -> impl Iterator<Item = (BlockNumber, BlockHash)> {
-        self.chain.into_iter()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_replace_canonical_chain() {
-        // Initialize a chain with some blocks
-        let mut initial_chain = BTreeMap::new();
-        initial_chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
-        initial_chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
-
-        let mut canonical_chain = CanonicalChain::new(initial_chain.clone());
-
-        // Verify initial chain state
-        assert_eq!(canonical_chain.chain.len(), 2);
-        assert_eq!(
-            canonical_chain.chain.get(&BlockNumber::from(1u64)),
-            Some(&BlockHash::from([0x01; 32]))
-        );
-
-        // Replace with a new chain
-        let mut new_chain = BTreeMap::new();
-        new_chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));
-        new_chain.insert(BlockNumber::from(4u64), BlockHash::from([0x04; 32]));
-        new_chain.insert(BlockNumber::from(5u64), BlockHash::from([0x05; 32]));
-
-        canonical_chain.replace(new_chain.clone());
-
-        // Verify replaced chain state
-        assert_eq!(canonical_chain.chain.len(), 3);
-        assert!(!canonical_chain.chain.contains_key(&BlockNumber::from(1u64)));
-        assert_eq!(
-            canonical_chain.chain.get(&BlockNumber::from(3u64)),
-            Some(&BlockHash::from([0x03; 32]))
-        );
-    }
-
-    #[test]
-    fn test_canonical_hash_canonical_chain() {
-        // Initialize a chain with some blocks
-        let mut chain = BTreeMap::new();
-        chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
-        chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
-        chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));
-
-        // Create an instance of a canonical chain
-        let canonical_chain = CanonicalChain::new(chain.clone());
-
-        // Check that the function returns the correct hash for a given block number
-        let block_number = BlockNumber::from(2u64);
-        let expected_hash = BlockHash::from([0x02; 32]);
-        assert_eq!(canonical_chain.canonical_hash(&block_number), Some(expected_hash));
-
-        // Check that a non-existent block returns None
-        let non_existent_block = BlockNumber::from(5u64);
-        assert_eq!(canonical_chain.canonical_hash(&non_existent_block), None);
-    }
-
-    #[test]
-    fn test_canonical_number_canonical_chain() {
-        // Initialize a chain with some blocks
-        let mut chain = BTreeMap::new();
-        chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32]));
-        chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32]));
-        chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32]));
-
-        // Create an instance of a canonical chain
-        let canonical_chain = CanonicalChain::new(chain.clone());
-
-        // Check that the function returns the correct block number for a given block hash
-        let block_hash = BlockHash::from([0x02; 32]);
-        let expected_number = BlockNumber::from(2u64);
-        assert_eq!(canonical_chain.canonical_number(&block_hash), Some(expected_number));
-
-        // Check that a non-existent block hash returns None
-        let non_existent_hash = BlockHash::from([0x05;
32]); - assert_eq!(canonical_chain.canonical_number(&non_existent_hash), None); - } - - #[test] - fn test_extend_canonical_chain() { - // Initialize an empty chain - let mut canonical_chain = CanonicalChain::new(BTreeMap::new()); - - // Create an iterator with some blocks - let blocks = vec![ - (BlockNumber::from(1u64), BlockHash::from([0x01; 32])), - (BlockNumber::from(2u64), BlockHash::from([0x02; 32])), - ] - .into_iter(); - - // Extend the chain with the created blocks - canonical_chain.extend(blocks); - - // Check if the blocks were added correctly - assert_eq!(canonical_chain.chain.len(), 2); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(1u64)), - Some(&BlockHash::from([0x01; 32])) - ); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(2u64)), - Some(&BlockHash::from([0x02; 32])) - ); - - // Test extending with additional blocks again - let more_blocks = vec![(BlockNumber::from(3u64), BlockHash::from([0x03; 32]))].into_iter(); - canonical_chain.extend(more_blocks); - - assert_eq!(canonical_chain.chain.len(), 3); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(3u64)), - Some(&BlockHash::from([0x03; 32])) - ); - } - - #[test] - fn test_retain_canonical_chain() { - // Initialize a chain with some blocks - let mut chain = BTreeMap::new(); - chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - - // Create an instance of CanonicalChain - let mut canonical_chain = CanonicalChain::new(chain); - - // Retain only blocks with even block numbers - canonical_chain.retain(|number, _| number % 2 == 0); - - // Check if the chain only contains the block with number 2 - assert_eq!(canonical_chain.chain.len(), 1); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(2u64)), - Some(&BlockHash::from([0x02; 32])) - ); - - // Ensure that the blocks with odd numbers were removed - assert_eq!(canonical_chain.chain.get(&BlockNumber::from(1u64)), None); - assert_eq!(canonical_chain.chain.get(&BlockNumber::from(3u64)), None); - } - - #[test] - fn test_tip_canonical_chain() { - // Initialize a chain with some blocks - let mut chain = BTreeMap::new(); - chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - - // Create an instance of a canonical chain - let canonical_chain = CanonicalChain::new(chain); - - // Call the tip method and verify the returned value - let tip = canonical_chain.tip(); - assert_eq!(tip.number, BlockNumber::from(3u64)); - assert_eq!(tip.hash, BlockHash::from([0x03; 32])); - - // Test with an empty chain - let empty_chain = CanonicalChain::new(BTreeMap::new()); - let empty_tip = empty_chain.tip(); - assert_eq!(empty_tip.number, BlockNumber::default()); - assert_eq!(empty_tip.hash, BlockHash::default()); - } -} diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs deleted file mode 100644 index e607d00d2d9b..000000000000 --- a/crates/blockchain-tree/src/chain.rs +++ /dev/null @@ -1,311 +0,0 @@ -//! A chain in a [`BlockchainTree`][super::BlockchainTree]. -//! -//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent -//! blocks, as well as a list of the blocks the chain is composed of. 
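The `AppendableChain` defined in this file is a newtype over `Chain` that forwards to it through `Deref`/`DerefMut`. A minimal, self-contained sketch of that pattern, using illustrative names rather than this crate's types:

use std::ops::{Deref, DerefMut};

struct Inner {
    blocks: Vec<u64>,
}

/// Newtype that can add its own methods while still exposing all of `Inner`'s API.
struct Wrapper {
    inner: Inner,
}

impl Deref for Wrapper {
    type Target = Inner;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl DerefMut for Wrapper {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

fn main() {
    let mut w = Wrapper { inner: Inner { blocks: vec![1] } };
    // Fields and methods of `Inner` are reachable through deref coercion.
    w.blocks.push(2);
    assert_eq!(w.blocks.len(), 2);
}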
- -use super::externals::TreeExternals; -use crate::BundleStateDataRef; -use alloy_eips::ForkBlock; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, InsertBlockErrorKind}, - BlockAttachment, BlockValidationKind, -}; -use reth_consensus::{ConsensusError, PostExecutionInput}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, - DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, - StateRootProvider, TryIntoHistoricalStateProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_trie::{updates::TrieUpdates, TrieInput}; -use reth_trie_parallel::root::ParallelStateRoot; -use std::{ - collections::BTreeMap, - ops::{Deref, DerefMut}, - time::Instant, -}; - -/// A chain in the blockchain tree that has functionality to execute blocks and append them to -/// itself. -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct AppendableChain { - chain: Chain, -} - -impl Deref for AppendableChain { - type Target = Chain; - - fn deref(&self) -> &Self::Target { - &self.chain - } -} - -impl DerefMut for AppendableChain { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.chain - } -} - -impl AppendableChain { - /// Create a new appendable chain from a given chain. - pub const fn new(chain: Chain) -> Self { - Self { chain } - } - - /// Get the chain. - pub fn into_inner(self) -> Chain { - self.chain - } - - /// Create a new chain that forks off of the canonical chain. - /// - /// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root - /// of the block. - pub fn new_canonical_fork( - block: SealedBlockWithSenders, - parent_header: &SealedHeader, - canonical_block_hashes: &BTreeMap, - canonical_fork: ForkBlock, - externals: &TreeExternals, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let execution_outcome = ExecutionOutcome::default(); - let empty = BTreeMap::new(); - - let state_provider = BundleStateDataRef { - execution_outcome: &execution_outcome, - sidechain_block_hashes: &empty, - canonical_block_hashes, - canonical_fork, - }; - - let (bundle_state, trie_updates) = Self::validate_and_execute( - block.clone(), - parent_header, - state_provider, - externals, - block_attachment, - block_validation_kind, - )?; - - Ok(Self::new(Chain::new(vec![block], bundle_state, trie_updates))) - } - - /// Create a new chain that forks off of an existing sidechain. - /// - /// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork. 
- pub(crate) fn new_chain_fork( - &self, - block: SealedBlockWithSenders, - side_chain_block_hashes: BTreeMap, - canonical_block_hashes: &BTreeMap, - canonical_fork: ForkBlock, - externals: &TreeExternals, - block_validation_kind: BlockValidationKind, - ) -> Result - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let parent_number = - block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; - let parent = self.blocks().get(&parent_number).ok_or( - BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, - )?; - - let mut execution_outcome = self.execution_outcome().clone(); - - // Revert state to the state after execution of the parent block - execution_outcome.revert_to(parent.number); - - // Revert changesets to get the state of the parent that we need to apply the change. - let bundle_state_data = BundleStateDataRef { - execution_outcome: &execution_outcome, - sidechain_block_hashes: &side_chain_block_hashes, - canonical_block_hashes, - canonical_fork, - }; - let (block_state, _) = Self::validate_and_execute( - block.clone(), - parent, - bundle_state_data, - externals, - BlockAttachment::HistoricalFork, - block_validation_kind, - )?; - // extending will also optimize few things, mostly related to selfdestruct and wiping of - // storage. - execution_outcome.extend(block_state); - - // remove all receipts and reverts (except the last one), as they belong to the chain we - // forked from and not the new chain we are creating. - let size = execution_outcome.receipts().len(); - execution_outcome.receipts_mut().drain(0..size - 1); - execution_outcome.state_mut().take_n_reverts(size - 1); - execution_outcome.set_first_block(block.number); - - // If all is okay, return new chain back. Present chain is not modified. - Ok(Self::new(Chain::from_block(block, execution_outcome, None))) - } - - /// Validate and execute the given block that _extends the canonical chain_, validating its - /// state root after execution if possible and requested. - /// - /// Note: State root validation is limited to blocks that extend the canonical chain and is - /// optional, see [`BlockValidationKind`]. So this function takes two parameters to determine - /// if the state can and should be validated. - /// - [`BlockAttachment`] represents if the block extends the canonical chain, and thus we can - /// cache the trie state updates. - /// - [`BlockValidationKind`] determines if the state root __should__ be validated. - fn validate_and_execute( - block: SealedBlockWithSenders, - parent_block: &SealedHeader, - bundle_state_data_provider: EDP, - externals: &TreeExternals, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> - where - EDP: FullExecutionDataProvider, - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - // some checks are done before blocks comes here. - externals.consensus.validate_header_against_parent(&block, parent_block)?; - - // get the state provider. - let canonical_fork = bundle_state_data_provider.canonical_fork(); - - // SAFETY: For block execution and parallel state root computation below we open multiple - // independent database transactions. Upon opening the database transaction the consistent - // view will check a current tip in the database and throw an error if it doesn't match - // the one recorded during initialization. 
- // It is safe to use consistent view without any special error handling as long as - // we guarantee that plain state cannot change during processing of new payload. - // The usage has to be re-evaluated if that was ever to change. - let consistent_view = - ConsistentDbView::new_with_latest_tip(externals.provider_factory.clone())?; - let state_provider = consistent_view - .provider_ro()? - // State root calculation can take a while, and we're sure no write transaction - // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/7509. - .disable_long_read_transaction_safety() - .try_into_history_at_block(canonical_fork.number)?; - - let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); - - let db = StateProviderDatabase::new(&provider); - let executor = externals.executor_factory.executor(db); - let block_hash = block.hash(); - let block = block.unseal(); - - let state = executor.execute(&block)?; - externals.consensus.validate_block_post_execution( - &block, - PostExecutionInput::new(&state.receipts, &state.requests), - )?; - - let initial_execution_outcome = ExecutionOutcome::from((state, block.number)); - - // check state root if the block extends the canonical chain __and__ if state root - // validation was requested. - if block_validation_kind.is_exhaustive() { - // calculate and check state root - let start = Instant::now(); - let (state_root, trie_updates) = if block_attachment.is_canonical() { - let mut execution_outcome = - provider.block_execution_data_provider.execution_outcome().clone(); - execution_outcome.extend(initial_execution_outcome.clone()); - ParallelStateRoot::new( - consistent_view, - TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), - ) - .incremental_root_with_updates() - .map(|(root, updates)| (root, Some(updates))) - .map_err(ProviderError::from)? - } else { - let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); - let state_root = provider.state_root(hashed_state)?; - (state_root, None) - }; - if block.state_root != state_root { - return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), - ) - .into()) - } - - tracing::debug!( - target: "blockchain_tree::chain", - number = block.number, - hash = %block_hash, - elapsed = ?start.elapsed(), - "Validated state root" - ); - - Ok((initial_execution_outcome, trie_updates)) - } else { - Ok((initial_execution_outcome, None)) - } - } - - /// Validate and execute the given block, and append it to this chain. - /// - /// This expects that the block's ancestors can be traced back to the `canonical_fork` (the - /// first parent block of the `block`'s chain that is in the canonical chain). - /// - /// In other words, expects a gap less (side-) chain: [`canonical_fork..block`] in order to be - /// able to __execute__ the block. - /// - /// CAUTION: This will only perform state root check if it's possible: if the `canonical_fork` - /// is the canonical head, or: state root check can't be performed if the given canonical is - /// __not__ the canonical head. 
-    #[track_caller]
-    #[allow(clippy::too_many_arguments)]
-    pub(crate) fn append_block<N, E>(
-        &mut self,
-        block: SealedBlockWithSenders,
-        side_chain_block_hashes: BTreeMap<BlockNumber, BlockHash>,
-        canonical_block_hashes: &BTreeMap<BlockNumber, BlockHash>,
-        externals: &TreeExternals<N, E>,
-        canonical_fork: ForkBlock,
-        block_attachment: BlockAttachment,
-        block_validation_kind: BlockValidationKind,
-    ) -> Result<(), InsertBlockErrorKind>
-    where
-        N: TreeNodeTypes,
-        E: BlockExecutorProvider,
-    {
-        let parent_block = self.chain.tip();
-
-        let bundle_state_data = BundleStateDataRef {
-            execution_outcome: self.execution_outcome(),
-            sidechain_block_hashes: &side_chain_block_hashes,
-            canonical_block_hashes,
-            canonical_fork,
-        };
-
-        let (block_state, _) = Self::validate_and_execute(
-            block.clone(),
-            parent_block,
-            bundle_state_data,
-            externals,
-            block_attachment,
-            block_validation_kind,
-        )?;
-        // extend the state.
-        self.chain.append_block(block, block_state);
-
-        Ok(())
-    }
-}
diff --git a/crates/blockchain-tree/src/config.rs b/crates/blockchain-tree/src/config.rs
deleted file mode 100644
index 8dda5dc82098..000000000000
--- a/crates/blockchain-tree/src/config.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-//! Blockchain tree configuration
-
-/// The configuration for the blockchain tree.
-#[derive(Clone, Copy, Debug)]
-pub struct BlockchainTreeConfig {
-    /// Number of blocks after the last finalized block that we are storing.
-    ///
-    /// It should be more than the finalization window for the canonical chain.
-    max_blocks_in_chain: u64,
-    /// The number of blocks that can be re-orged (the finalization window).
-    max_reorg_depth: u64,
-    /// The number of unconnected blocks that we are buffering.
-    max_unconnected_blocks: u32,
-    /// Number of additional block hashes to save in blockchain tree. For `BLOCKHASH` EVM opcode we
-    /// need last 256 block hashes.
-    ///
-    /// The total number of block hashes retained in-memory will be
-    /// `max(additional_canonical_block_hashes, max_reorg_depth)`, and for Ethereum that would
-    /// be 256. It covers both the number of blocks required for reorgs and the number of blocks
-    /// required for the `BLOCKHASH` EVM opcode.
-    num_of_additional_canonical_block_hashes: u64,
-}
-
-impl Default for BlockchainTreeConfig {
-    fn default() -> Self {
-        // The defaults for Ethereum mainnet
-        Self {
-            // Gasper allows reorgs of any length from 1 to 64.
-            max_reorg_depth: 64,
-            // This default is just an assumption. Has to be greater than the `max_reorg_depth`.
-            max_blocks_in_chain: 65,
-            // EVM requires that the last 256 block hashes are available.
-            num_of_additional_canonical_block_hashes: 256,
-            // max unconnected blocks.
-            max_unconnected_blocks: 200,
-        }
-    }
-}
-
-impl BlockchainTreeConfig {
-    /// Create tree configuration.
-    pub fn new(
-        max_reorg_depth: u64,
-        max_blocks_in_chain: u64,
-        num_of_additional_canonical_block_hashes: u64,
-        max_unconnected_blocks: u32,
-    ) -> Self {
-        assert!(
-            max_reorg_depth <= max_blocks_in_chain,
-            "Side-chain size (`max_blocks_in_chain`) must be at least as large as the finalization window (`max_reorg_depth`)"
-        );
-        Self {
-            max_blocks_in_chain,
-            max_reorg_depth,
-            num_of_additional_canonical_block_hashes,
-            max_unconnected_blocks,
-        }
-    }
-
-    /// Return the maximum reorg depth.
-    pub const fn max_reorg_depth(&self) -> u64 {
-        self.max_reorg_depth
-    }
-
-    /// Return the maximum number of blocks in one chain.
-    pub const fn max_blocks_in_chain(&self) -> u64 {
-        self.max_blocks_in_chain
-    }
-
-    /// Return the number of additional canonical block hashes that we need to retain
-    /// in order to have enough information for EVM execution.
-    pub const fn num_of_additional_canonical_block_hashes(&self) -> u64 {
-        self.num_of_additional_canonical_block_hashes
-    }
-
-    /// Return the total number of canonical hashes that we need to retain in order to have enough
-    /// information for reorgs and EVM execution.
-    ///
-    /// It is calculated as the maximum of `max_reorg_depth` (the number of blocks required for
-    /// the deepest reorg possible according to the consensus protocol) and
-    /// `num_of_additional_canonical_block_hashes` (the number of block hashes needed to satisfy
-    /// the `BLOCKHASH` opcode in the EVM; see [`crate::BundleStateDataRef`]).
-    pub fn num_of_canonical_hashes(&self) -> u64 {
-        self.max_reorg_depth.max(self.num_of_additional_canonical_block_hashes)
-    }
-
-    /// Return the max number of unconnected blocks that we are buffering.
-    pub const fn max_unconnected_blocks(&self) -> u32 {
-        self.max_unconnected_blocks
-    }
-}
diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs
deleted file mode 100644
index 9e72008e838f..000000000000
--- a/crates/blockchain-tree/src/externals.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-//! Blockchain tree externals.
-
-use alloy_primitives::{BlockHash, BlockNumber};
-use reth_consensus::FullConsensus;
-use reth_db::{static_file::BlockHashMask, tables};
-use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
-use reth_node_types::NodeTypesWithDB;
-use reth_primitives::StaticFileSegment;
-use reth_provider::{
-    providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory,
-    StaticFileProviderFactory, StatsReader,
-};
-use reth_storage_errors::provider::ProviderResult;
-use std::{collections::BTreeMap, sync::Arc};
-
-pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes};
-
-/// A container for external components.
-///
-/// This is a simple container for external components used throughout the blockchain tree
-/// implementation:
-///
-/// - A handle to the database
-/// - A handle to the consensus engine
-/// - The executor factory to execute blocks with
-#[derive(Debug)]
-pub struct TreeExternals<N: NodeTypesWithDB, E> {
-    /// The provider factory, used to commit the canonical chain, or unwind it.
-    pub(crate) provider_factory: ProviderFactory<N>,
-    /// The consensus engine.
-    pub(crate) consensus: Arc<dyn FullConsensus<N::Primitives>>,
-    /// The executor factory to execute blocks with.
-    pub(crate) executor_factory: E,
-}
-
-impl<N: NodeTypesWithDB, E> TreeExternals<N, E> {
-    /// Create new tree externals.
-    pub fn new(
-        provider_factory: ProviderFactory<N>,
-        consensus: Arc<dyn FullConsensus<N::Primitives>>,
-        executor_factory: E,
-    ) -> Self {
-        Self { provider_factory, consensus, executor_factory }
-    }
-}
-
-impl<N: ProviderNodeTypes, E> TreeExternals<N, E> {
-    /// Fetches the latest canonical block hashes by walking backwards from the head.
-    ///
-    /// Returns the hashes sorted by increasing block numbers.
-    pub(crate) fn fetch_latest_canonical_hashes(
-        &self,
-        num_hashes: usize,
-    ) -> ProviderResult<BTreeMap<BlockNumber, BlockHash>> {
-        // Fetch the latest canonical hashes from the database
-        let mut hashes = self
-            .provider_factory
-            .provider()?
-            .tx_ref()
-            .cursor_read::<tables::CanonicalHeaders>()?
-            .walk_back(None)?
-            .take(num_hashes)
-            .collect::<Result<BTreeMap<_, _>, _>>()?;
-
-        // Fetch the same number of latest canonical hashes from the static files and merge them
-        // with the database hashes. This is needed because we write directly to static files
-        // during pipeline sync, but to the database during live sync, which means that the
-        // latest canonical hashes in the static files might be more recent than in the database,
-        // and vice versa, or even some ranges of the latest `num_hashes` blocks may be in the
-        // database, and some ranges in the static files.
-        let static_file_provider = self.provider_factory.static_file_provider();
-        let total_headers = static_file_provider.count_entries::<tables::Headers>()? as u64;
-        if total_headers > 0 {
-            let range =
-                total_headers.saturating_sub(1).saturating_sub(num_hashes as u64)..total_headers;
-
-            hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate(
-                StaticFileSegment::Headers,
-                range,
-                |cursor, number| cursor.get_one::<BlockHashMask>(number.into()),
-                |_| true,
-            )?));
-        }
-
-        // We may have fetched more than `num_hashes` hashes, so we need to truncate the result to
-        // the requested number.
-        let hashes = hashes.into_iter().rev().take(num_hashes).collect();
-        Ok(hashes)
-    }
-
-    pub(crate) fn fetch_latest_finalized_block_number(
-        &self,
-    ) -> ProviderResult<Option<BlockNumber>> {
-        self.provider_factory.provider()?.last_finalized_block_number()
-    }
-
-    pub(crate) fn save_finalized_block_number(
-        &self,
-        block_number: BlockNumber,
-    ) -> ProviderResult<()> {
-        let provider_rw = self.provider_factory.provider_rw()?;
-        provider_rw.save_finalized_block_number(block_number)?;
-        provider_rw.commit()?;
-        Ok(())
-    }
-}
diff --git a/crates/blockchain-tree/src/lib.rs b/crates/blockchain-tree/src/lib.rs
deleted file mode 100644
index 3f501bead071..000000000000
--- a/crates/blockchain-tree/src/lib.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-//! Implementation of a tree-like structure for blockchains.
-//!
-//! The [`BlockchainTree`] can validate, execute, and revert blocks in multiple competing
-//! sidechains. This structure is used for Reth's sync mode at the tip instead of the pipeline, and
-//! is the primary executor and validator of payloads sent from the consensus layer.
-//!
-//! Blocks and their resulting state transitions are kept in-memory until they are persisted.
-//!
-//! ## Feature Flags
-//!
-//! - `test-utils`: Export utilities for testing
-
-#![doc(
-    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
-    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
-    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
-)]
-#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-#![cfg_attr(not(test), warn(unused_crate_dependencies))]
-
-/// Re-export of the blockchain tree API.
-pub use reth_blockchain_tree_api::*;
-
-pub mod blockchain_tree;
-pub use blockchain_tree::BlockchainTree;
-
-pub mod block_indices;
-pub use block_indices::BlockIndices;
-
-pub mod chain;
-pub use chain::AppendableChain;
-
-pub mod config;
-pub use config::BlockchainTreeConfig;
-
-pub mod externals;
-pub use externals::TreeExternals;
-
-pub mod shareable;
-pub use shareable::ShareableBlockchainTree;
-
-mod bundle;
-pub use bundle::{BundleStateDataRef, ExecutionData};
-
-/// Buffer of not executed blocks.
-pub mod block_buffer;
-mod canonical_chain;
-
-/// Common blockchain tree metrics.
-pub mod metrics;
-
-pub use block_buffer::BlockBuffer;
-
-/// Implementation of Tree traits that does nothing.
-pub mod noop; - -mod state; - -use aquamarine as _; diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs deleted file mode 100644 index 121d0a69786f..000000000000 --- a/crates/blockchain-tree/src/metrics.rs +++ /dev/null @@ -1,153 +0,0 @@ -use metrics::Histogram; -use reth_metrics::{ - metrics::{Counter, Gauge}, - Metrics, -}; -use std::time::{Duration, Instant}; - -/// Metrics for the blockchain tree block buffer -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree.block_buffer")] -pub struct BlockBufferMetrics { - /// Total blocks in the block buffer - pub blocks: Gauge, -} - -#[derive(Debug)] -pub(crate) struct MakeCanonicalDurationsRecorder { - start: Instant, - pub(crate) actions: Vec<(MakeCanonicalAction, Duration)>, - latest: Option, - current_metrics: MakeCanonicalMetrics, -} - -impl Default for MakeCanonicalDurationsRecorder { - fn default() -> Self { - Self { - start: Instant::now(), - actions: Vec::new(), - latest: None, - current_metrics: MakeCanonicalMetrics::default(), - } - } -} - -impl MakeCanonicalDurationsRecorder { - /// Records the duration since last record, saves it for future logging and instantly reports as - /// a metric with `action` label. - pub(crate) fn record_relative(&mut self, action: MakeCanonicalAction) { - let elapsed = self.start.elapsed(); - let duration = elapsed - self.latest.unwrap_or_default(); - - self.actions.push((action, duration)); - self.current_metrics.record(action, duration); - self.latest = Some(elapsed); - } -} - -/// Metrics for the entire blockchain tree -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree")] -pub struct TreeMetrics { - /// Total number of sidechains (not including the canonical chain) - pub sidechains: Gauge, - /// The highest block number in the canonical chain - pub canonical_chain_height: Gauge, - /// The number of reorgs - pub reorgs: Counter, - /// The latest reorg depth - pub latest_reorg_depth: Gauge, - /// Longest sidechain height - pub longest_sidechain_height: Gauge, - /// The number of times cached trie updates were used for insert. - pub trie_updates_insert_cached: Counter, - /// The number of times trie updates were recomputed for insert. - pub trie_updates_insert_recomputed: Counter, -} - -/// Represents actions for making a canonical chain. -#[derive(Debug, Copy, Clone)] -pub(crate) enum MakeCanonicalAction { - /// Cloning old blocks for canonicalization. - CloneOldBlocks, - /// Finding the canonical header. - FindCanonicalHeader, - /// Splitting the chain for canonicalization. - SplitChain, - /// Splitting chain forks for canonicalization. - SplitChainForks, - /// Merging all chains for canonicalization. - MergeAllChains, - /// Updating the canonical index during canonicalization. - UpdateCanonicalIndex, - /// Retrieving (cached or recomputed) state trie updates - RetrieveStateTrieUpdates, - /// Committing the canonical chain to the database. - CommitCanonicalChainToDatabase, - /// Reverting the canonical chain from the database. - RevertCanonicalChainFromDatabase, - /// Inserting an old canonical chain. - InsertOldCanonicalChain, - /// Clearing trie updates of other children chains after fork choice update. - ClearTrieUpdatesForOtherChildren, -} - -/// Canonicalization metrics -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree.make_canonical")] -struct MakeCanonicalMetrics { - /// Duration of the clone old blocks action. - clone_old_blocks: Histogram, - /// Duration of the find canonical header action. 
- find_canonical_header: Histogram, - /// Duration of the split chain action. - split_chain: Histogram, - /// Duration of the split chain forks action. - split_chain_forks: Histogram, - /// Duration of the merge all chains action. - merge_all_chains: Histogram, - /// Duration of the update canonical index action. - update_canonical_index: Histogram, - /// Duration of the retrieve state trie updates action. - retrieve_state_trie_updates: Histogram, - /// Duration of the commit canonical chain to database action. - commit_canonical_chain_to_database: Histogram, - /// Duration of the revert canonical chain from database action. - revert_canonical_chain_from_database: Histogram, - /// Duration of the insert old canonical chain action. - insert_old_canonical_chain: Histogram, - /// Duration of the clear trie updates of other children chains after fork choice update - /// action. - clear_trie_updates_for_other_children: Histogram, -} - -impl MakeCanonicalMetrics { - /// Records the duration for the given action. - pub(crate) fn record(&self, action: MakeCanonicalAction, duration: Duration) { - match action { - MakeCanonicalAction::CloneOldBlocks => self.clone_old_blocks.record(duration), - MakeCanonicalAction::FindCanonicalHeader => self.find_canonical_header.record(duration), - MakeCanonicalAction::SplitChain => self.split_chain.record(duration), - MakeCanonicalAction::SplitChainForks => self.split_chain_forks.record(duration), - MakeCanonicalAction::MergeAllChains => self.merge_all_chains.record(duration), - MakeCanonicalAction::UpdateCanonicalIndex => { - self.update_canonical_index.record(duration) - } - MakeCanonicalAction::RetrieveStateTrieUpdates => { - self.retrieve_state_trie_updates.record(duration) - } - MakeCanonicalAction::CommitCanonicalChainToDatabase => { - self.commit_canonical_chain_to_database.record(duration) - } - MakeCanonicalAction::RevertCanonicalChainFromDatabase => { - self.revert_canonical_chain_from_database.record(duration) - } - MakeCanonicalAction::InsertOldCanonicalChain => { - self.insert_old_canonical_chain.record(duration) - } - MakeCanonicalAction::ClearTrieUpdatesForOtherChildren => { - self.clear_trie_updates_for_other_children.record(duration) - } - } - } -} diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs deleted file mode 100644 index f5d2ad8c6f78..000000000000 --- a/crates/blockchain-tree/src/noop.rs +++ /dev/null @@ -1,140 +0,0 @@ -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_blockchain_tree_api::{ - self, - error::{BlockchainTreeError, CanonicalError, InsertBlockError, ProviderError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, -}; -use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, -}; -use reth_storage_errors::provider::ProviderResult; -use std::collections::BTreeMap; - -/// A `BlockchainTree` that does nothing. -/// -/// Caution: this is only intended for testing purposes, or for wiring components together. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct NoopBlockchainTree { - /// Broadcast channel for canon state changes notifications. 
- pub canon_state_notification_sender: Option, -} - -impl NoopBlockchainTree { - /// Create a new `NoopBlockchainTree` with a canon state notification sender. - pub const fn with_canon_state_notifications( - canon_state_notification_sender: CanonStateNotificationSender, - ) -> Self { - Self { canon_state_notification_sender: Some(canon_state_notification_sender) } - } -} - -impl BlockchainTreeEngine for NoopBlockchainTree { - fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - Ok(()) - } - - fn insert_block( - &self, - block: SealedBlockWithSenders, - _validation_kind: BlockValidationKind, - ) -> Result { - Err(InsertBlockError::tree_error( - BlockchainTreeError::BlockHashNotFoundInChain { block_hash: block.hash() }, - block.block, - )) - } - - fn finalize_block(&self, _finalized_block: BlockNumber) -> ProviderResult<()> { - Ok(()) - } - - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - _last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError> { - Ok(()) - } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - Ok(BTreeMap::new()) - } - - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { - Ok(()) - } - - fn make_canonical(&self, block_hash: BlockHash) -> Result { - Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) - } -} - -impl BlockchainTreeViewer for NoopBlockchainTree { - fn header_by_hash(&self, _hash: BlockHash) -> Option { - None - } - - fn block_by_hash(&self, _hash: BlockHash) -> Option { - None - } - - fn block_with_senders_by_hash(&self, _hash: BlockHash) -> Option { - None - } - - fn buffered_header_by_hash(&self, _block_hash: BlockHash) -> Option { - None - } - - fn is_canonical(&self, _block_hash: BlockHash) -> Result { - Ok(false) - } - - fn lowest_buffered_ancestor(&self, _hash: BlockHash) -> Option { - None - } - - fn canonical_tip(&self) -> BlockNumHash { - Default::default() - } - - fn pending_block_num_hash(&self) -> Option { - None - } - - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - None - } - - fn receipts_by_block_hash(&self, _block_hash: BlockHash) -> Option> { - None - } -} - -impl BlockchainTreePendingStateProvider for NoopBlockchainTree { - fn find_pending_state_provider( - &self, - _block_hash: BlockHash, - ) -> Option> { - None - } -} - -impl NodePrimitivesProvider for NoopBlockchainTree { - type Primitives = EthPrimitives; -} - -impl CanonStateSubscriptions for NoopBlockchainTree { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.canon_state_notification_sender - .as_ref() - .map(|sender| sender.subscribe()) - .unwrap_or_else(|| CanonStateNotificationSender::new(1).subscribe()) - } -} diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs deleted file mode 100644 index e668f4e2dac0..000000000000 --- a/crates/blockchain-tree/src/shareable.rs +++ /dev/null @@ -1,205 +0,0 @@ -//! Wrapper around `BlockchainTree` that allows for it to be shared. 
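// A rough, self-contained sketch (toy types, not reth's) of the sharing pattern
// this module implements: the tree lives behind an Arc<RwLock<..>>, clones of
// the wrapper are cheap handles onto the same tree, and mutating calls refresh
// the metrics while still holding the write lock, so gauges never observe a
// half-applied update. reth uses parking_lot's RwLock; std's is used here to
// keep the sketch dependency-free.
use std::sync::{Arc, RwLock};

#[derive(Default)]
struct Tree {
    blocks: usize,
}

impl Tree {
    fn insert_block(&mut self) {
        self.blocks += 1;
    }
    fn update_chains_metrics(&self) {
        // e.g. gauge.set(self.blocks as f64)
    }
}

#[derive(Clone, Default)]
struct SharedTree(Arc<RwLock<Tree>>);

impl SharedTree {
    fn insert_block(&self) {
        let mut tree = self.0.write().unwrap();
        tree.insert_block();
        // metrics are updated before the lock is released
        tree.update_chains_metrics();
    }
}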
- -use crate::externals::TreeNodeTypes; - -use super::BlockchainTree; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use parking_lot::RwLock; -use reth_blockchain_tree_api::{ - error::{CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, -}; -use reth_evm::execute::BlockExecutorProvider; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, -}; -use reth_storage_errors::provider::ProviderResult; -use std::{collections::BTreeMap, sync::Arc}; -use tracing::trace; - -/// Shareable blockchain tree that is behind a `RwLock` -#[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { - /// `BlockchainTree` - pub tree: Arc>>, -} - -impl ShareableBlockchainTree { - /// Create a new shareable database. - pub fn new(tree: BlockchainTree) -> Self { - Self { tree: Arc::new(RwLock::new(tree)) } - } -} - -impl BlockchainTreeEngine for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - let mut tree = self.tree.write(); - // Blockchain tree metrics shouldn't be updated here, see - // `BlockchainTree::update_chains_metrics` documentation. - tree.buffer_block(block) - } - - fn insert_block( - &self, - block: SealedBlockWithSenders, - validation_kind: BlockValidationKind, - ) -> Result { - trace!(target: "blockchain_tree", hash = %block.hash(), number = block.number, parent_hash = %block.parent_hash, "Inserting block"); - let mut tree = self.tree.write(); - let res = tree.insert_block(block, validation_kind); - tree.update_chains_metrics(); - res - } - - fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> { - trace!(target: "blockchain_tree", finalized_block, "Finalizing block"); - let mut tree = self.tree.write(); - tree.finalize_block(finalized_block)?; - tree.update_chains_metrics(); - - Ok(()) - } - - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError> { - trace!(target: "blockchain_tree", last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree"); - let mut tree = self.tree.write(); - let res = - tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block); - tree.update_chains_metrics(); - Ok(res?) - } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - let mut tree = self.tree.write(); - let res = tree.update_block_hashes_and_clear_buffered(); - tree.update_chains_metrics(); - Ok(res?) - } - - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { - trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); - let mut tree = self.tree.write(); - let res = tree.connect_buffered_blocks_to_canonical_hashes(); - tree.update_chains_metrics(); - Ok(res?) 
- } - - fn make_canonical(&self, block_hash: BlockHash) -> Result { - trace!(target: "blockchain_tree", %block_hash, "Making block canonical"); - let mut tree = self.tree.write(); - let res = tree.make_canonical(block_hash); - tree.update_chains_metrics(); - res - } -} - -impl BlockchainTreeViewer for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - fn header_by_hash(&self, hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); - self.tree.read().sidechain_block_by_hash(hash).map(|b| b.header.clone()) - } - - fn block_by_hash(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); - self.tree.read().sidechain_block_by_hash(block_hash).cloned() - } - - fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); - self.tree.read().block_with_senders_by_hash(block_hash).cloned() - } - - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.read().get_buffered_block(&block_hash).map(|b| b.header.clone()) - } - - fn is_canonical(&self, hash: BlockHash) -> Result { - trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical"); - self.tree.read().is_block_hash_canonical(&hash) - } - - fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?hash, "Returning lowest buffered ancestor"); - self.tree.read().lowest_buffered_ancestor(&hash).cloned() - } - - fn canonical_tip(&self) -> BlockNumHash { - trace!(target: "blockchain_tree", "Returning canonical tip"); - self.tree.read().block_indices().canonical_tip() - } - - fn pending_block_num_hash(&self) -> Option { - trace!(target: "blockchain_tree", "Returning first pending block"); - self.tree.read().block_indices().pending_block_num_hash() - } - - fn pending_block(&self) -> Option { - trace!(target: "blockchain_tree", "Returning first pending block"); - self.tree.read().pending_block().cloned() - } - - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - let tree = self.tree.read(); - let pending_block = tree.pending_block()?.clone(); - let receipts = - tree.receipts_by_block_hash(pending_block.hash())?.into_iter().cloned().collect(); - Some((pending_block, receipts)) - } - - fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - let tree = self.tree.read(); - Some(tree.receipts_by_block_hash(block_hash)?.into_iter().cloned().collect()) - } -} - -impl BlockchainTreePendingStateProvider for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option> { - trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider"); - let provider = self.tree.read().post_state_data(block_hash)?; - Some(Box::new(provider)) - } -} - -impl NodePrimitivesProvider for ShareableBlockchainTree -where - N: ProviderNodeTypes, - E: Send + Sync, -{ - type Primitives = N::Primitives; -} - -impl CanonStateSubscriptions for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: Send + Sync, -{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); - self.tree.read().subscribe_canon_state() - } -} diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs deleted file mode 100644 index 
2d01293e20f8..000000000000 --- a/crates/blockchain-tree/src/state.rs +++ /dev/null @@ -1,430 +0,0 @@ -//! Blockchain tree state. - -use crate::{AppendableChain, BlockBuffer, BlockIndices}; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; -use std::collections::{BTreeMap, HashMap}; - -/// Container to hold the state of the blockchain tree. -#[derive(Debug)] -pub(crate) struct TreeState { - /// Keeps track of new unique identifiers for chains - block_chain_id_generator: u64, - /// The tracked chains and their current data. - pub(crate) chains: HashMap, - /// Indices to block and their connection to the canonical chain. - /// - /// This gets modified by the tree itself and is read from engine API/RPC to access the pending - /// block for example. - pub(crate) block_indices: BlockIndices, - /// Unconnected block buffer. - pub(crate) buffered_blocks: BlockBuffer, -} - -impl TreeState { - /// Initializes the tree state with the given last finalized block number and last canonical - /// hashes. - pub(crate) fn new( - last_finalized_block_number: BlockNumber, - last_canonical_hashes: impl IntoIterator, - buffer_limit: u32, - ) -> Self { - Self { - block_chain_id_generator: 0, - chains: Default::default(), - block_indices: BlockIndices::new( - last_finalized_block_number, - BTreeMap::from_iter(last_canonical_hashes), - ), - buffered_blocks: BlockBuffer::new(buffer_limit), - } - } - - /// Issues a new unique identifier for a new sidechain. - #[inline] - fn next_id(&mut self) -> SidechainId { - let id = self.block_chain_id_generator; - self.block_chain_id_generator += 1; - SidechainId(id) - } - - /// Expose internal indices of the `BlockchainTree`. - #[inline] - pub(crate) const fn block_indices(&self) -> &BlockIndices { - &self.block_indices - } - - /// Returns the block with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - #[inline] - pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { - self.block_with_senders_by_hash(block_hash).map(|block| &block.block) - } - - /// Returns the block with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - #[inline] - pub(crate) fn block_with_senders_by_hash( - &self, - block_hash: BlockHash, - ) -> Option<&SealedBlockWithSenders> { - let id = self.block_indices.get_side_chain_id(&block_hash)?; - let chain = self.chains.get(&id)?; - chain.block_with_senders(block_hash) - } - - /// Returns the block's receipts with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - pub(crate) fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - let id = self.block_indices.get_side_chain_id(&block_hash)?; - let chain = self.chains.get(&id)?; - chain.receipts_by_block_hash(block_hash) - } - - /// Insert a chain into the tree. - /// - /// Inserts a chain into the tree and builds the block indices. - pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option { - if chain.is_empty() { - return None - } - let chain_id = self.next_id(); - - self.block_indices.insert_chain(chain_id, &chain); - // add chain_id -> chain index - self.chains.insert(chain_id, chain); - Some(chain_id) - } - - /// Checks the block buffer for the given block. 
- pub(crate) fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.buffered_blocks.block(hash) - } - - /// Gets the lowest ancestor for the given block in the block buffer. - pub(crate) fn lowest_buffered_ancestor( - &self, - hash: &BlockHash, - ) -> Option<&SealedBlockWithSenders> { - self.buffered_blocks.lowest_ancestor(hash) - } -} - -/// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree]. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub(crate) struct SidechainId(u64); - -impl From for u64 { - fn from(value: SidechainId) -> Self { - value.0 - } -} - -#[cfg(test)] -impl From for SidechainId { - fn from(value: u64) -> Self { - Self(value) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::canonical_chain::CanonicalChain; - use alloy_primitives::B256; - use reth_execution_types::Chain; - use reth_provider::ExecutionOutcome; - - #[test] - fn test_tree_state_initialization() { - // Set up some dummy data for initialization - let last_finalized_block_number = 10u64; - let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())]; - let buffer_limit = 5; - - // Initialize the tree state - let tree_state = TreeState::new( - last_finalized_block_number, - last_canonical_hashes.clone(), - buffer_limit, - ); - - // Verify the tree state after initialization - assert_eq!(tree_state.block_chain_id_generator, 0); - assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number); - assert_eq!( - *tree_state.block_indices.canonical_chain().inner(), - *CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner() - ); - assert!(tree_state.chains.is_empty()); - assert!(tree_state.buffered_blocks.lru.is_empty()); - } - - #[test] - fn test_tree_state_next_id() { - // Initialize the tree state - let mut tree_state = TreeState::new(0, vec![], 5); - - // Generate a few sidechain IDs - let first_id = tree_state.next_id(); - let second_id = tree_state.next_id(); - - // Verify the generated sidechain IDs and the updated generator state - assert_eq!(first_id, SidechainId(0)); - assert_eq!(second_id, SidechainId(1)); - assert_eq!(tree_state.block_chain_id_generator, 2); - } - - #[test] - fn test_tree_state_insert_chain() { - // Initialize tree state - let mut tree_state = TreeState::new(0, vec![], 5); - - // Create a chain with two blocks - let block: SealedBlockWithSenders = Default::default(); - let block1_hash = B256::random(); - let block2_hash = B256::random(); - - let mut block1 = block.clone(); - let mut block2 = block; - - block1.block.header.set_hash(block1_hash); - block1.block.header.set_block_number(9); - block2.block.header.set_hash(block2_hash); - block2.block.header.set_block_number(10); - - let chain = AppendableChain::new(Chain::new( - [block1, block2], - Default::default(), - Default::default(), - )); - - // Insert the chain into the TreeState - let chain_id = tree_state.insert_chain(chain).unwrap(); - - // Verify the chain ID and that it was added to the chains collection - assert_eq!(chain_id, SidechainId(0)); - assert!(tree_state.chains.contains_key(&chain_id)); - - // Ensure that the block indices are updated - assert_eq!( - tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), - SidechainId(0) - ); - assert_eq!( - tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), - SidechainId(0) - ); - - // Ensure that the block chain ID generator was updated - assert_eq!(tree_state.block_chain_id_generator, 1); - - 
// Create an empty chain
-        let chain_empty = AppendableChain::new(Chain::default());
-
-        // Insert the empty chain into the tree state
-        let chain_id = tree_state.insert_chain(chain_empty);
-
-        // Ensure that the empty chain was not inserted
-        assert!(chain_id.is_none());
-
-        // Nothing should have changed and no new chain should have been added
-        assert!(tree_state.chains.contains_key(&SidechainId(0)));
-        assert!(!tree_state.chains.contains_key(&SidechainId(1)));
-        assert_eq!(
-            tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
-            SidechainId(0)
-        );
-        assert_eq!(
-            tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
-            SidechainId(0)
-        );
-        assert_eq!(tree_state.block_chain_id_generator, 1);
-    }
-
-    #[test]
-    fn test_block_by_hash_side_chain() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create two side-chain blocks with random hashes
-        let block1_hash = B256::random();
-        let block2_hash = B256::random();
-
-        let mut block1: SealedBlockWithSenders = Default::default();
-        let mut block2: SealedBlockWithSenders = Default::default();
-
-        block1.block.header.set_hash(block1_hash);
-        block1.block.header.set_block_number(9);
-        block2.block.header.set_hash(block2_hash);
-        block2.block.header.set_block_number(10);
-
-        // Create a chain with these blocks
-        let chain = AppendableChain::new(Chain::new(
-            vec![block1.clone(), block2.clone()],
-            Default::default(),
-            Default::default(),
-        ));
-
-        // Insert the side chain into the TreeState
-        tree_state.insert_chain(chain).unwrap();
-
-        // Retrieve the blocks by their hashes
-        let retrieved_block1 = tree_state.block_by_hash(block1_hash);
-        assert_eq!(*retrieved_block1.unwrap(), block1.block);
-
-        let retrieved_block2 = tree_state.block_by_hash(block2_hash);
-        assert_eq!(*retrieved_block2.unwrap(), block2.block);
-
-        // Test block_by_hash with a random hash that doesn't exist
-        let non_existent_hash = B256::random();
-        let result = tree_state.block_by_hash(non_existent_hash);
-
-        // Ensure that no block is found
-        assert!(result.is_none());
-    }
-
-    #[test]
-    fn test_block_with_senders_by_hash() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create two side-chain blocks with random hashes
-        let block1_hash = B256::random();
-        let block2_hash = B256::random();
-
-        let mut block1: SealedBlockWithSenders = Default::default();
-        let mut block2: SealedBlockWithSenders = Default::default();
-
-        block1.block.header.set_hash(block1_hash);
-        block1.block.header.set_block_number(9);
-        block2.block.header.set_hash(block2_hash);
-        block2.block.header.set_block_number(10);
-
-        // Create a chain with these blocks
-        let chain = AppendableChain::new(Chain::new(
-            vec![block1.clone(), block2.clone()],
-            Default::default(),
-            Default::default(),
-        ));
-
-        // Insert the side chain into the TreeState
-        tree_state.insert_chain(chain).unwrap();
-
-        // Test to retrieve the blocks with senders by their hashes
-        let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash);
-        assert_eq!(*retrieved_block1.unwrap(), block1);
-
-        let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash);
-        assert_eq!(*retrieved_block2.unwrap(), block2);
-
-        // Test block_with_senders_by_hash with a random hash that doesn't exist
-        let non_existent_hash = B256::random();
-        let result = tree_state.block_with_senders_by_hash(non_existent_hash);
-
-        // Ensure that no block is found
-        assert!(result.is_none());
-    }
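// A hedged sketch (not part of the original suite) of the capacity contract
// that `buffer_limit` above implies: once more than `limit` unconnected blocks
// are buffered, the oldest entry is dropped. The toy VecDeque stands in for the
// real LRU-backed BlockBuffer, whose exact eviction order is an implementation
// detail.
#[test]
fn sketch_buffer_capacity() {
    use std::collections::VecDeque;
    let limit = 5usize;
    let mut buffer: VecDeque<u64> = VecDeque::new();
    for block_number in 0..7u64 {
        buffer.push_back(block_number);
        if buffer.len() > limit {
            buffer.pop_front(); // evict the oldest buffered block
        }
    }
    assert_eq!(buffer.len(), limit);
    assert_eq!(buffer.front(), Some(&2)); // blocks 0 and 1 were evicted
}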
- - #[test] - fn test_get_buffered_block() { - // Initialize a tree state with some dummy data - let mut tree_state = TreeState::new(0, vec![], 5); - - // Create a block with a random hash and add it to the buffer - let block_hash = B256::random(); - let mut block: SealedBlockWithSenders = Default::default(); - block.block.header.set_hash(block_hash); - - // Add the block to the buffered blocks in the TreeState - tree_state.buffered_blocks.insert_block(block.clone()); - - // Test get_buffered_block to retrieve the block by its hash - let retrieved_block = tree_state.get_buffered_block(&block_hash); - assert_eq!(*retrieved_block.unwrap(), block); - - // Test get_buffered_block with a non-existent hash - let non_existent_hash = B256::random(); - let result = tree_state.get_buffered_block(&non_existent_hash); - - // Ensure that no block is found - assert!(result.is_none()); - } - - #[test] - fn test_lowest_buffered_ancestor() { - // Initialize a tree state with some dummy data - let mut tree_state = TreeState::new(0, vec![], 5); - - // Create blocks with random hashes and set up parent-child relationships - let ancestor_hash = B256::random(); - let descendant_hash = B256::random(); - - let mut ancestor_block: SealedBlockWithSenders = Default::default(); - let mut descendant_block: SealedBlockWithSenders = Default::default(); - - ancestor_block.block.header.set_hash(ancestor_hash); - descendant_block.block.header.set_hash(descendant_hash); - descendant_block.block.header.set_parent_hash(ancestor_hash); - - // Insert the blocks into the buffer - tree_state.buffered_blocks.insert_block(ancestor_block.clone()); - tree_state.buffered_blocks.insert_block(descendant_block.clone()); - - // Test lowest_buffered_ancestor for the descendant block - let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash); - assert!(lowest_ancestor.is_some()); - assert_eq!(lowest_ancestor.unwrap().block.hash(), ancestor_hash); - - // Test lowest_buffered_ancestor with a non-existent hash - let non_existent_hash = B256::random(); - let result = tree_state.lowest_buffered_ancestor(&non_existent_hash); - - // Ensure that no ancestor is found - assert!(result.is_none()); - } - - #[test] - fn test_receipts_by_block_hash() { - // Initialize a tree state with some dummy data - let mut tree_state = TreeState::new(0, vec![], 5); - - // Create a block with a random hash and receipts - let block_hash = B256::random(); - let receipt1 = Receipt::default(); - let receipt2 = Receipt::default(); - - let mut block: SealedBlockWithSenders = Default::default(); - block.block.header.set_hash(block_hash); - - let receipts = vec![receipt1, receipt2]; - - // Create a chain with the block and its receipts - let chain = AppendableChain::new(Chain::new( - vec![block.clone()], - ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() }, - Default::default(), - )); - - // Insert the chain into the TreeState - tree_state.insert_chain(chain).unwrap(); - - // Test receipts_by_block_hash for the inserted block - let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash); - assert!(retrieved_receipts.is_some()); - - // Check if the correct receipts are returned - let receipts_ref: Vec<&Receipt> = receipts.iter().collect(); - assert_eq!(retrieved_receipts.unwrap(), receipts_ref); - - // Test receipts_by_block_hash with a non-existent block hash - let non_existent_hash = B256::random(); - let result = tree_state.receipts_by_block_hash(non_existent_hash); - - // Ensure that no receipts are found - 
assert!(result.is_none()); - } -} diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 06d228a8f82e..d0aafbd57e2b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -18,7 +18,7 @@ use reth_primitives::{ use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; -use std::{collections::BTreeMap, ops::Deref, sync::Arc, time::Instant}; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; /// Size of the broadcast channel used to notify canonical state events. @@ -181,9 +181,9 @@ impl CanonicalInMemoryState { safe: Option>, ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); - let header = in_memory_state - .head_state() - .map_or_else(SealedHeader::default, |state| state.block_ref().block().deref().clone()); + let header = in_memory_state.head_state().map_or_else(SealedHeader::default, |state| { + state.block_ref().block().clone_sealed_header() + }); let chain_info_tracker = ChainInfoTracker::new(header, finalized, safe); let (canon_state_notification_sender, _) = broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE); @@ -229,7 +229,7 @@ impl CanonicalInMemoryState { /// Returns the header corresponding to the given hash. pub fn header_by_hash(&self, hash: B256) -> Option> { - self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) + self.state_by_hash(hash).map(|block| block.block_ref().block.clone_sealed_header()) } /// Clears all entries in the in memory state. @@ -462,7 +462,7 @@ impl CanonicalInMemoryState { /// Returns the `SealedHeader` corresponding to the pending state. pub fn pending_sealed_header(&self) -> Option> { - self.pending_state().map(|h| h.block_ref().block().deref().clone()) + self.pending_state().map(|h| h.block_ref().block().clone_sealed_header()) } /// Returns the `Header` corresponding to the pending state. 
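// A small sketch of the API change applied in the hunks above: instead of
// relying on a Deref impl to reach the sealed header (`block().deref().clone()`),
// the block type exposes an explicit `clone_sealed_header()`. The toy types
// below are illustrative, not reth's real ones.
#[derive(Clone, Default)]
struct SealedHeader {
    number: u64,
}

struct RecoveredBlock {
    header: SealedHeader,
}

impl RecoveredBlock {
    // Explicit accessor: survives the removal of a Deref<Target = SealedHeader>
    // impl and keeps call sites readable.
    fn clone_sealed_header(&self) -> SealedHeader {
        self.header.clone()
    }
}

fn main() {
    let block = RecoveredBlock { header: SealedHeader { number: 42 } };
    let header = block.clone_sealed_header();
    assert_eq!(header.number, 42);
}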
@@ -640,7 +640,7 @@ impl BlockState { pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) } @@ -1321,7 +1321,7 @@ mod tests { assert_eq!(state.pending_header().unwrap(), block2.block().header().clone()); // Check the pending sealed header - assert_eq!(state.pending_sealed_header().unwrap(), block2.block().header.clone()); + assert_eq!(state.pending_sealed_header().unwrap(), block2.block().clone_sealed_header()); // Check the pending block with senders assert_eq!( diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index c70b5f154b25..8e1d26f1a7d3 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -26,7 +26,6 @@ alloy-consensus.workspace = true # misc auto_impl.workspace = true -once_cell.workspace = true serde_json.workspace = true derive_more.workspace = true @@ -47,7 +46,6 @@ std = [ "alloy-trie/std", "reth-primitives-traits/std", "alloy-consensus/std", - "once_cell/std", "alloy-rlp/std", "reth-ethereum-forks/std", "derive_more/std", diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 2e97caba07cc..59950050722c 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -11,12 +11,6 @@ extern crate alloc; -use once_cell as _; -#[cfg(not(feature = "std"))] -pub(crate) use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; -#[cfg(feature = "std")] -pub(crate) use std::sync::{LazyLock, OnceLock}; - /// Chain specific constants pub(crate) mod constants; pub use constants::MIN_TRANSACTION_GAS; @@ -40,6 +34,8 @@ pub use spec::{ DepositContract, ForkBaseFeeParams, DEV, HOLESKY, MAINNET, SEPOLIA, }; +use reth_primitives_traits::sync::OnceLock; + /// Simple utility to create a thread-safe sync cell with a value set. 
pub fn once_cell_set(value: T) -> OnceLock { let once = OnceLock::new(); diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 372891cb2cc6..c49082e7ce99 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1,6 +1,6 @@ pub use alloy_eips::eip1559::BaseFeeParams; -use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; +use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec}; use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::{ @@ -28,7 +28,10 @@ use reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{ + sync::{LazyLock, OnceLock}, + SealedHeader, +}; /// The Ethereum mainnet spec pub static MAINNET: LazyLock> = LazyLock::new(|| { @@ -762,6 +765,10 @@ impl Hardforks for ChainSpec { } impl EthereumHardforks for ChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.fork(fork) + } + fn get_final_paris_total_difficulty(&self) -> Option { self.get_final_paris_total_difficulty() } diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 2220efda5c6e..c5f6cef0d908 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true [dependencies] # reth -reth-beacon-consensus.workspace = true +reth-ethereum-consensus.workspace = true reth-chainspec.workspace = true reth-cli.workspace = true reth-ethereum-cli.workspace = true diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index adb973815731..7f5fd2d2f1b3 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -3,11 +3,10 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::{ @@ -15,6 +14,7 @@ use reth_downloaders::{ file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_ethereum_consensus::EthBeaconConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{ bodies::downloader::BodyDownloader, @@ -169,7 +169,7 @@ pub fn build_import_pipeline( ) -> eyre::Result<(Pipeline, impl Stream>)> where N: ProviderNodeTypes + CliNodeTypes, - C: Consensus, BodyTy> + 'static, + C: Consensus, BodyTy, Error = ConsensusError> + 'static, E: BlockExecutorProvider, { if !file_client.has_canonical_blocks() { diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index d1821ded826e..40708714d38b 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -5,7 +5,7 @@ use std::{path::PathBuf, sync::Arc}; use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use 
reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; @@ -73,7 +73,7 @@ pub enum Subcommands { Rlpx(rlpx::Command), } -impl> Command { +impl> Command { /// Execute `p2p` command pub async fn execute(self) -> eyre::Result<()> { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index 38edcc9ac5bc..c7423930c8b2 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_eth_wire::NetPrimitivesFor; @@ -40,7 +40,7 @@ pub enum Subcommands { Unwind(unwind::Command), } -impl> Command { +impl> Command { /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 1a4783b9d487..1fb2e2886ce9 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -6,8 +6,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::Sealable; use clap::Parser; -use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; @@ -18,6 +17,7 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_eth_wire::NetPrimitivesFor; +use reth_ethereum_consensus::EthBeaconConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; @@ -104,7 +104,7 @@ pub struct Command { network: NetworkArgs, } -impl> Command { +impl> Command { /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml deleted file mode 100644 index dd9c534a6b4c..000000000000 --- a/crates/consensus/beacon/Cargo.toml +++ /dev/null @@ -1,94 +0,0 @@ -[package] -name = "reth-beacon-consensus" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -# reth -reth-ethereum-consensus.workspace = true -reth-blockchain-tree-api.workspace = true -reth-codecs.workspace = true -reth-db-api.workspace = true -reth-primitives.workspace = true -reth-primitives-traits.workspace = true -reth-stages-api.workspace = true -reth-errors.workspace = true -reth-provider.workspace = true -reth-tasks.workspace = true -reth-payload-builder.workspace = true -reth-payload-builder-primitives.workspace = true -reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true -reth-tokio-util.workspace = true -reth-engine-primitives.workspace = true 
-reth-network-p2p.workspace = true -reth-node-types.workspace = true -reth-chainspec = { workspace = true, optional = true } - -# ethereum -alloy-primitives.workspace = true -alloy-rpc-types-engine = { workspace = true, features = ["std"] } -alloy-eips.workspace = true -alloy-consensus.workspace = true - -# async -tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true -futures.workspace = true - -# metrics -reth-metrics.workspace = true -metrics.workspace = true - -# misc -tracing.workspace = true -thiserror.workspace = true -schnellru.workspace = true -itertools.workspace = true - -[dev-dependencies] -# reth -reth-payload-builder = { workspace = true, features = ["test-utils"] } -reth-primitives = { workspace = true, features = ["test-utils"] } -reth-consensus = { workspace = true, features = ["test-utils"] } -reth-stages = { workspace = true, features = ["test-utils"] } -reth-blockchain-tree = { workspace = true, features = ["test-utils"] } -reth-db = { workspace = true, features = ["test-utils"] } -reth-db-api.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-evm = { workspace = true, features = ["test-utils"] } -reth-network-p2p = { workspace = true, features = ["test-utils"] } -reth-rpc-types-compat.workspace = true -reth-tracing.workspace = true -reth-downloaders.workspace = true -reth-ethereum-evm.workspace = true -reth-ethereum-engine-primitives.workspace = true -reth-config.workspace = true -reth-testing-utils.workspace = true -reth-exex-types.workspace = true -reth-prune-types.workspace = true -reth-chainspec.workspace = true -alloy-genesis.workspace = true -assert_matches.workspace = true - -[features] -optimism = [ - "reth-blockchain-tree/optimism", - "reth-codecs/op", - "reth-chainspec", - "reth-db-api/optimism", - "reth-db/optimism", - "reth-downloaders/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-downloaders/optimism", -] diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs deleted file mode 100644 index 0eef90ea7e97..000000000000 --- a/crates/consensus/beacon/src/engine/error.rs +++ /dev/null @@ -1,79 +0,0 @@ -use crate::engine::hooks::EngineHookError; -use alloy_rpc_types_engine::ForkchoiceUpdateError; -use reth_errors::{DatabaseError, RethError}; -use reth_stages_api::PipelineError; - -/// Beacon engine result. -pub type BeaconEngineResult = Result; - -/// The error type for the beacon consensus engine service -/// [`BeaconConsensusEngine`](crate::BeaconConsensusEngine) -/// -/// Represents all possible error cases for the beacon consensus engine. -#[derive(Debug, thiserror::Error)] -pub enum BeaconConsensusEngineError { - /// Pipeline channel closed. - #[error("pipeline channel closed")] - PipelineChannelClosed, - /// Pipeline error. - #[error(transparent)] - Pipeline(#[from] Box), - /// Pruner channel closed. - #[error("pruner channel closed")] - PrunerChannelClosed, - /// Hook error. - #[error(transparent)] - Hook(#[from] EngineHookError), - /// Common error. Wrapper around [`RethError`]. - #[error(transparent)] - Common(#[from] RethError), -} - -// box the pipeline error as it is a large enum. 
-impl From for BeaconConsensusEngineError { - fn from(e: PipelineError) -> Self { - Self::Pipeline(Box::new(e)) - } -} - -// for convenience in the beacon engine -impl From for BeaconConsensusEngineError { - fn from(e: DatabaseError) -> Self { - Self::Common(e.into()) - } -} - -/// Represents error cases for an applied forkchoice update. -/// -/// This represents all possible error cases, that must be returned as JSON RPC errors back to the -/// beacon node. -#[derive(Debug, thiserror::Error)] -pub enum BeaconForkChoiceUpdateError { - /// Thrown when a forkchoice update resulted in an error. - #[error("forkchoice update error: {0}")] - ForkchoiceUpdateError(#[from] ForkchoiceUpdateError), - /// Thrown when the engine task is unavailable/stopped. - #[error("beacon consensus engine task stopped")] - EngineUnavailable, - /// An internal error occurred, not necessarily related to the update. - #[error(transparent)] - Internal(Box), -} - -impl BeaconForkChoiceUpdateError { - /// Create a new internal error. - pub fn internal(e: E) -> Self { - Self::Internal(Box::new(e)) - } -} - -impl From for BeaconForkChoiceUpdateError { - fn from(e: RethError) -> Self { - Self::internal(e) - } -} -impl From for BeaconForkChoiceUpdateError { - fn from(e: DatabaseError) -> Self { - Self::internal(e) - } -} diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs deleted file mode 100644 index 7d6dd3cff317..000000000000 --- a/crates/consensus/beacon/src/engine/handle.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! `BeaconConsensusEngine` external API - -use crate::BeaconForkChoiceUpdateError; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; -use futures::TryFutureExt; -use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - OnForkChoiceUpdated, -}; -use reth_errors::RethResult; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; - -/// A _shareable_ beacon consensus frontend type. Used to interact with the spawned beacon consensus -/// engine task. -/// -/// See also `BeaconConsensusEngine` -#[derive(Debug, Clone)] -pub struct BeaconConsensusEngineHandle -where - Engine: EngineTypes, -{ - pub(crate) to_engine: UnboundedSender>, -} - -// === impl BeaconConsensusEngineHandle === - -impl BeaconConsensusEngineHandle -where - Engine: EngineTypes, -{ - /// Creates a new beacon consensus engine handle. - pub const fn new(to_engine: UnboundedSender>) -> Self { - Self { to_engine } - } - - /// Sends a new payload message to the beacon consensus engine and waits for a response. - /// - /// See also - pub async fn new_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); - rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? - } - - /// Sends a forkchoice update message to the beacon consensus engine and waits for a response. - /// - /// See also - pub async fn fork_choice_updated( - &self, - state: ForkchoiceState, - payload_attrs: Option, - version: EngineApiMessageVersion, - ) -> Result { - Ok(self - .send_fork_choice_updated(state, payload_attrs, version) - .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) - .await?? - .await?) 
- } - - /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to - /// wait for a response. - fn send_fork_choice_updated( - &self, - state: ForkchoiceState, - payload_attrs: Option, - version: EngineApiMessageVersion, - ) -> oneshot::Receiver> { - let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - version, - }); - rx - } - - /// Sends a transition configuration exchange message to the beacon consensus engine. - /// - /// See also - /// - /// This only notifies about the exchange. The actual exchange is done by the engine API impl - /// itself. - pub fn transition_configuration_exchanged(&self) { - let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs deleted file mode 100644 index 544a4c564b78..000000000000 --- a/crates/consensus/beacon/src/engine/hooks/controller.rs +++ /dev/null @@ -1,390 +0,0 @@ -use crate::hooks::{ - EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookError, EngineHookEvent, - EngineHooks, -}; -use std::{ - collections::VecDeque, - task::{Context, Poll}, -}; -use tracing::debug; - -#[derive(Debug)] -pub(crate) struct PolledHook { - pub(crate) name: &'static str, - pub(crate) event: EngineHookEvent, - pub(crate) db_access_level: EngineHookDBAccessLevel, -} - -/// Manages hooks under the control of the engine. -/// -/// This type polls the initialized hooks one by one, respecting the DB access level -/// (i.e. [`crate::hooks::EngineHookDBAccessLevel::ReadWrite`] that enforces running at most one -/// such hook). -pub(crate) struct EngineHooksController { - /// Collection of hooks. - /// - /// Hooks might be removed from the collection, and returned upon completion. - /// In the current implementation, it only happens when moved to `active_db_write_hook`. - hooks: VecDeque>, - /// Currently running hook with DB write access, if any. - active_db_write_hook: Option>, -} - -impl EngineHooksController { - /// Creates a new [`EngineHooksController`]. - pub(crate) fn new(hooks: EngineHooks) -> Self { - Self { hooks: hooks.inner.into(), active_db_write_hook: None } - } - - /// Polls currently running hook with DB write access, if any. - /// - /// Returns [`Poll::Ready`] if currently running hook with DB write access returned - /// an [event][`crate::hooks::EngineHookEvent`]. - /// - /// Returns [`Poll::Pending`] in all other cases: - /// 1. No hook with DB write access is running. - /// 2. Currently running hook with DB write access returned [`Poll::Pending`] on polling. - /// 3. Currently running hook with DB write access returned [`Poll::Ready`] on polling, but no - /// action to act upon. - pub(crate) fn poll_active_db_write_hook( - &mut self, - cx: &mut Context<'_>, - args: EngineHookContext, - ) -> Poll> { - let Some(mut hook) = self.active_db_write_hook.take() else { return Poll::Pending }; - - match hook.poll(cx, args)? 
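The deleted handle is an instance of the actor-handle pattern: every request carries its own `oneshot::Sender` for the reply, so the handle stays cheaply cloneable while the engine task owns all state. A minimal runnable sketch of the same round trip; `Message`, `Handle`, and the engine loop are illustrative, not reth's API:

```rust
// Actor-handle pattern: requests embed a oneshot reply channel; the caller
// awaits the reply and maps a dropped channel to "engine unavailable".
use tokio::sync::{mpsc, oneshot};

enum Message {
    NewPayload { payload: u64, tx: oneshot::Sender<bool> },
}

#[derive(Clone)]
struct Handle {
    to_engine: mpsc::UnboundedSender<Message>,
}

impl Handle {
    async fn new_payload(&self, payload: u64) -> Result<bool, &'static str> {
        let (tx, rx) = oneshot::channel();
        // A failed send only means the engine task is gone; the oneshot
        // below errors out in that case, so the result is ignored here.
        let _ = self.to_engine.send(Message::NewPayload { payload, tx });
        rx.await.map_err(|_| "engine unavailable")
    }
}

#[tokio::main]
async fn main() {
    let (to_engine, mut rx) = mpsc::unbounded_channel();
    tokio::spawn(async move {
        while let Some(Message::NewPayload { payload, tx }) = rx.recv().await {
            let _ = tx.send(payload % 2 == 0); // "validate" the payload
        }
    });
    let handle = Handle { to_engine };
    assert_eq!(handle.new_payload(2).await, Ok(true));
}
```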
{ - Poll::Ready(event) => { - let result = PolledHook { - name: hook.name(), - event, - db_access_level: hook.db_access_level(), - }; - - debug!( - target: "consensus::engine::hooks", - hook = hook.name(), - ?result, - "Polled running hook with db write access" - ); - - if result.event.is_finished() { - self.hooks.push_back(hook); - } else { - self.active_db_write_hook = Some(hook); - } - - return Poll::Ready(Ok(result)) - } - Poll::Pending => { - self.active_db_write_hook = Some(hook); - } - } - - Poll::Pending - } - - /// Polls next engine from the collection. - /// - /// Returns [`Poll::Ready`] if next hook returned an [event][`crate::hooks::EngineHookEvent`]. - /// - /// Returns [`Poll::Pending`] in all other cases: - /// 1. Next hook is [`Option::None`], i.e. taken, meaning it's currently running and has a DB - /// write access. - /// 2. Next hook needs a DB write access, but either there's another hook with DB write access - /// running, or `db_write_active` passed into arguments is `true`. - /// 3. Next hook returned [`Poll::Pending`] on polling. - /// 4. Next hook returned [`Poll::Ready`] on polling, but no action to act upon. - pub(crate) fn poll_next_hook( - &mut self, - cx: &mut Context<'_>, - args: EngineHookContext, - db_write_active: bool, - ) -> Poll> { - let Some(mut hook) = self.hooks.pop_front() else { return Poll::Pending }; - - let result = self.poll_next_hook_inner(cx, &mut hook, args, db_write_active); - - if matches!( - result, - Poll::Ready(Ok(PolledHook { - event: EngineHookEvent::Started, - db_access_level: EngineHookDBAccessLevel::ReadWrite, - .. - })) - ) { - // If a read-write hook started, set `active_db_write_hook` to it - self.active_db_write_hook = Some(hook); - } else { - // Otherwise, push it back to the collection of hooks to poll it next time - self.hooks.push_back(hook); - } - - result - } - - fn poll_next_hook_inner( - &self, - cx: &mut Context<'_>, - hook: &mut Box, - args: EngineHookContext, - db_write_active: bool, - ) -> Poll> { - // Hook with DB write access level is not allowed to run due to any of the following - // reasons: - // - An already running hook with DB write access level - // - Active DB write according to passed argument - // - Missing a finalized block number. We might be on an optimistic sync scenario where we - // cannot skip the FCU with the finalized hash, otherwise CL might misbehave. - if hook.db_access_level().is_read_write() && - (self.active_db_write_hook.is_some() || - db_write_active || - args.finalized_block_number.is_none()) - { - return Poll::Pending - } - - if let Poll::Ready(event) = hook.poll(cx, args)? { - let result = - PolledHook { name: hook.name(), event, db_access_level: hook.db_access_level() }; - - debug!( - target: "consensus::engine::hooks", - hook = hook.name(), - ?result, - "Polled next hook" - ); - - return Poll::Ready(Ok(result)) - } - debug!(target: "consensus::engine::hooks", hook = hook.name(), "Next hook is not ready"); - - Poll::Pending - } - - /// Returns a running hook with DB write access, if there's any. 
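The scheduling rule implemented by the polling methods above can be condensed to a few lines: hooks rotate through a queue, a read-write hook is removed from the queue while it runs, and no read-write hook may start while another DB write is in flight. A self-contained model of that rule; all names are illustrative, not reth's types:

```rust
use std::collections::VecDeque;

#[derive(Clone, Copy, PartialEq)]
enum Access { ReadOnly, ReadWrite }

struct Hook { name: &'static str, access: Access }

struct Controller {
    queue: VecDeque<Hook>,
    active_rw: Option<Hook>,
}

impl Controller {
    /// Pops the next hook. A read-write hook is requeued (not started) while
    /// another read-write hook is active or an external DB write is ongoing.
    fn poll_next(&mut self, db_write_active: bool) -> Option<&'static str> {
        let hook = self.queue.pop_front()?;
        if hook.access == Access::ReadWrite && (self.active_rw.is_some() || db_write_active) {
            self.queue.push_back(hook);
            return None;
        }
        let name = hook.name;
        if hook.access == Access::ReadWrite {
            // Taken out of the queue until it reports `Finished`.
            self.active_rw = Some(hook);
        } else {
            self.queue.push_back(hook);
        }
        Some(name)
    }
}

fn main() {
    let mut c = Controller {
        queue: VecDeque::from([
            Hook { name: "prune", access: Access::ReadWrite },
            Hook { name: "static-file", access: Access::ReadOnly },
            Hook { name: "other-rw", access: Access::ReadWrite },
        ]),
        active_rw: None,
    };
    assert_eq!(c.poll_next(false), Some("prune")); // read-write hook starts
    assert_eq!(c.poll_next(false), Some("static-file")); // read-only runs in parallel
    assert_eq!(c.poll_next(false), None); // second read-write hook must wait
}
```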
- pub(crate) fn active_db_write_hook(&self) -> Option<&dyn EngineHook> { - self.active_db_write_hook.as_ref().map(|hook| hook.as_ref()) - } -} - -#[cfg(test)] -mod tests { - use crate::hooks::{ - EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookEvent, EngineHooks, - EngineHooksController, - }; - use futures::poll; - use reth_errors::{RethError, RethResult}; - use std::{ - collections::VecDeque, - future::poll_fn, - task::{Context, Poll}, - }; - - struct TestHook { - results: VecDeque>, - name: &'static str, - access_level: EngineHookDBAccessLevel, - } - - impl TestHook { - fn new_ro(name: &'static str) -> Self { - Self { - results: Default::default(), - name, - access_level: EngineHookDBAccessLevel::ReadOnly, - } - } - fn new_rw(name: &'static str) -> Self { - Self { - results: Default::default(), - name, - access_level: EngineHookDBAccessLevel::ReadWrite, - } - } - - fn add_result(&mut self, result: RethResult) { - self.results.push_back(result); - } - } - - impl EngineHook for TestHook { - fn name(&self) -> &'static str { - self.name - } - - fn poll( - &mut self, - _cx: &mut Context<'_>, - _ctx: EngineHookContext, - ) -> Poll> { - self.results.pop_front().map_or(Poll::Pending, Poll::Ready) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - self.access_level - } - } - - #[tokio::test] - async fn poll_active_db_write_hook() { - let mut controller = EngineHooksController::new(EngineHooks::new()); - - let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; - - // No currently running hook with DB write access is set - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert!(result.is_pending()); - - // Currently running hook with DB write access returned `Pending` on polling - controller.active_db_write_hook = Some(Box::new(TestHook::new_rw("read-write"))); - - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert!(result.is_pending()); - - // Currently running hook with DB write access returned `Ready` on polling, but didn't - // return `EngineHookEvent::Finished` yet. - // Currently running hooks with DB write should still be set. - let mut hook = TestHook::new_rw("read-write"); - hook.add_result(Ok(EngineHookEvent::Started)); - controller.active_db_write_hook = Some(Box::new(hook)); - - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.event.is_started() && polled_hook.db_access_level.is_read_write() - }), - Poll::Ready(true) - ); - assert!(controller.active_db_write_hook.is_some()); - assert!(controller.hooks.is_empty()); - - // Currently running hook with DB write access returned `Ready` on polling and - // `EngineHookEvent::Finished` inside. - // Currently running hooks with DB write should be moved to collection of hooks. 
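The `TestHook` above exercises a hand-written `poll` without a runtime or waker plumbing: `std::future::poll_fn` adapts the poll function into a future and `futures::poll!` polls it exactly once. A stripped-down version of that harness, with an illustrative `TestHook`:

```rust
use futures::poll;
use std::collections::VecDeque;
use std::future::poll_fn;
use std::task::Poll;

struct TestHook {
    results: VecDeque<&'static str>,
}

impl TestHook {
    /// Mirrors the queued-results trick above: pop a result or stay pending.
    fn poll(&mut self) -> Poll<&'static str> {
        self.results.pop_front().map_or(Poll::Pending, Poll::Ready)
    }
}

#[tokio::main]
async fn main() {
    let mut hook = TestHook { results: VecDeque::from(["started"]) };
    // First poll consumes the queued result; the second finds the queue empty.
    assert_eq!(poll!(poll_fn(|_cx| hook.poll())), Poll::Ready("started"));
    assert!(poll!(poll_fn(|_cx| hook.poll())).is_pending());
}
```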
- let mut hook = TestHook::new_rw("read-write"); - hook.add_result(Ok(EngineHookEvent::Finished(Ok(())))); - controller.active_db_write_hook = Some(Box::new(hook)); - - let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.event.is_finished() && polled_hook.db_access_level.is_read_write() - }), - Poll::Ready(true) - ); - assert!(controller.active_db_write_hook.is_none()); - assert!(controller.hooks.pop_front().is_some()); - } - - #[tokio::test] - async fn poll_next_hook_db_write_active() { - let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; - - let mut hook_rw = TestHook::new_rw("read-write"); - hook_rw.add_result(Ok(EngineHookEvent::Started)); - - let hook_ro_name = "read-only"; - let mut hook_ro = TestHook::new_ro(hook_ro_name); - hook_ro.add_result(Ok(EngineHookEvent::Started)); - - let mut hooks = EngineHooks::new(); - hooks.add(hook_rw); - hooks.add(hook_ro); - let mut controller = EngineHooksController::new(hooks); - - // Read-write hook can't be polled when external DB write is active - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, true))); - assert!(result.is_pending()); - assert!(controller.active_db_write_hook.is_none()); - - // Read-only hook can be polled when external DB write is active - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, true))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.name == hook_ro_name && - polled_hook.event.is_started() && - polled_hook.db_access_level.is_read_only() - }), - Poll::Ready(true) - ); - } - - #[tokio::test] - async fn poll_next_hook_db_write_inactive() { - let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) }; - - let hook_rw_1_name = "read-write-1"; - let mut hook_rw_1 = TestHook::new_rw(hook_rw_1_name); - hook_rw_1.add_result(Ok(EngineHookEvent::Started)); - - let hook_rw_2_name = "read-write-2"; - let mut hook_rw_2 = TestHook::new_rw(hook_rw_2_name); - hook_rw_2.add_result(Ok(EngineHookEvent::Started)); - - let hook_ro_name = "read-only"; - let mut hook_ro = TestHook::new_ro(hook_ro_name); - hook_ro.add_result(Ok(EngineHookEvent::Started)); - hook_ro.add_result(Err(RethError::msg("something went wrong"))); - - let mut hooks = EngineHooks::new(); - hooks.add(hook_rw_1); - hooks.add(hook_rw_2); - hooks.add(hook_ro); - - let mut controller = EngineHooksController::new(hooks); - let hooks_len = controller.hooks.len(); - - // Read-write hook can be polled because external DB write is not active - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_1_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.name == hook_rw_1_name && - polled_hook.event.is_started() && - polled_hook.db_access_level.is_read_write() - }), - Poll::Ready(true) - ); - assert_eq!( - controller.active_db_write_hook.as_ref().map(|hook| hook.name()), - Some(hook_rw_1_name) - ); - - // Read-write hook cannot be polled because another read-write hook is running - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_2_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert!(result.is_pending()); - - // Read-only hook can be polled in parallel with already running read-write hook - 
assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_ro_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert_eq!( - result.map(|result| { - let polled_hook = result.unwrap(); - polled_hook.name == hook_ro_name && - polled_hook.event.is_started() && - polled_hook.db_access_level.is_read_only() - }), - Poll::Ready(true) - ); - - // Read-write hook still cannot be polled because another read-write hook is running - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_2_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert!(result.is_pending()); - - // Read-only hook has finished with error - assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_ro_name)); - let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false))); - assert_eq!(result.map(|result| { result.is_err() }), Poll::Ready(true)); - - assert!(controller.active_db_write_hook.is_some()); - assert_eq!(controller.hooks.len(), hooks_len - 1) - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs deleted file mode 100644 index 828a6f968500..000000000000 --- a/crates/consensus/beacon/src/engine/hooks/mod.rs +++ /dev/null @@ -1,129 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_errors::{RethError, RethResult}; -use std::{ - fmt, - task::{Context, Poll}, -}; - -mod controller; -pub(crate) use controller::{EngineHooksController, PolledHook}; - -mod prune; -pub use prune::PruneHook; - -mod static_file; -pub use static_file::StaticFileHook; - -/// Collection of [engine hooks][`EngineHook`]. -#[derive(Default)] -pub struct EngineHooks { - inner: Vec>, -} - -impl fmt::Debug for EngineHooks { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EngineHooks").field("inner", &self.inner.len()).finish() - } -} - -impl EngineHooks { - /// Creates a new empty collection of [engine hooks][`EngineHook`]. - pub fn new() -> Self { - Self { inner: Vec::new() } - } - - /// Adds a new [engine hook][`EngineHook`] to the collection. - pub fn add(&mut self, hook: H) { - self.inner.push(Box::new(hook)) - } -} - -/// Hook that will be run during the main loop of -/// [consensus engine][`crate::engine::BeaconConsensusEngine`]. -pub trait EngineHook: Send + Sync + 'static { - /// Returns a human-readable name for the hook. - fn name(&self) -> &'static str; - - /// Advances the hook execution, emitting an [event][`EngineHookEvent`]. - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll>; - - /// Returns [db access level][`EngineHookDBAccessLevel`] the hook needs. - fn db_access_level(&self) -> EngineHookDBAccessLevel; -} - -/// Engine context passed to the [hook polling function][`EngineHook::poll`]. -#[derive(Copy, Clone, Debug)] -pub struct EngineHookContext { - /// Tip block number. - pub tip_block_number: BlockNumber, - /// Finalized block number, if known. - pub finalized_block_number: Option, -} - -/// An event emitted when [hook][`EngineHook`] is polled. -#[derive(Debug)] -pub enum EngineHookEvent { - /// Hook is not ready. - /// - /// If this is returned, the hook is idle. - NotReady, - /// Hook started. - /// - /// If this is returned, the hook is running. - Started, - /// Hook finished. - /// - /// If this is returned, the hook is idle. 
- Finished(Result<(), EngineHookError>), -} - -impl EngineHookEvent { - /// Returns `true` if the event is [`EngineHookEvent::Started`]. - pub const fn is_started(&self) -> bool { - matches!(self, Self::Started) - } - - /// Returns `true` if the event is [`EngineHookEvent::Finished`]. - pub const fn is_finished(&self) -> bool { - matches!(self, Self::Finished(_)) - } -} - -/// An error returned by [hook][`EngineHook`]. -#[derive(Debug, thiserror::Error)] -pub enum EngineHookError { - /// Hook channel closed. - #[error("hook channel closed")] - ChannelClosed, - /// Common error. Wrapper around [`RethError`]. - #[error(transparent)] - Common(#[from] RethError), - /// An internal error occurred. - #[error(transparent)] - Internal(#[from] Box), -} - -/// Level of database access the hook needs for execution. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum EngineHookDBAccessLevel { - /// Read-only database access. - ReadOnly, - /// Read-write database access. - ReadWrite, -} - -impl EngineHookDBAccessLevel { - /// Returns `true` if the hook needs read-only access to the database. - pub const fn is_read_only(&self) -> bool { - matches!(self, Self::ReadOnly) - } - - /// Returns `true` if the hook needs read-write access to the database. - pub const fn is_read_write(&self) -> bool { - matches!(self, Self::ReadWrite) - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs deleted file mode 100644 index 409fc98b80bb..000000000000 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ /dev/null @@ -1,203 +0,0 @@ -//! Prune hook for the engine implementation. - -use crate::{ - engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, - hooks::EngineHookDBAccessLevel, -}; -use alloy_primitives::BlockNumber; -use futures::FutureExt; -use metrics::Counter; -use reth_errors::{RethError, RethResult}; -use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter}; -use reth_prune::{Pruner, PrunerError, PrunerWithResult}; -use reth_tasks::TaskSpawner; -use std::{ - fmt::{self, Debug}, - task::{ready, Context, Poll}, -}; -use tokio::sync::oneshot; - -/// Manages pruning under the control of the engine. -/// -/// This type controls the [Pruner]. -pub struct PruneHook { - /// The current state of the pruner. - pruner_state: PrunerState, - /// The type that can spawn the pruner task. - pruner_task_spawner: Box, - metrics: Metrics, -} - -impl fmt::Debug for PruneHook -where - PF: DatabaseProviderFactory + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PruneHook") - .field("pruner_state", &self.pruner_state) - .field("metrics", &self.metrics) - .finish() - } -} - -impl PruneHook { - /// Create a new instance - pub fn new( - pruner: Pruner, - pruner_task_spawner: Box, - ) -> Self { - Self { - pruner_state: PrunerState::Idle(Some(pruner)), - pruner_task_spawner, - metrics: Metrics::default(), - } - } - - /// Advances the pruner state. - /// - /// This checks for the result in the channel, or returns pending if the pruner is idle. 
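The hand-written `Debug` for `PruneHook` above exists because the boxed `TaskSpawner` field has no `Debug` impl; the impl prints only the fields that do. The same technique in miniature, with an illustrative boxed closure standing in for the spawner:

```rust
use std::fmt;

struct PruneHook {
    runs: u64,
    task_spawner: Box<dyn Fn() + Send>, // not `Debug`, so skipped below
}

impl fmt::Debug for PruneHook {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `finish_non_exhaustive` renders a trailing `..` for skipped fields.
        f.debug_struct("PruneHook").field("runs", &self.runs).finish_non_exhaustive()
    }
}

fn main() {
    let hook = PruneHook { runs: 3, task_spawner: Box::new(|| {}) };
    (hook.task_spawner)(); // use the field so it is not dead code
    println!("{hook:?}"); // PruneHook { runs: 3, .. }
}
```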
- fn poll_pruner(&mut self, cx: &mut Context<'_>) -> Poll> { - let result = match self.pruner_state { - PrunerState::Idle(_) => return Poll::Pending, - PrunerState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - - let event = match result { - Ok((pruner, result)) => { - self.pruner_state = PrunerState::Idle(Some(pruner)); - - match result { - Ok(_) => EngineHookEvent::Finished(Ok(())), - Err(err) => EngineHookEvent::Finished(Err(err.into())), - } - } - Err(_) => { - // failed to receive the pruner - EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed)) - } - }; - - Poll::Ready(Ok(event)) - } -} - -impl PruneHook -where - PF: DatabaseProviderFactory - + 'static, -{ - /// This will try to spawn the pruner if it is idle: - /// 1. Check if pruning is needed through [`Pruner::is_pruning_needed`]. - /// - /// 2.1. If pruning is needed, pass tip block number to the [`Pruner::run`] and spawn it in a - /// separate task. Set pruner state to [`PrunerState::Running`]. - /// 2.2. If pruning is not needed, set pruner state back to [`PrunerState::Idle`]. - /// - /// If pruner is already running, do nothing. - fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { - match &mut self.pruner_state { - PrunerState::Idle(pruner) => { - let mut pruner = pruner.take()?; - - // Check tip for pruning - if pruner.is_pruning_needed(tip_block_number) { - let (tx, rx) = oneshot::channel(); - self.pruner_task_spawner.spawn_critical_blocking( - "pruner task", - Box::pin(async move { - let result = pruner.run(tip_block_number); - let _ = tx.send((pruner, result)); - }), - ); - self.metrics.runs_total.increment(1); - self.pruner_state = PrunerState::Running(rx); - - Some(EngineHookEvent::Started) - } else { - self.pruner_state = PrunerState::Idle(Some(pruner)); - Some(EngineHookEvent::NotReady) - } - } - PrunerState::Running(_) => None, - } - } -} - -impl EngineHook for PruneHook -where - PF: DatabaseProviderFactory - + 'static, -{ - fn name(&self) -> &'static str { - "Prune" - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll> { - // Try to spawn a pruner - match self.try_spawn_pruner(ctx.tip_block_number) { - Some(EngineHookEvent::NotReady) => return Poll::Pending, - Some(event) => return Poll::Ready(Ok(event)), - None => (), - } - - // Poll pruner and check its status - self.poll_pruner(cx) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - EngineHookDBAccessLevel::ReadWrite - } -} - -/// The possible pruner states within the sync controller. -/// -/// [`PrunerState::Idle`] means that the pruner is currently idle. -/// [`PrunerState::Running`] means that the pruner is currently running. -/// -/// NOTE: The differentiation between these two states is important, because when the pruner is -/// running, it acquires the write lock over the database. This means that we cannot forward to the -/// blockchain tree any messages that would result in database writes, since it would result in a -/// deadlock. -enum PrunerState { - /// Pruner is idle. 
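The `PrunerState` described above solves an ownership problem: the pruner must be moved into a blocking task, yet be available again for the next run, so it is sent back through the same oneshot channel that carries the result. A runnable sketch of that round trip, with an illustrative `Pruner` in place of reth's:

```rust
use tokio::sync::oneshot;

struct Pruner { runs: u64 }

impl Pruner {
    fn run(&mut self, tip: u64) -> Result<u64, &'static str> {
        self.runs += 1;
        Ok(tip)
    }
}

enum PrunerState {
    Idle(Option<Pruner>),
    Running(oneshot::Receiver<(Pruner, Result<u64, &'static str>)>),
}

#[tokio::main]
async fn main() {
    let mut state = PrunerState::Idle(Some(Pruner { runs: 0 }));

    // Spawn: take ownership out of `Idle` and move the pruner into the task.
    if let PrunerState::Idle(slot) = &mut state {
        let mut pruner = slot.take().expect("pruner present while idle");
        let (tx, rx) = oneshot::channel();
        let _ = tokio::task::spawn_blocking(move || {
            let result = pruner.run(42);
            let _ = tx.send((pruner, result)); // hand the pruner back
        });
        state = PrunerState::Running(rx);
    }

    // Completion: receive the pruner back and return to `Idle`.
    if let PrunerState::Running(rx) = &mut state {
        let (pruner, result) = rx.await.expect("worker dropped the channel");
        assert_eq!(result, Ok(42));
        assert_eq!(pruner.runs, 1);
        state = PrunerState::Idle(Some(pruner));
    }
    assert!(matches!(state, PrunerState::Idle(Some(_))));
}
```

The `Idle(Option<...>)` shape is what makes the take-and-return dance possible without replacing the whole enum on spawn.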
- Idle(Option>), - /// Pruner is running and waiting for a response - Running(oneshot::Receiver>), -} - -impl fmt::Debug for PrunerState -where - PF: DatabaseProviderFactory + Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Idle(f0) => f.debug_tuple("Idle").field(&f0).finish(), - Self::Running(f0) => f.debug_tuple("Running").field(&f0).finish(), - } - } -} - -#[derive(reth_metrics::Metrics)] -#[metrics(scope = "consensus.engine.prune")] -struct Metrics { - /// The number of times the pruner was run. - runs_total: Counter, -} - -impl From for EngineHookError { - fn from(err: PrunerError) -> Self { - match err { - PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => { - Self::Internal(Box::new(err)) - } - PrunerError::Database(err) => RethError::Database(err).into(), - PrunerError::Provider(err) => RethError::Provider(err).into(), - } - } -} diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs deleted file mode 100644 index 99387492c3bf..000000000000 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ /dev/null @@ -1,209 +0,0 @@ -//! `StaticFile` hook for the engine implementation. - -use crate::{ - engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent}, - hooks::EngineHookDBAccessLevel, -}; -use alloy_primitives::BlockNumber; -use futures::FutureExt; -use reth_codecs::Compact; -use reth_db_api::table::Value; -use reth_errors::RethResult; -use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives}; -use reth_provider::{ - BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, - StaticFileProviderFactory, -}; -use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult}; -use reth_tasks::TaskSpawner; -use std::task::{ready, Context, Poll}; -use tokio::sync::oneshot; -use tracing::trace; - -/// Manages producing static files under the control of the engine. -/// -/// This type controls the [`StaticFileProducer`]. -#[derive(Debug)] -pub struct StaticFileHook { - /// The current state of the `static_file_producer`. - state: StaticFileProducerState, - /// The type that can spawn the `static_file_producer` task. - task_spawner: Box, -} - -impl StaticFileHook -where - Provider: StaticFileProviderFactory - + DatabaseProviderFactory< - Provider: StaticFileProviderFactory< - Primitives: NodePrimitives< - SignedTx: Value + Compact, - BlockHeader: Value + Compact, - Receipt: Value + Compact, - >, - > + StageCheckpointReader - + BlockReader - + ChainStateBlockReader, - > + 'static, -{ - /// Create a new instance - pub fn new( - static_file_producer: StaticFileProducer, - task_spawner: Box, - ) -> Self { - Self { state: StaticFileProducerState::Idle(Some(static_file_producer)), task_spawner } - } - - /// Advances the `static_file_producer` state. - /// - /// This checks for the result in the channel, or returns pending if the `static_file_producer` - /// is idle. 
- fn poll_static_file_producer( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { - let result = match self.state { - StaticFileProducerState::Idle(_) => return Poll::Pending, - StaticFileProducerState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - - let event = match result { - Ok((static_file_producer, result)) => { - self.state = StaticFileProducerState::Idle(Some(static_file_producer)); - - match result { - Ok(_) => EngineHookEvent::Finished(Ok(())), - Err(err) => EngineHookEvent::Finished(Err(EngineHookError::Common(err.into()))), - } - } - Err(_) => { - // failed to receive the static_file_producer - EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed)) - } - }; - - Poll::Ready(Ok(event)) - } - - /// This will try to spawn the `static_file_producer` if it is idle: - /// 1. Check if producing static files is needed through - /// [`StaticFileProducer::get_static_file_targets`](reth_static_file::StaticFileProducerInner::get_static_file_targets) - /// and then [`StaticFileTargets::any`](reth_static_file::StaticFileTargets::any). - /// - /// 2.1. If producing static files is needed, pass static file request to the - /// [`StaticFileProducer::run`](reth_static_file::StaticFileProducerInner::run) and - /// spawn it in a separate task. Set static file producer state to - /// [`StaticFileProducerState::Running`]. - /// 2.2. If producing static files is not needed, set static file producer state back to - /// [`StaticFileProducerState::Idle`]. - /// - /// If `static_file_producer` is already running, do nothing. - fn try_spawn_static_file_producer( - &mut self, - finalized_block_number: BlockNumber, - ) -> RethResult> { - Ok(match &mut self.state { - StaticFileProducerState::Idle(static_file_producer) => { - let Some(static_file_producer) = static_file_producer.take() else { - trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer is already running but the state is idle"); - return Ok(None) - }; - - let Some(locked_static_file_producer) = static_file_producer.try_lock_arc() else { - trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer lock is already taken"); - return Ok(None) - }; - - let finalized_block_number = locked_static_file_producer - .last_finalized_block()? - .map(|on_disk| finalized_block_number.min(on_disk)) - .unwrap_or(finalized_block_number); - - let targets = - locked_static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: Some(finalized_block_number), - receipts: Some(finalized_block_number), - transactions: Some(finalized_block_number), - })?; - - // Check if the moving data to static files has been requested. 
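Note the clamping step above: the requested finalized target is capped at the last finalized block already recorded on disk, falling back to the request when nothing is recorded yet. As a plain function over block numbers (illustrative, detached from the provider types):

```rust
// Cap the static-file target at the on-disk finalized block, if any.
fn clamp_target(requested: u64, last_on_disk: Option<u64>) -> u64 {
    last_on_disk.map(|on_disk| requested.min(on_disk)).unwrap_or(requested)
}

fn main() {
    assert_eq!(clamp_target(100, Some(90)), 90); // cannot run past disk state
    assert_eq!(clamp_target(100, Some(150)), 100);
    assert_eq!(clamp_target(100, None), 100); // nothing finalized yet
}
```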
- if targets.any() { - let (tx, rx) = oneshot::channel(); - self.task_spawner.spawn_critical_blocking( - "static_file_producer task", - Box::pin(async move { - let result = locked_static_file_producer.run(targets); - let _ = tx.send((static_file_producer, result)); - }), - ); - self.state = StaticFileProducerState::Running(rx); - - Some(EngineHookEvent::Started) - } else { - self.state = StaticFileProducerState::Idle(Some(static_file_producer)); - Some(EngineHookEvent::NotReady) - } - } - StaticFileProducerState::Running(_) => None, - }) - } -} - -impl EngineHook for StaticFileHook -where - Provider: StaticFileProviderFactory - + DatabaseProviderFactory< - Provider: StaticFileProviderFactory< - Primitives: NodePrimitives< - SignedTx: Value + Compact, - BlockHeader: Value + Compact, - Receipt: Value + Compact, - >, - > + StageCheckpointReader - + BlockReader - + ChainStateBlockReader, - > + 'static, -{ - fn name(&self) -> &'static str { - "StaticFile" - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll> { - let Some(finalized_block_number) = ctx.finalized_block_number else { - trace!(target: "consensus::engine::hooks::static_file", ?ctx, "Finalized block number is not available"); - return Poll::Pending - }; - - // Try to spawn a static_file_producer - match self.try_spawn_static_file_producer(finalized_block_number)? { - Some(EngineHookEvent::NotReady) => return Poll::Pending, - Some(event) => return Poll::Ready(Ok(event)), - None => (), - } - - // Poll static_file_producer and check its status - self.poll_static_file_producer(cx) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - EngineHookDBAccessLevel::ReadOnly - } -} - -/// The possible `static_file_producer` states within the sync controller. -/// -/// [`StaticFileProducerState::Idle`] means that the static file producer is currently idle. -/// [`StaticFileProducerState::Running`] means that the static file producer is currently running. -#[derive(Debug)] -enum StaticFileProducerState { - /// [`StaticFileProducer`] is idle. - Idle(Option>), - /// [`StaticFileProducer`] is running and waiting for a response - Running(oneshot::Receiver>), -} diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs deleted file mode 100644 index 67bae71be8b7..000000000000 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ /dev/null @@ -1,32 +0,0 @@ -use reth_metrics::{ - metrics::{Counter, Gauge, Histogram}, - Metrics, -}; - -/// Beacon consensus engine metrics. -#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineMetrics { - /// The number of times the pipeline was run. - pub(crate) pipeline_runs: Counter, - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, - /// Latency for making canonical already canonical block - pub(crate) make_canonical_already_canonical_latency: Histogram, - /// Latency for making canonical committed block - pub(crate) make_canonical_committed_latency: Histogram, - /// Latency for making canonical returns error - pub(crate) make_canonical_error_latency: Histogram, - /// Latency for all making canonical results - pub(crate) make_canonical_latency: Histogram, -} - -/// Metrics for the `EngineSyncController`. 
-#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineSyncMetrics { - /// How many blocks are currently being downloaded. - pub(crate) active_block_downloads: Gauge, -} diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs deleted file mode 100644 index 2dc139acedb4..000000000000 --- a/crates/consensus/beacon/src/engine/mod.rs +++ /dev/null @@ -1,2962 +0,0 @@ -use alloy_consensus::{BlockHeader, Header}; -use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; -use alloy_primitives::{BlockNumber, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, -}; -use futures::{stream::BoxStream, Future, StreamExt}; -use itertools::Either; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, -}; -use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus, OnForkChoiceUpdated, - PayloadTypes, -}; -use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState}, - EthBlockClient, -}; -use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypesWithEngine}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_builder_primitives::PayloadBuilder; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, SealedBlock, SealedHeader}; -use reth_provider::{ - providers::{ProviderNodeTypes, TreeNodeTypes}, - BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, - StageCheckpointReader, -}; -use reth_stages_api::{ControlFlow, Pipeline, PipelineTarget, StageId}; -use reth_tasks::TaskSpawner; -use reth_tokio_util::EventSender; -use std::{ - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::{Duration, Instant}, -}; -use tokio::sync::{ - mpsc::{self, UnboundedSender}, - oneshot, -}; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::*; - -mod error; -pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError}; - -mod invalid_headers; -pub use invalid_headers::InvalidHeaderCache; - -mod event; -pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; - -mod handle; -pub use handle::BeaconConsensusEngineHandle; - -mod metrics; -use metrics::EngineMetrics; - -pub mod sync; -use sync::{EngineSyncController, EngineSyncEvent}; - -/// Hooks for running during the main loop of -/// [consensus engine][`crate::engine::BeaconConsensusEngine`]. -pub mod hooks; -use hooks::{EngineHookContext, EngineHookEvent, EngineHooks, EngineHooksController, PolledHook}; - -#[cfg(test)] -pub mod test_utils; - -/// The maximum number of invalid headers that can be tracked by the engine. -const MAX_INVALID_HEADERS: u32 = 512u32; - -/// The largest gap for which the tree will be used for sync. See docs for `pipeline_run_threshold` -/// for more information. -/// -/// This is the default threshold, the distance to the head that the tree will be used for sync. -/// If the distance exceeds this threshold, the pipeline will be used for sync. 
-pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; - -/// Helper trait expressing requirements for node types to be used in engine. -pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} - -impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} - -/// Represents a pending forkchoice update. -/// -/// This type encapsulates the necessary components for a pending forkchoice update -/// in the context of a beacon consensus engine. -/// -/// It consists of: -/// - The current fork choice state. -/// - Optional payload attributes specific to the engine type. -/// - Sender for the result of an oneshot channel, conveying the outcome of the fork choice update. -type PendingForkchoiceUpdate = - (ForkchoiceState, Option, oneshot::Sender>); - -/// The beacon consensus engine is the driver that switches between historical and live sync. -/// -/// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are -/// received by Engine API (JSON-RPC). -/// -/// The consensus engine is idle until it receives the first -/// [`BeaconEngineMessage::ForkchoiceUpdated`] message from the CL which would initiate the sync. At -/// first, the consensus engine would run the [Pipeline] until the latest known block hash. -/// Afterward, it would attempt to create/restore the [`BlockchainTreeEngine`] from the blocks -/// that are currently available. In case the restoration is successful, the consensus engine would -/// run in a live sync mode, populating the [`BlockchainTreeEngine`] with new blocks as they arrive -/// via engine API and downloading any missing blocks from the network to fill potential gaps. -/// -/// The consensus engine has two data input sources: -/// -/// ## New Payload (`engine_newPayloadV{}`) -/// -/// The engine receives new payloads from the CL. If the payload is connected to the canonical -/// chain, it will be fully validated added to a chain in the [`BlockchainTreeEngine`]: `VALID` -/// -/// If the payload's chain is disconnected (at least 1 block is missing) then it will be buffered: -/// `SYNCING` ([`BlockStatus::Disconnected`]). -/// -/// ## Forkchoice Update (FCU) (`engine_forkchoiceUpdatedV{}`) -/// -/// This contains the latest forkchoice state and the payload attributes. The engine will attempt to -/// make a new canonical chain based on the `head_hash` of the update and trigger payload building -/// if the `payload_attrs` are present and the FCU is `VALID`. -/// -/// The `head_hash` forms a chain by walking backwards from the `head_hash` towards the canonical -/// blocks of the chain. -/// -/// Making a new canonical chain can result in the following relevant outcomes: -/// -/// ### The chain is connected -/// -/// All blocks of the `head_hash`'s chain are present in the [`BlockchainTreeEngine`] and are -/// committed to the canonical chain. This also includes reorgs. -/// -/// ### The chain is disconnected -/// -/// In this case the [`BlockchainTreeEngine`] doesn't know how the new chain connects to the -/// existing canonical chain. It could be a simple commit (new blocks extend the current head) or a -/// re-org that requires unwinding the canonical chain. -/// -/// This further distinguishes between two variants: -/// -/// #### `head_hash`'s block exists -/// -/// The `head_hash`'s block was already received/downloaded, but at least one block is missing to -/// form a _connected_ chain. 
The engine will attempt to download the missing blocks from the -/// network by walking backwards (`parent_hash`), and then try to make the block canonical as soon -/// as the chain becomes connected. -/// -/// However, it still can be the case that the chain and the FCU is `INVALID`. -/// -/// #### `head_hash` block is missing -/// -/// This is similar to the previous case, but the `head_hash`'s block is missing. At which point the -/// engine doesn't know where the new head will point to: new chain could be a re-org or a simple -/// commit. The engine will download the missing head first and then proceed as in the previous -/// case. -/// -/// # Panics -/// -/// If the future is polled more than once. Leads to undefined state. -/// -/// Note: soon deprecated. See `reth_engine_service::EngineService`. -#[must_use = "Future does nothing unless polled"] -#[allow(missing_debug_implementations)] -pub struct BeaconConsensusEngine -where - N: EngineNodeTypes, - Client: EthBlockClient, - BT: BlockchainTreeEngine - + BlockReader - + BlockIdReader - + CanonChainTracker - + StageCheckpointReader, -{ - /// Controls syncing triggered by engine updates. - sync: EngineSyncController, - /// The type we can use to query both the database and the blockchain tree. - blockchain: BT, - /// Used for emitting updates about whether the engine is syncing or not. - sync_state_updater: Box, - /// The Engine API message receiver. - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, - /// A clone of the handle - handle: BeaconConsensusEngineHandle, - /// Tracks the received forkchoice state updates received by the CL. - forkchoice_state_tracker: ForkchoiceStateTracker, - /// The payload store. - payload_builder: PayloadBuilderHandle, - /// Validator for execution payloads - payload_validator: ExecutionPayloadValidator, - /// Current blockchain tree action. - blockchain_tree_action: Option>, - /// Pending forkchoice update. - /// It is recorded if we cannot process the forkchoice update because - /// a hook with database read-write access is active. - /// This is a temporary solution to always process missed FCUs. - pending_forkchoice_update: - Option::PayloadAttributes>>, - /// Tracks the header of invalid payloads that were rejected by the engine because they're - /// invalid. - invalid_headers: InvalidHeaderCache, - /// After downloading a block corresponding to a recent forkchoice update, the engine will - /// check whether or not we can connect the block to the current canonical chain. If we can't, - /// we need to download and execute the missing parents of that block. - /// - /// When the block can't be connected, its block number will be compared to the canonical head, - /// resulting in a heuristic for the number of missing blocks, or the size of the gap between - /// the new block and the canonical head. - /// - /// If the gap is larger than this threshold, the engine will download and execute the missing - /// blocks using the pipeline. Otherwise, the engine, sync controller, and blockchain tree will - /// be used to download and execute the missing blocks. - pipeline_run_threshold: u64, - hooks: EngineHooksController, - /// Sender for engine events. - event_sender: EventSender, - /// Consensus engine metrics. - metrics: EngineMetrics, -} - -impl BeaconConsensusEngine -where - N: TreeNodeTypes, - BT: BlockchainTreeEngine - + BlockReader, Header = HeaderTy> - + BlockIdReader - + CanonChainTracker
> - + StageCheckpointReader - + ChainSpecProvider - + 'static, - Client: EthBlockClient + 'static, -{ - /// Create a new instance of the [`BeaconConsensusEngine`]. - #[allow(clippy::too_many_arguments)] - pub fn new( - client: Client, - pipeline: Pipeline, - blockchain: BT, - task_spawner: Box, - sync_state_updater: Box, - max_block: Option, - payload_builder: PayloadBuilderHandle, - target: Option, - pipeline_run_threshold: u64, - hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let (to_engine, rx) = mpsc::unbounded_channel(); - Self::with_channel( - client, - pipeline, - blockchain, - task_spawner, - sync_state_updater, - max_block, - payload_builder, - target, - pipeline_run_threshold, - to_engine, - Box::pin(UnboundedReceiverStream::from(rx)), - hooks, - ) - } - - /// Create a new instance of the [`BeaconConsensusEngine`] using the given channel to configure - /// the [`BeaconEngineMessage`] communication channel. - /// - /// By default the engine is started with idle pipeline. - /// The pipeline can be launched immediately in one of the following ways descending in - /// priority: - /// - Explicit [`Option::Some`] target block hash provided via a constructor argument. - /// - The process was previously interrupted amidst the pipeline run. This is checked by - /// comparing the checkpoints of the first ([`StageId::Headers`]) and last - /// ([`StageId::Finish`]) stages. In this case, the latest available header in the database is - /// used as the target. - /// - /// Propagates any database related error. - #[allow(clippy::too_many_arguments)] - pub fn with_channel( - client: Client, - pipeline: Pipeline, - blockchain: BT, - task_spawner: Box, - sync_state_updater: Box, - max_block: Option, - payload_builder: PayloadBuilderHandle, - target: Option, - pipeline_run_threshold: u64, - to_engine: UnboundedSender>, - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, - hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let event_sender = EventSender::default(); - let handle = BeaconConsensusEngineHandle::new(to_engine); - let sync = EngineSyncController::new( - pipeline, - client, - task_spawner.clone(), - max_block, - blockchain.chain_spec(), - event_sender.clone(), - ); - let mut this = Self { - sync, - payload_validator: ExecutionPayloadValidator::new(blockchain.chain_spec()), - blockchain, - sync_state_updater, - engine_message_stream, - handle: handle.clone(), - forkchoice_state_tracker: Default::default(), - payload_builder, - invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), - blockchain_tree_action: None, - pending_forkchoice_update: None, - pipeline_run_threshold, - hooks: EngineHooksController::new(hooks), - event_sender, - metrics: EngineMetrics::default(), - }; - - let maybe_pipeline_target = match target { - // Provided target always takes precedence. - target @ Some(_) => target, - None => this.check_pipeline_consistency()?, - }; - - if let Some(target) = maybe_pipeline_target { - this.sync.set_pipeline_sync_target(target.into()); - } - - Ok((this, handle)) - } - - /// Returns current [`EngineHookContext`] that's used for polling engine hooks. - fn current_engine_hook_context(&self) -> RethResult { - Ok(EngineHookContext { - tip_block_number: self.blockchain.canonical_tip().number, - finalized_block_number: self - .blockchain - .finalized_block_number() - .map_err(RethError::Provider)?, - }) - } - - /// Set the next blockchain tree action. 
- fn set_blockchain_tree_action(&mut self, action: BlockchainTreeAction) { - let previous_action = self.blockchain_tree_action.replace(action); - debug_assert!(previous_action.is_none(), "Pre-existing action found"); - } - - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if self.sync.is_pipeline_active() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } - - /// Process the result of attempting to make forkchoice state head hash canonical. - /// - /// # Returns - /// - /// A forkchoice state update outcome or fatal error. - fn on_forkchoice_updated_make_canonical_result( - &mut self, - state: ForkchoiceState, - mut attrs: Option<::PayloadAttributes>, - make_canonical_result: Result, - elapsed: Duration, - ) -> Result { - match make_canonical_result { - Ok(outcome) => { - let should_update_head = match &outcome { - CanonicalOutcome::AlreadyCanonical { head, header } => { - self.on_head_already_canonical(head, header, &mut attrs) - } - CanonicalOutcome::Committed { head } => { - // new VALID update that moved the canonical chain forward - debug!(target: "consensus::engine", hash=?state.head_block_hash, number=head.number, "Canonicalized new head"); - true - } - }; - - if should_update_head { - let head = outcome.header(); - let _ = self.update_head(head.clone()); - self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - // Validate that the forkchoice state is consistent. - let on_updated = if let Some(invalid_fcu_response) = - self.ensure_consistent_forkchoice_state(state)? - { - trace!(target: "consensus::engine", ?state, "Forkchoice state is inconsistent"); - invalid_fcu_response - } else if let Some(attrs) = attrs { - // the CL requested to build a new payload on top of this new VALID head - let head = outcome.into_header().unseal(); - self.process_payload_attributes( - attrs, - head, - state, - EngineApiMessageVersion::default(), - ) - } else { - OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(state.head_block_hash), - )) - }; - Ok(on_updated) - } - Err(err) => { - if err.is_fatal() { - error!(target: "consensus::engine", %err, "Encountered fatal error"); - Err(err) - } else { - Ok(OnForkChoiceUpdated::valid( - self.on_failed_canonical_forkchoice_update(&state, err)?, - )) - } - } - } - } - - /// Invoked when head hash references a `VALID` block that is already canonical. - /// - /// Returns `true` if the head needs to be updated. 
- fn on_head_already_canonical( - &self, - head: &BlockNumHash, - header: &SealedHeader, - attrs: &mut Option<::PayloadAttributes>, - ) -> bool { - // On Optimism, the proposers are allowed to reorg their own chain at will. - #[cfg(feature = "optimism")] - if reth_chainspec::EthChainSpec::is_optimism(&self.blockchain.chain_spec()) { - debug!( - target: "consensus::engine", - fcu_head_num=?header.number, - current_head_num=?head.number, - "[Optimism] Allowing beacon reorg to old head" - ); - return true - } - - // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a - // payload build process if `forkchoiceState.headBlockHash` references a `VALID` ancestor - // of the head of canonical chain, i.e. the ancestor passed payload validation process - // and deemed `VALID`. In the case of such an event, client software MUST return - // `{payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, - // validationError: null}, payloadId: null}` - if head != &header.num_hash() { - attrs.take(); - } - - debug!( - target: "consensus::engine", - fcu_head_num=?header.number, - current_head_num=?head.number, - "Ignoring beacon update to old head" - ); - false - } - - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree - /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid - /// chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - tx: oneshot::Sender>, - ) { - self.metrics.forkchoice_updated_messages.increment(1); - self.blockchain.on_forkchoice_update_received(&state); - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - - match self.pre_validate_forkchoice_update(state) { - Ok(on_updated_result) => { - if let Some(on_updated) = on_updated_result { - // Pre-validate forkchoice state update and return if it's invalid - // or cannot be processed at the moment. - self.on_forkchoice_updated_status(state, on_updated, tx); - } else if let Some(hook) = self.hooks.active_db_write_hook() { - // We can only process new forkchoice updates if no hook with db write is - // running, since it requires exclusive access to the - // database - let replaced_pending = - self.pending_forkchoice_update.replace((state, attrs, tx)); - warn!( - target: "consensus::engine", - hook = %hook.name(), - head_block_hash = ?state.head_block_hash, - safe_block_hash = ?state.safe_block_hash, - finalized_block_hash = ?state.finalized_block_hash, - replaced_pending = ?replaced_pending.map(|(state, _, _)| state), - "Hook is in progress, delaying forkchoice update. \ - This may affect the performance of your node as a validator." - ); - } else { - self.set_blockchain_tree_action( - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }, - ); - } - } - Err(error) => { - let _ = tx.send(Err(error.into())); - } - } - } - - /// Called after the forkchoice update status has been resolved. - /// Depending on the outcome, the method updates the sync state and notifies the listeners - /// about new processed FCU. 
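The deferral logic in `on_forkchoice_updated` above keeps at most one missed FCU: while a read-write hook holds the database, a newer update simply displaces the pending one via `Option::replace`, and only the latest is retried later. A condensed model with illustrative types (a `u64` stands in for the `(state, attrs, tx)` tuple):

```rust
struct Engine {
    db_write_hook_active: bool,
    pending_fcu: Option<u64>, // stand-in for the pending (state, attrs, tx)
}

impl Engine {
    /// Returns the update that was displaced, if any.
    fn on_forkchoice_updated(&mut self, head: u64) -> Option<u64> {
        if self.db_write_hook_active {
            // Keep only the newest missed update.
            return self.pending_fcu.replace(head);
        }
        // ... process immediately (elided)
        None
    }
}

fn main() {
    let mut engine = Engine { db_write_hook_active: true, pending_fcu: None };
    assert_eq!(engine.on_forkchoice_updated(1), None);
    assert_eq!(engine.on_forkchoice_updated(2), Some(1)); // 1 was displaced
    assert_eq!(engine.pending_fcu, Some(2));
}
```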
- fn on_forkchoice_updated_status( - &mut self, - state: ForkchoiceState, - on_updated: OnForkChoiceUpdated, - tx: oneshot::Sender>, - ) { - // send the response to the CL ASAP - let status = on_updated.forkchoice_status(); - let _ = tx.send(Ok(on_updated)); - - // update the forkchoice state tracker - self.forkchoice_state_tracker.set_latest(state, status); - - match status { - ForkchoiceStatus::Invalid => {} - ForkchoiceStatus::Valid => { - // FCU head is valid, we're no longer syncing - self.sync_state_updater.update_sync_state(SyncState::Idle); - // node's fully synced, clear active download requests - self.sync.clear_block_download_requests(); - } - ForkchoiceStatus::Syncing => { - // we're syncing - self.sync_state_updater.update_sync_state(SyncState::Syncing); - } - } - - // notify listeners about new processed FCU - self.event_sender.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); - } - - /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less - /// than the checkpoint of the first stage). - /// - /// This will return the pipeline target if: - /// * the pipeline was interrupted during its previous run - /// * a new stage was added - /// * stage data was dropped manually through `reth stage drop ...` - /// - /// # Returns - /// - /// A target block hash if the pipeline is inconsistent, otherwise `None`. - fn check_pipeline_consistency(&self) -> RethResult> { - // If no target was provided, check if the stages are congruent - check if the - // checkpoint of the last stage matches the checkpoint of the first. - let first_stage_checkpoint = self - .blockchain - .get_stage_checkpoint(*StageId::ALL.first().unwrap())? - .unwrap_or_default() - .block_number; - - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. - for stage_id in StageId::ALL.iter().skip(1) { - let stage_checkpoint = - self.blockchain.get_stage_checkpoint(*stage_id)?.unwrap_or_default().block_number; - - // If the checkpoint of any stage is less than the checkpoint of the first stage, - // retrieve and return the block hash of the latest header and use it as the target. - if stage_checkpoint < first_stage_checkpoint { - debug!( - target: "consensus::engine", - first_stage_checkpoint, - inconsistent_stage_id = %stage_id, - inconsistent_stage_checkpoint = stage_checkpoint, - "Pipeline sync progress is inconsistent" - ); - return Ok(self.blockchain.block_hash(first_stage_checkpoint)?) - } - } - - Ok(None) - } - - /// Returns a new [`BeaconConsensusEngineHandle`] that can be cloned and shared. - /// - /// The [`BeaconConsensusEngineHandle`] can be used to interact with this - /// [`BeaconConsensusEngine`] - pub fn handle(&self) -> BeaconConsensusEngineHandle { - self.handle.clone() - } - - /// Returns true if the distance from the local tip to the block is greater than the configured - /// threshold. - /// - /// If the `local_tip` is greater than the `block`, then this will return false. - #[inline] - const fn exceeds_pipeline_run_threshold(&self, local_tip: u64, block: u64) -> bool { - block > local_tip && block - local_tip > self.pipeline_run_threshold - } - - /// Returns the finalized hash to sync to if the distance from the local tip to the block is - /// greater than the configured threshold and we're not synced to the finalized block yet - /// yet (if we've seen that block already). 
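`check_pipeline_consistency` above reduces to one comparison per stage: any checkpoint behind the first stage's means the previous pipeline run was interrupted and must resume. A simplified model that returns the first stage's block number as the resume target (the deleted code resolves it to a block hash; the stage names here are illustrative):

```rust
// Returns Some(resume_block) when any later stage lags the first stage.
fn inconsistent_target(checkpoints: &[(&'static str, u64)]) -> Option<u64> {
    let first = checkpoints.first()?.1;
    checkpoints[1..]
        .iter()
        .any(|&(_, cp)| cp < first)
        .then_some(first)
}

fn main() {
    // `Bodies` lags `Headers`: resume the pipeline up to block 100.
    assert_eq!(inconsistent_target(&[("Headers", 100), ("Bodies", 80)]), Some(100));
    // All stages congruent: nothing to do.
    assert_eq!(inconsistent_target(&[("Headers", 100), ("Bodies", 100)]), None);
}
```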
- /// - /// If this is invoked after a new block has been downloaded, the downloaded block could be the - /// (missing) finalized block. - fn can_pipeline_sync_to_finalized( - &self, - canonical_tip_num: u64, - target_block_number: u64, - downloaded_block: Option, - ) -> Option { - let sync_target_state = self.forkchoice_state_tracker.sync_target_state(); - - // check if the distance exceeds the threshold for pipeline sync - let mut exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, target_block_number); - - // check if the downloaded block is the tracked finalized block - if let Some(ref buffered_finalized) = sync_target_state - .as_ref() - .and_then(|state| self.blockchain.buffered_header_by_hash(state.finalized_block_hash)) - { - // if we have buffered the finalized block, we should check how far - // we're off - exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, buffered_finalized.number); - } - - // If this is invoked after we downloaded a block we can check if this block is the - // finalized block - if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) { - if downloaded_block.hash == state.finalized_block_hash { - // we downloaded the finalized block - exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, downloaded_block.number); - } - } - - // if the number of missing blocks is greater than the max, run the - // pipeline - if exceeds_pipeline_run_threshold { - if let Some(state) = sync_target_state { - // if we have already canonicalized the finalized block, we should - // skip the pipeline run - match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) { - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to get finalized block header"); - } - Ok(None) => { - // ensure the finalized block is known (not the zero hash) - if !state.finalized_block_hash.is_zero() { - // we don't have the block yet and the distance exceeds the allowed - // threshold - return Some(state.finalized_block_hash) - } - - // OPTIMISTIC SYNCING - // - // It can happen when the node is doing an - // optimistic sync, where the CL has no knowledge of the finalized hash, - // but is expecting the EL to sync as high - // as possible before finalizing. - // - // This usually doesn't happen on ETH mainnet since CLs use the more - // secure checkpoint syncing. - // - // However, optimism chains will do this. The risk of a reorg is however - // low. - debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); - return Some(state.head_block_hash) - } - Ok(Some(_)) => { - // we're fully synced to the finalized block - // but we want to continue downloading the missing parent - } - } - } - } - - None - } - - /// Returns how far the local tip is from the given block. If the local tip is at the same - /// height or its block number is greater than the given block, this returns None. 
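The gap heuristic used throughout `can_pipeline_sync_to_finalized` above is the deleted `exceeds_pipeline_run_threshold` check. A standalone sketch of the decision, assuming the mainnet `EPOCH_SLOTS` value of 32 exported by `alloy-eips`:

```rust
const EPOCH_SLOTS: u64 = 32; // assumed mainnet value
const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS;

/// Gaps larger than one epoch are filled by the pipeline; smaller gaps by
/// live sync through the tree. Being at or ahead of the target is a no-op.
const fn exceeds_pipeline_run_threshold(local_tip: u64, block: u64) -> bool {
    block > local_tip && block - local_tip > MIN_BLOCKS_FOR_PIPELINE_RUN
}

fn main() {
    assert!(!exceeds_pipeline_run_threshold(100, 120)); // small gap: tree sync
    assert!(exceeds_pipeline_run_threshold(100, 200)); // large gap: pipeline
    assert!(!exceeds_pipeline_run_threshold(200, 100)); // behind tip: no-op
}
```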
- #[inline] - const fn distance_from_local_tip(&self, local_tip: u64, block: u64) -> Option<u64> { - if block > local_tip { - Some(block - local_tip) - } else { - None - } - } - - /// If validation fails, the response MUST contain the latest valid hash: - /// - /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: - /// - It is fully validated and deemed VALID - /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID - /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a `PoW` block. - /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. - fn latest_valid_hash_for_invalid_payload( - &mut self, - parent_hash: B256, - ) -> ProviderResult<Option<B256>> { - // Check if parent exists in side chain or in canonical chain. - if self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any)?.is_some() { - return Ok(Some(parent_hash)) - } - - // iterate over ancestors in the invalid cache - // until we encounter the first valid ancestor - let mut current_hash = parent_hash; - let mut current_block = self.invalid_headers.get(&current_hash); - while let Some(block) = current_block { - current_hash = block.parent; - current_block = self.invalid_headers.get(&current_hash); - - // If current_block is None, then the current_hash does not have an invalid - // ancestor in the cache, check its presence in blockchain tree - if current_block.is_none() && - self.blockchain.find_block_by_hash(current_hash, BlockSource::Any)?.is_some() - { - return Ok(Some(current_hash)) - } - } - Ok(None) - } - - /// Prepares the invalid payload response for the given hash, checking the - /// database for the parent hash and populating the payload status with the latest valid hash - /// according to the engine api spec. - fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult<PayloadStatus> { - // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal - // PoW block, which we need to identify by looking at the parent's block difficulty - if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) { - if !parent.is_zero_difficulty() { - parent_hash = B256::ZERO; - } - } - - let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; - Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), - }) - .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) - } - - /// Checks if the given `check` hash points to an invalid header, inserting the given `head` - /// block into the invalid header cache if the `check` hash has a known invalid ancestor. - /// - /// Returns a payload status response according to the engine API spec if the block is known to - /// be invalid.
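// A minimal, self-contained sketch of the ancestor walk performed by
// `latest_valid_hash_for_invalid_payload` above. The invalid-headers cache is
// modeled as a plain `HashMap<child, parent>`, chain membership as a `HashSet`,
// and hashes as `u64`; all three are illustrative stand-ins, not reth's types:
use std::collections::{HashMap, HashSet};

/// Walks parent links through the invalid cache until the first hash that is
/// no longer marked invalid but is known to the chain.
fn latest_valid_ancestor(
    parent_hash: u64,
    invalid: &HashMap<u64, u64>, // child -> parent, both marked invalid
    known: &HashSet<u64>,        // canonical + sidechain blocks we have
) -> Option<u64> {
    // the parent itself is known to the chain -> it is the latest valid hash
    if known.contains(&parent_hash) {
        return Some(parent_hash);
    }
    let mut current = parent_hash;
    while let Some(parent) = invalid.get(&current) {
        current = *parent;
        // first ancestor outside the invalid cache: valid if we know it
        if !invalid.contains_key(&current) && known.contains(&current) {
            return Some(current);
        }
    }
    None
}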
- fn check_invalid_ancestor_with_head( - &mut self, - check: B256, - head: B256, - ) -> ProviderResult<Option<PayloadStatus>> { - // check if the check hash was previously marked as invalid - let Some(block) = self.invalid_headers.get(&check) else { return Ok(None) }; - - // populate the latest valid hash field - let status = self.prepare_invalid_response(block.parent)?; - - // insert the head block into the invalid header cache - self.invalid_headers.insert_with_invalid_ancestor(head, block); - - Ok(Some(status)) - } - - /// Checks if the given `head` points to an invalid header, which requires a specific response - /// to a forkchoice update. - fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult<Option<PayloadStatus>> { - // check if the head was previously marked as invalid - let Some(block) = self.invalid_headers.get(&head) else { return Ok(None) }; - - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(block.parent)?)) - } - - /// Records latency metrics for one call to make a block canonical. - /// Takes the start time of the call and the result of the make-canonical call. - /// - /// Handles the cases of errored, already-canonical, and committed blocks. - fn record_make_canonical_latency( - &self, - start: Instant, - outcome: &Result<CanonicalOutcome, CanonicalError>, - ) -> Duration { - let elapsed = start.elapsed(); - self.metrics.make_canonical_latency.record(elapsed); - match outcome { - Ok(CanonicalOutcome::AlreadyCanonical { .. }) => { - self.metrics.make_canonical_already_canonical_latency.record(elapsed) - } - Ok(CanonicalOutcome::Committed { .. }) => { - self.metrics.make_canonical_committed_latency.record(elapsed) - } - Err(_) => self.metrics.make_canonical_error_latency.record(elapsed), - } - elapsed - } - - /// Ensures that the given forkchoice state is consistent, assuming the head block has been - /// made canonical. - /// - /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will - /// return an instance of [`OnForkChoiceUpdated`] that is INVALID. - /// - /// This also updates the safe and finalized blocks in the [`CanonChainTracker`], if they are - /// consistent with the head block. - fn ensure_consistent_forkchoice_state( - &self, - state: ForkchoiceState, - ) -> ProviderResult<Option<OnForkChoiceUpdated>> { - // Ensure that the finalized block, if not zero, is known and in the canonical chain - // after the head block is canonicalized. - // - // This ensures that the finalized block is consistent with the head block, i.e. the - // finalized block is an ancestor of the head block. - if !state.finalized_block_hash.is_zero() && - !self.blockchain.is_canonical(state.finalized_block_hash)? - { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // Finalized block is consistent, so update it in the canon chain tracker. - self.update_finalized_block(state.finalized_block_hash)?; - - // Also ensure that the safe block, if not zero, is known and in the canonical chain - // after the head block is canonicalized. - // - // This ensures that the safe block is consistent with the head block, i.e. the safe - // block is an ancestor of the head block. - if !state.safe_block_hash.is_zero() && - !self.blockchain.is_canonical(state.safe_block_hash)? - { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // Safe block is consistent, so update it in the canon chain tracker. - self.update_safe_block(state.safe_block_hash)?; - - Ok(None) - } - - /// Sets the state of the canon chain tracker based on the given head. - /// - /// This expects the given head to be the new canonical head.
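// Condensed model of the rule enforced by `ensure_consistent_forkchoice_state`
// above: once the head is canonical, any non-zero finalized/safe hash must
// already lie on the canonical chain. The `u64` hash type and the `canonical`
// set are illustrative stand-ins, not reth's API:
use std::collections::HashSet;

fn forkchoice_state_is_consistent(
    finalized: u64,
    safe: u64,
    canonical: &HashSet<u64>, // hashes currently on the canonical chain
) -> bool {
    const ZERO: u64 = 0; // a zero hash means "not set" and is always accepted
    (finalized == ZERO || canonical.contains(&finalized)) &&
        (safe == ZERO || canonical.contains(&safe))
}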
- /// - /// Additionally, updates the head used for p2p handshakes. - /// - /// This also updates the tracked safe and finalized blocks, and should be called before - /// returning a VALID forkchoice update response. - fn update_canon_chain(&self, head: SealedHeader, update: &ForkchoiceState) -> RethResult<()> { - self.update_head(head)?; - self.update_finalized_block(update.finalized_block_hash)?; - self.update_safe_block(update.safe_block_hash)?; - Ok(()) - } - - /// Updates the state of the canon chain tracker based on the given head. - /// - /// This expects the given head to be the new canonical head. - /// Additionally, updates the head used for p2p handshakes. - /// - /// This should be called before returning a VALID forkchoice update response. - #[inline] - fn update_head(&self, head: SealedHeader) -> RethResult<()> { - let mut head_block = Head { - number: head.number, - hash: head.hash(), - difficulty: head.difficulty, - timestamp: head.timestamp, - // NOTE: this will be set later - total_difficulty: Default::default(), - }; - - // we update the tracked header first - self.blockchain.set_canonical_head(head); - - head_block.total_difficulty = - self.blockchain.header_td_by_number(head_block.number)?.ok_or_else(|| { - RethError::Provider(ProviderError::TotalDifficultyNotFound(head_block.number)) - })?; - self.sync_state_updater.update_status(head_block); - - Ok(()) - } - - /// Updates the tracked safe block if we have it. - /// - /// Returns an error if the block is not found. - #[inline] - fn update_safe_block(&self, safe_block_hash: B256) -> ProviderResult<()> { - if !safe_block_hash.is_zero() { - if self.blockchain.safe_block_hash()? == Some(safe_block_hash) { - // nothing to update - return Ok(()) - } - - let safe = self - .blockchain - .find_block_by_hash(safe_block_hash, BlockSource::Any)? - .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; - self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash)); - } - Ok(()) - } - - /// Updates the tracked finalized block if we have it. - /// - /// Returns an error if the block is not found. - #[inline] - fn update_finalized_block(&self, finalized_block_hash: B256) -> ProviderResult<()> { - if !finalized_block_hash.is_zero() { - if self.blockchain.finalized_block_hash()? == Some(finalized_block_hash) { - // nothing to update - return Ok(()) - } - - let finalized = self - .blockchain - .find_block_by_hash(finalized_block_hash, BlockSource::Any)? - .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; - self.blockchain.finalize_block(finalized.header().number())?; - self.blockchain - .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash)); - } - Ok(()) - } - - /// Handler for a failed forkchoice update due to a canonicalization error. - /// - /// This will determine if the state's head is invalid, and if so, return immediately. - /// - /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the - /// gap. - /// - /// See [`Self::on_forkchoice_updated`] and [`BlockchainTreeEngine::make_canonical`]. - fn on_failed_canonical_forkchoice_update( - &mut self, - state: &ForkchoiceState, - error: CanonicalError, - ) -> ProviderResult<PayloadStatus> { - debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle"); - - // check if the new head was previously invalidated, if so then we deem this FCU - // as invalid - if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash)?
{ - warn!(target: "consensus::engine", %error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid"); - debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=%error, "Head was previously marked as invalid"); - return Ok(invalid_ancestor) - } - - match &error { - CanonicalError::Validation(BlockValidationError::BlockPreMerge { .. }) => { - warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); - return Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }) - .with_latest_valid_hash(B256::ZERO)) - } - CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { - .. - }) => { - // This just means we couldn't find the block when attempting to make it canonical, - // so we should not warn the user, since this will result in us attempting to sync - // to a new target and is considered normal operation during sync - } - CanonicalError::OptimisticTargetRevert(block_number) => { - self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); - return Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - _ => { - warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); - // TODO(mattsse) better error handling before attempting to sync (FCU could be - // invalid): only trigger sync if we can't determine whether the FCU is invalid - } - } - - // we assume the FCU is valid and at least the head is missing, - // so we need to start syncing to it - // - // find the appropriate target to sync to, if we don't have the safe block hash then we - // start syncing to the safe block via pipeline first - let target = if self.forkchoice_state_tracker.is_empty() && - // check that safe block is valid and missing - !state.safe_block_hash.is_zero() && - self.blockchain.block_number(state.safe_block_hash).ok().flatten().is_none() - { - state.safe_block_hash - } else { - state.head_block_hash - }; - - // we need to first check the buffer for the target and its ancestors - let target = self.lowest_buffered_ancestor_or(target); - - // if the threshold is zero, we should not download the block first, and just use the - // pipeline. Otherwise we use the tree to insert the block first - if self.pipeline_run_threshold == 0 { - // use the pipeline to sync to the target - trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); - self.sync.set_pipeline_sync_target(target.into()); - } else { - // trigger a full block download for missing hash, or the parent of its lowest buffered - // ancestor - trace!(target: "consensus::engine", request=%target, "Triggering full block download for missing ancestors of the new head"); - self.sync.download_full_block(target); - } - - debug!(target: "consensus::engine", %target, "Syncing to new target"); - Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - - /// Return the parent hash of the lowest buffered ancestor for the requested block, if there - /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does - /// not exist in the buffer, this returns the hash that is passed in. - /// - /// Returns the parent hash of the block itself if the block is buffered and has no other - /// buffered ancestors. 
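// The sync-target selection from `on_failed_canonical_forkchoice_update` above,
// isolated; the `u64` hash type and the lookup flags are simplified stand-ins,
// not reth's API:
fn select_sync_target(
    tracker_is_empty: bool, // no forkchoice state has been processed yet
    safe_hash: u64,
    safe_is_known: bool, // a block number for the safe hash was found locally
    head_hash: u64,
) -> u64 {
    // on the very first FCU, prefer syncing to the safe block if it is set and
    // missing locally; otherwise sync straight to the head
    if tracker_is_empty && safe_hash != 0 && !safe_is_known {
        safe_hash
    } else {
        head_hash
    }
}

// The caller then resolves the target to the lowest buffered ancestor and, if
// the pipeline-run threshold is zero, hands it to the pipeline; otherwise it
// triggers a full-block download for it.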
- fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { - self.blockchain - .lowest_buffered_ancestor(hash) - .map(|block| block.parent_hash) - .unwrap_or_else(|| hash) - } - - /// When the Consensus layer receives a new block via the consensus gossip protocol, - /// the transactions in the block are sent to the execution layer in the form of an - /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the - /// state in the block header, then passes validation data back to the Consensus layer, which - /// adds the block to the head of its own blockchain and attests to it. The block is then - /// broadcast over the consensus p2p network in the form of a "Beacon block". - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). - /// - /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and - /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result<Either<PayloadStatus, SealedBlock>, BeaconOnNewPayloadError> { - self.metrics.new_payload_messages.increment(1); - - // Ensures that the given payload does not violate any consensus rules that concern the - // block's layout, like: - // - missing or invalid base fee - // - invalid extra data - // - invalid transactions - // - incorrect hash - // - the versioned hashes passed with the payload do not exactly match transaction - // versioned hashes - // - the block does not contain blob transactions if it is pre-cancun - // - // This validates the following engine API rule: - // - // 3. Given the expected array of blob versioned hashes client software **MUST** run its - // validation by taking the following steps: - // - // 1. Obtain the actual array by concatenating blob versioned hashes lists - // (`tx.blob_versioned_hashes`) of each [blob - // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included - // in the payload, respecting the order of inclusion. If the payload has no blob - // transactions the expected array **MUST** be `[]`. - // - // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | - // null}` if the expected and the actual arrays don't match. - // - // This validation **MUST** be instantly run in all cases even during active sync process. - let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { - Ok(block) => block, - Err(error) => { - error!(target: "consensus::engine", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed - // > `latestValidHash: null` if the expected and the actual arrays don't match - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash) - .map_err(BeaconOnNewPayloadError::internal)?
- }; - - let status = PayloadStatusEnum::from(error); - return Ok(Either::Left(PayloadStatus::new(status, latest_valid_hash))) - } - }; - - let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash()); - if lowest_buffered_ancestor == block.hash() { - lowest_buffered_ancestor = block.parent_hash; - } - - // now check the block itself - if let Some(status) = self - .check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash()) - .map_err(BeaconOnNewPayloadError::internal)? - { - Ok(Either::Left(status)) - } else { - Ok(Either::Right(block)) - } - } - - /// Validates the payload attributes with respect to the header and fork choice state. - /// - /// Note: At this point, the fork choice update is considered to be VALID, however, we can still - /// return an error if the payload attributes are invalid. - fn process_payload_attributes( - &self, - attrs: ::PayloadAttributes, - head: Header, - state: ForkchoiceState, - version: EngineApiMessageVersion, - ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. - if attrs.timestamp() <= head.timestamp { - return OnForkChoiceUpdated::invalid_payload_attributes() - } - - // 8. Client software MUST begin a payload build process building on top of - // forkchoiceState.headBlockHash and identified via buildProcessId value if - // payloadAttributes is not null and the forkchoice state has been updated successfully. - // The build process is specified in the Payload building section. - match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( - state.head_block_hash, - attrs, - version as u8 - ) { - Ok(attributes) => { - // send the payload to the builder and return the receiver for the pending payload - // id, initiating payload job is handled asynchronously - let pending_payload_id = self.payload_builder.send_new_payload(attributes); - - // Client software MUST respond to this method call in the following way: - // { - // payloadStatus: { - // status: VALID, - // latestValidHash: forkchoiceState.headBlockHash, - // validationError: null - // }, - // payloadId: buildProcessId - // } - // - // if the payload is deemed VALID and the build process has begun. - OnForkChoiceUpdated::updated_with_pending_payload_id( - PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)), - pending_payload_id, - ) - } - Err(_) => OnForkChoiceUpdated::invalid_payload_attributes(), - } - } - - /// When the pipeline is active, the tree is unable to commit any additional blocks since the - /// pipeline holds exclusive access to the database. - /// - /// In this scenario we buffer the payload in the tree if the payload is valid, once the - /// pipeline is finished, the tree is then able to also use the buffered payloads to commit to a - /// (newer) canonical chain. - /// - /// This will return `SYNCING` if the block was buffered successfully, and an error if an error - /// occurred while buffering the block. 
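// Engine API rule 7 from `process_payload_attributes` above, isolated: the
// attributes' timestamp must be strictly greater than the head block's,
// otherwise the attributes are invalid (-38003) and no build job is started.
// A minimal sketch, with plain `u64` timestamps:
fn payload_attributes_timestamp_ok(attrs_timestamp: u64, head_timestamp: u64) -> bool {
    attrs_timestamp > head_timestamp
}

// e.g. an equal timestamp is rejected, only a strictly newer one is accepted:
// assert!(!payload_attributes_timestamp_ok(1_700_000_000, 1_700_000_000));
// assert!(payload_attributes_timestamp_ok(1_700_000_012, 1_700_000_000));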
- #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)] - fn try_buffer_payload( - &mut self, - block: SealedBlock, - ) -> Result { - self.blockchain.buffer_block_without_senders(block)?; - Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - - /// Attempts to insert a new payload into the tree. - /// - /// Caution: This expects that the pipeline is idle. - #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)] - fn try_insert_new_payload( - &mut self, - block: SealedBlock, - ) -> Result { - debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle"); - - let block_hash = block.hash(); - let start = Instant::now(); - let status = self - .blockchain - .insert_block_without_senders(block.clone(), BlockValidationKind::Exhaustive)?; - - let elapsed = start.elapsed(); - let mut latest_valid_hash = None; - let status = match status { - InsertPayloadOk::Inserted(BlockStatus::Valid(attachment)) => { - latest_valid_hash = Some(block_hash); - let block = Arc::new(block); - let event = if attachment.is_canonical() { - BeaconConsensusEngineEvent::CanonicalBlockAdded(block, elapsed) - } else { - BeaconConsensusEngineEvent::ForkBlockAdded(block, elapsed) - }; - self.event_sender.notify(event); - PayloadStatusEnum::Valid - } - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { - latest_valid_hash = Some(block_hash); - PayloadStatusEnum::Valid - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | - InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { - // check if the block's parent is already marked as invalid - if let Some(status) = - self.check_invalid_ancestor_with_head(block.parent_hash, block.hash()).map_err( - |error| InsertBlockError::new(block, InsertBlockErrorKind::Provider(error)), - )? - { - return Ok(status) - } - - // not known to be invalid, but we don't know anything else - PayloadStatusEnum::Syncing - } - }; - Ok(PayloadStatus::new(status, latest_valid_hash)) - } - - /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. - /// - /// This mainly compares the missing parent of the downloaded block with the current canonical - /// tip, and decides whether or not the pipeline should be run. - /// - /// The canonical tip is compared to the missing parent using `exceeds_pipeline_run_threshold`, - /// which returns true if the missing parent is sufficiently ahead of the canonical tip. If so, - /// the pipeline is run. Otherwise, we need to insert blocks using the blockchain tree, and - /// must download blocks outside of the pipeline. In this case, the distance is used to - /// determine how many blocks we should download at once. 
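// The decision tree of `on_disconnected_block` below, as a standalone sketch.
// `threshold` plays the role of the configured pipeline-run threshold; the enum
// and function names are illustrative, and this deliberately ignores the extra
// finalized-block tracking done by `can_pipeline_sync_to_finalized`:
enum DownloadDecision {
    /// gap to the missing parent exceeds the threshold: hand off to the pipeline
    RunPipeline,
    /// missing parent is ahead of the tip but within the threshold: download the range
    DownloadRange(u64),
    /// missing parent is at or behind the tip (outdated sidechain): fetch one block
    DownloadSingle,
}

fn decide_download(tip: u64, missing_parent: u64, threshold: u64) -> DownloadDecision {
    if missing_parent > tip && missing_parent - tip > threshold {
        DownloadDecision::RunPipeline
    } else if missing_parent > tip {
        DownloadDecision::DownloadRange(missing_parent - tip)
    } else {
        DownloadDecision::DownloadSingle
    }
}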
- fn on_disconnected_block( - &mut self, - downloaded_block: BlockNumHash, - missing_parent: BlockNumHash, - head: BlockNumHash, - ) { - // compare the missing parent with the canonical tip - if let Some(target) = self.can_pipeline_sync_to_finalized( - head.number, - missing_parent.number, - Some(downloaded_block), - ) { - // we don't have the block yet and the distance exceeds the allowed - // threshold - self.sync.set_pipeline_sync_target(target.into()); - // we can exit early here because the pipeline will take care of syncing - return - } - - // continue downloading the missing parent - // - // this happens if either: - // * the missing parent block num < canonical tip num - // * this case represents a missing block on a fork that is shorter than the canonical - // chain - // * the missing parent block num >= canonical tip num, but the number of missing blocks is - // less than the pipeline threshold - // * this case represents a potentially long range of blocks to download and execute - if let Some(distance) = self.distance_from_local_tip(head.number, missing_parent.number) { - self.sync.download_block_range(missing_parent.hash, distance) - } else { - // This happens when the missing parent is on an outdated - // sidechain - self.sync.download_full_block(missing_parent.hash); - } - } - - /// Attempt to form a new canonical chain based on the current sync target. - /// - /// This is invoked when we successfully __downloaded__ a new block from the network which - /// resulted in [`BlockStatus::Valid`]. - /// - /// Note: This will not succeed if the sync target has changed since the block download request - /// was issued and the new target is still disconnected and additional missing blocks are - /// downloaded - fn try_make_sync_target_canonical( - &mut self, - inserted: BlockNumHash, - ) -> Result<(), (B256, CanonicalError)> { - let Some(target) = self.forkchoice_state_tracker.sync_target_state() else { return Ok(()) }; - - // optimistically try to make the head of the current FCU target canonical, the sync - // target might have changed since the block download request was issued - // (new FCU received) - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(target.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - match make_canonical_result { - Ok(outcome) => { - if let CanonicalOutcome::Committed { head } = &outcome { - self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - let new_head = outcome.into_header(); - debug!(target: "consensus::engine", hash=?new_head.hash(), number=new_head.number, "Canonicalized new head"); - - // we can update the FCU blocks - if let Err(err) = self.update_canon_chain(new_head, &target) { - debug!(target: "consensus::engine", ?err, ?target, "Failed to update the canonical chain tracker"); - } - - // we're no longer syncing - self.sync_state_updater.update_sync_state(SyncState::Idle); - - // clear any active block requests - self.sync.clear_block_download_requests(); - Ok(()) - } - Err(err) => { - // if we failed to make the FCU's head canonical, because we don't have that - // block yet, then we can try to make the inserted block canonical if we know - // it's part of the canonical chain: if it's the safe or the finalized block - if err.is_block_hash_not_found() { - // if the inserted block is the currently targeted `finalized` or `safe` - // block, we will attempt to make them canonical, - 
// because they are also part of the canonical chain and - // their missing block range might already be downloaded (buffered). - if let Some(target_hash) = - ForkchoiceStateHash::find(&target, inserted.hash).filter(|h| !h.is_head()) - { - // TODO: do not ignore this - let _ = self.blockchain.make_canonical(*target_hash.as_ref()); - } - } else if let Some(block_number) = err.optimistic_revert_block_number() { - self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number)); - } - - Err((target.head_block_hash, err)) - } - } - } - - /// Event handler for events emitted by the [`EngineSyncController`]. - /// - /// This returns a result to indicate whether the engine future should resolve (fatal error). - fn on_sync_event( - &mut self, - event: EngineSyncEvent, - ) -> Result { - let outcome = match event { - EngineSyncEvent::FetchedFullBlock(block) => { - trace!(target: "consensus::engine", hash=?block.hash(), number=%block.number, "Downloaded full block"); - // Insert block only if the block's parent is not marked as invalid - if self - .check_invalid_ancestor_with_head(block.parent_hash, block.hash()) - .map_err(|error| BeaconConsensusEngineError::Common(error.into()))? - .is_none() - { - self.set_blockchain_tree_action( - BlockchainTreeAction::InsertDownloadedPayload { block }, - ); - } - EngineEventOutcome::Processed - } - EngineSyncEvent::PipelineStarted(target) => { - trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline"); - self.metrics.pipeline_runs.increment(1); - self.sync_state_updater.update_sync_state(SyncState::Syncing); - EngineEventOutcome::Processed - } - EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished"); - // Any pipeline error at this point is fatal. - let ctrl = result?; - if reached_max_block { - // Terminate the sync early if it's reached the maximum user-configured block. - EngineEventOutcome::ReachedMaxBlock - } else { - self.on_pipeline_outcome(ctrl)?; - EngineEventOutcome::Processed - } - } - EngineSyncEvent::PipelineTaskDropped => { - error!(target: "consensus::engine", "Failed to receive spawned pipeline"); - return Err(BeaconConsensusEngineError::PipelineChannelClosed) - } - }; - - Ok(outcome) - } - - /// Invoked when the pipeline has successfully finished. - /// - /// Updates the internal sync state depending on the pipeline configuration, - /// the outcome of the pipeline run and the last observed forkchoice state. - fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { - // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. - if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_num_hash=?bad_block.block, "Bad block detected in unwind"); - // update the `invalid_headers` cache with the new invalid header - self.invalid_headers.insert(*bad_block); - return Ok(()) - } - - let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() { - Some(current_state) => current_state, - None => { - // This is only possible if the node was run with `debug.tip` - // argument and without CL. 
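// In short, the sync-event handling in `on_sync_event` above maps to:
//   FetchedFullBlock    -> insert into the tree unless the parent is known-invalid
//   PipelineStarted     -> report SyncState::Syncing
//   PipelineFinished    -> fatal on error; exit at max block; else `on_pipeline_outcome`
//   PipelineTaskDropped -> fatal (pipeline channel closed)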
- warn!(target: "consensus::engine", "No fork choice state available"); - return Ok(()) - } - }; - - if sync_target_state.finalized_block_hash.is_zero() { - self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; - self.blockchain.update_block_hashes_and_clear_buffered()?; - self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; - // We are in an optimistic syncing process; it is better to wait for the next FCU to - // handle this - return Ok(()) - } - - // Next, we check if we need to schedule another pipeline run or transition - // to live sync via the tree. - // This can arise if we buffer the forkchoice head, and if the head is an - // ancestor of an invalid block. - // - // * The forkchoice head could be buffered if it were first sent as a `newPayload` request. - // - // In this case, we won't have the head hash in the database, so we would - // set the pipeline sync target to a known-invalid head. - // - // This is why we check the invalid header cache here. - let lowest_buffered_ancestor = - self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); - - // this inserts the head into invalid headers cache - // if the lowest buffered ancestor is invalid - if self - .check_invalid_ancestor_with_head( - lowest_buffered_ancestor, - sync_target_state.head_block_hash, - )? - .is_some() - { - warn!( - target: "consensus::engine", - invalid_ancestor = %lowest_buffered_ancestor, - head = %sync_target_state.head_block_hash, - "Current head has an invalid ancestor" - ); - return Ok(()) - } - - // get the block number of the finalized block, if we have it - let newest_finalized = self - .blockchain - .buffered_header_by_hash(sync_target_state.finalized_block_hash) - .map(|header| header.number); - - // The block number that the pipeline finished at - if the progress or newest - // finalized is None then we can't check the distance anyway. - // - // If both are Some, we perform another distance check and return the desired - // pipeline target - let pipeline_target = - ctrl.block_number().zip(newest_finalized).and_then(|(progress, finalized_number)| { - // Determines whether we should run the pipeline again, in case - // the new gap is large enough to warrant - // running the pipeline. - self.can_pipeline_sync_to_finalized(progress, finalized_number, None) - }); - - // If the distance is large enough, we should run the pipeline again to prevent - // the tree update from executing too many blocks and blocking. - if let Some(target) = pipeline_target { - // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target.into()); - } else if let Some(number) = - self.blockchain.block_number(sync_target_state.finalized_block_hash)? - { - // Finalized block is in the database, attempt to restore the tree with - // the most recent canonical hashes. - self.blockchain.connect_buffered_blocks_to_canonical_hashes_and_finalize(number).inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error restoring blockchain tree state"); - })?; - } else { - // We don't have the finalized block in the database, so we need to - // trigger another pipeline run. - self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); - } - - Ok(()) - } - - fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { - let max_header = self.blockchain.sealed_header(max_block) - .inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - })?
- .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?; - self.blockchain.set_canonical_head(max_header); - - Ok(()) - } - - fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> { - if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event { - error!( - target: "consensus::engine", - name = %polled_hook.name, - ?error, - "Hook finished with error" - ) - } - - if polled_hook.db_access_level.is_read_write() { - match polled_hook.event { - EngineHookEvent::NotReady => {} - EngineHookEvent::Started => { - // If the hook has read-write access to the database, it means that the engine - // can't process any FCU messages from CL. To prevent CL from sending us - // unneeded updates, we need to respond `true` on `eth_syncing` request. - self.sync_state_updater.update_sync_state(SyncState::Syncing) - } - EngineHookEvent::Finished(_) => { - // Hook with read-write access to the database has finished running, so engine - // can process new FCU messages from CL again. It's safe to - // return `false` on `eth_syncing` request. - self.sync_state_updater.update_sync_state(SyncState::Idle); - // If the hook had read-write access to the database, it means that the engine - // may have accumulated some buffered blocks. - if let Err(error) = - self.blockchain.connect_buffered_blocks_to_canonical_hashes() - { - error!(target: "consensus::engine", %error, "Error connecting buffered blocks to canonical hashes on hook result"); - return Err(RethError::Canonical(error).into()) - } - } - } - } - - Ok(()) - } - - /// Processes the next blockchain tree action that was set. - /// The handler might set the next blockchain tree action to perform, - /// so the state change should be handled accordingly. - fn on_blockchain_tree_action( - &mut self, - action: BlockchainTreeAction, - ) -> RethResult<EngineEventOutcome> { - match action { - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => { - let start = Instant::now(); - let result = self.blockchain.make_canonical(state.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &result); - match self - .on_forkchoice_updated_make_canonical_result(state, attrs, result, elapsed) - { - Ok(on_updated) => { - trace!(target: "consensus::engine", status = ?on_updated, ?state, "Returning forkchoice status"); - let fcu_status = on_updated.forkchoice_status(); - self.on_forkchoice_updated_status(state, on_updated, tx); - - if fcu_status.is_valid() { - let tip_number = self.blockchain.canonical_tip().number; - if self.sync.has_reached_max_block(tip_number) { - // Terminate the sync early if it's reached - // the maximum user-configured block.
- return Ok(EngineEventOutcome::ReachedMaxBlock) - } - } - } - Err(error) => { - let _ = tx.send(Err(RethError::Canonical(error.clone()))); - if error.is_fatal() { - return Err(RethError::Canonical(error)) - } - } - }; - } - BlockchainTreeAction::InsertNewPayload { block, tx } => { - let block_hash = block.hash(); - let block_num_hash = block.num_hash(); - let result = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it - // holds exclusive access to the database - self.try_insert_new_payload(block) - } else { - self.try_buffer_payload(block) - }; - - let status = match result { - Ok(status) => status, - Err(error) => { - warn!(target: "consensus::engine", %error, "Error while processing payload"); - - let (block, error) = error.split(); - if !error.is_invalid_block() { - // TODO: revise if any error should be considered fatal at this point. - let _ = - tx.send(Err(BeaconOnNewPayloadError::Internal(Box::new(error)))); - return Ok(EngineEventOutcome::Processed) - } - - // If the error was due to an invalid payload, the payload is added to the - // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is - // returned. - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - let latest_valid_hash = if error.is_block_pre_merge() { - // zero hash must be returned if block is pre-merge - Some(B256::ZERO) - } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash)? - }; - // keep track of the invalid header - self.invalid_headers.insert(block.header.block_with_parent()); - PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - latest_valid_hash, - ) - } - }; - - if status.is_valid() { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // if we're currently syncing and the inserted block is the targeted - // FCU head block, we can try to make it canonical. - if block_hash == target.head_block_hash { - self.set_blockchain_tree_action( - BlockchainTreeAction::MakeNewPayloadCanonical { - payload_num_hash: block_num_hash, - status, - tx, - }, - ); - return Ok(EngineEventOutcome::Processed) - } - } - // block was successfully inserted, so we can cancel the full block - // request, if any exists - self.sync.cancel_full_block_request(block_hash); - } - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - let _ = tx.send(Ok(status)); - } - BlockchainTreeAction::MakeNewPayloadCanonical { payload_num_hash, status, tx } => { - let status = match self.try_make_sync_target_canonical(payload_num_hash) { - Ok(()) => status, - Err((_hash, error)) => { - if error.is_fatal() { - let response = - Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); - let _ = tx.send(response); - return Err(RethError::Canonical(error)) - } else if error.optimistic_revert_block_number().is_some() { - // engine already set the pipeline unwind target on - // `try_make_sync_target_canonical` - PayloadStatus::from_status(PayloadStatusEnum::Syncing) - } else { - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - ) - } - } - }; - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - let _ = tx.send(Ok(status)); - } - - BlockchainTreeAction::InsertDownloadedPayload { block } => { - let downloaded_num_hash = block.num_hash(); - match self.blockchain.insert_block_without_senders( - block, - BlockValidationKind::SkipStateRootValidation, - ) { - Ok(status) => { - match status { - InsertPayloadOk::Inserted(BlockStatus::Valid(_)) => { - // block is connected to the canonical chain and is valid. - // if it's not connected to current canonical head, the state root - // has not been validated. - if let Err((hash, error)) = - self.try_make_sync_target_canonical(downloaded_num_hash) - { - if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); - } else if !error.is_block_hash_not_found() { - debug!( - target: "consensus::engine", - "Unexpected error while making sync target canonical: {:?}, {:?}", - error, - hash - ) - } - } - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head, - missing_ancestor: missing_parent, - }) => { - // block is not connected to the canonical head, we need to download - // its missing branch first - self.on_disconnected_block( - downloaded_num_hash, - missing_parent, - head, - ); - } - _ => (), - } - } - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to insert downloaded block"); - if err.kind().is_invalid_block() { - let (block, err) = err.split(); - warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - - self.invalid_headers.insert(block.header.block_with_parent()); - } - } - } - } - }; - Ok(EngineEventOutcome::Processed) - } -} - -/// On initialization, the consensus engine will poll the message receiver and return -/// [`Poll::Pending`] until the first forkchoice update message is received. -/// -/// As soon as the consensus engine receives the first forkchoice updated message and updates the -/// local forkchoice state, it will launch the pipeline to sync to the head hash. -/// While the pipeline is syncing, the consensus engine will keep processing messages from the -/// receiver and forwarding them to the blockchain tree. -impl Future for BeaconConsensusEngine -where - N: TreeNodeTypes, - Client: EthBlockClient + 'static, - BT: BlockchainTreeEngine - + BlockReader, Header = HeaderTy> - + BlockIdReader - + CanonChainTracker
> - + StageCheckpointReader - + ChainSpecProvider - + Unpin - + 'static, -{ - type Output = Result<(), BeaconConsensusEngineError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // Control loop that advances the state - 'main: loop { - // Poll a running hook with db write access (if any) and CL messages first, draining - // both and then proceeding to polling other parts such as SyncController and hooks. - loop { - // Poll a running hook with db write access first, as we will not be able to process - // any engine messages until it's finished. - if let Poll::Ready(result) = - this.hooks.poll_active_db_write_hook(cx, this.current_engine_hook_context()?)? - { - this.on_hook_result(result)?; - continue - } - - // Process any blockchain tree action result as set forth during engine message - // processing. - if let Some(action) = this.blockchain_tree_action.take() { - match this.on_blockchain_tree_action(action) { - Ok(EngineEventOutcome::Processed) => {} - Ok(EngineEventOutcome::ReachedMaxBlock) => return Poll::Ready(Ok(())), - Err(error) => { - error!(target: "consensus::engine", %error, "Encountered fatal error"); - return Poll::Ready(Err(error.into())) - } - }; - - // Blockchain tree action handler might set next action to take. - continue - } - - // If the db write hook is no longer active and we have a pending forkchoice update, - // process it first. - if this.hooks.active_db_write_hook().is_none() { - if let Some((state, attrs, tx)) = this.pending_forkchoice_update.take() { - this.set_blockchain_tree_action( - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }, - ); - continue - } - } - - // Process one incoming message from the CL. We don't drain the messages right away, - // because we want to sneak a polling of running hook in between them. - // - // These messages can affect the state of the SyncController and they're also time - // sensitive, hence they are polled first. - if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { - match msg { - BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - version: _version, - } => { - this.on_forkchoice_updated(state, payload_attrs, tx); - } - BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { - match this.on_new_payload(payload, sidecar) { - Ok(Either::Right(block)) => { - this.set_blockchain_tree_action( - BlockchainTreeAction::InsertNewPayload { block, tx }, - ); - } - Ok(Either::Left(status)) => { - let _ = tx.send(Ok(status)); - } - Err(error) => { - let _ = tx.send(Err(error)); - } - } - } - BeaconEngineMessage::TransitionConfigurationExchanged => { - this.blockchain.on_transition_configuration_exchanged(); - } - } - continue - } - - // Both running hook with db write access and engine messages are pending, - // proceed to other polls - break - } - - // process sync events if any - if let Poll::Ready(sync_event) = this.sync.poll(cx) { - match this.on_sync_event(sync_event)? { - // Sync event was successfully processed - EngineEventOutcome::Processed => (), - // Max block has been reached, exit the engine loop - EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), - } - - // this could have taken a while, so we start the next cycle to handle any new - // engine messages - continue 'main - } - - // at this point, all engine messages and sync events are fully drained - - // Poll next hook if all conditions are met: - // 1. Engine and sync messages are fully drained (both pending) - // 2. 
Latest FCU status is not INVALID - if !this.forkchoice_state_tracker.is_latest_invalid() { - if let Poll::Ready(result) = this.hooks.poll_next_hook( - cx, - this.current_engine_hook_context()?, - this.sync.is_pipeline_active(), - )? { - this.on_hook_result(result)?; - - // ensure we're polling until pending while also checking for new engine - // messages before polling the next hook - continue 'main - } - } - - // incoming engine messages and sync events are drained, so we can yield back - // control - return Poll::Pending - } - } -} - -enum BlockchainTreeAction { - MakeForkchoiceHeadCanonical { - state: ForkchoiceState, - attrs: Option, - tx: oneshot::Sender>, - }, - InsertNewPayload { - block: SealedBlock, - tx: oneshot::Sender>, - }, - MakeNewPayloadCanonical { - payload_num_hash: BlockNumHash, - status: PayloadStatus, - tx: oneshot::Sender>, - }, - /// Action to insert a new block that we successfully downloaded from the network. - /// There are several outcomes for inserting a downloaded block into the tree: - /// - /// ## [`BlockStatus::Valid`] - /// - /// The block is connected to the current canonical chain and is valid. - /// If the block is an ancestor of the current forkchoice head, then we can try again to - /// make the chain canonical. - /// - /// ## [`BlockStatus::Disconnected`] - /// - /// The block is not connected to the canonical chain, and we need to download the - /// missing parent first. - /// - /// ## Insert Error - /// - /// If the insertion into the tree failed, then the block was well-formed (valid hash), - /// but its chain is invalid, which means the FCU that triggered the - /// download is invalid. Here we can stop because there's nothing to do here - /// and the engine needs to wait for another FCU. - InsertDownloadedPayload { block: SealedBlock }, -} - -/// Represents outcomes of processing an engine event -#[derive(Debug)] -enum EngineEventOutcome { - /// Engine event was processed successfully, engine should continue. - Processed, - /// Engine event was processed successfully and reached max block. - ReachedMaxBlock, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - test_utils::{spawn_consensus_engine, TestConsensusEngineBuilder}, - BeaconForkChoiceUpdateError, - }; - use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; - use assert_matches::assert_matches; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_node_types::FullNodePrimitives; - use reth_primitives::BlockExt; - use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; - use reth_rpc_types_compat::engine::payload::block_to_payload_v1; - use reth_stages::{ExecOutput, PipelineError, StageError}; - use reth_stages_api::StageCheckpoint; - use reth_testing_utils::generators::{self, Rng}; - use std::{collections::VecDeque, sync::Arc}; - use tokio::sync::oneshot::error::TryRecvError; - - // Pipeline error is propagated. 
- #[tokio::test] - async fn pipeline_error_is_propagated() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); - - let res = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!( - res.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - } - - // Test that the consensus engine is idle until first forkchoice updated is received. - #[tokio::test] - async fn is_idle_until_forkchoice_is_set() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); - - let mut rx = spawn_consensus_engine(consensus_engine); - - // consensus engine is idle - tokio::time::sleep(Duration::from_millis(100)).await; - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine is still idle because no FCUs were received - let _ = env - .send_new_payload( - block_to_payload_v1(SealedBlock::<_>::default()), - ExecutionPayloadSidecar::none(), - ) - .await; - - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine is still idle because pruning is running - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine receives a forkchoice state and triggers the pipeline when pruning is - // finished - loop { - match rx.try_recv() { - Ok(result) => { - assert_matches!( - result, - Err(BeaconConsensusEngineError::Pipeline(n)) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - break - } - Err(TryRecvError::Empty) => { - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - } - Err(err) => panic!("receive error: {err}"), - } - } - } - - // Test that the consensus engine runs the pipeline again if the tree cannot be restored. - // The consensus engine will propagate the second result (error) only if it runs the pipeline - // for the second time. 
- #[tokio::test] - async fn runs_pipeline_again_if_tree_not_restored() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), - Err(StageError::ChannelClosed), - ])) - .disable_blockchain_tree_sync() - .with_max_block(2) - .build(); - - let rx = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - finalized_block_hash: rng.gen(), - ..Default::default() - }) - .await; - - assert_matches!( - rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - } - - #[tokio::test] - async fn terminates_upon_reaching_max_block() { - let mut rng = generators::rng(); - let max_block = 1000; - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(max_block), - done: true, - })])) - .with_max_block(max_block) - .disable_blockchain_tree_sync() - .build(); - - let rx = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!(rx.await, Ok(Ok(()))); - } - - fn insert_blocks< - 'a, - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - BlockBody = reth_primitives::BlockBody, - BlockHeader = reth_primitives::Header, - >, - >, - >( - provider_factory: ProviderFactory, - mut blocks: impl Iterator, - ) { - let provider = provider_factory.provider_rw().unwrap(); - blocks - .try_for_each(|b| { - provider - .insert_block( - b.clone().try_seal_with_senders().expect("invalid tx signature in block"), - StorageLocation::Database, - ) - .map(drop) - }) - .expect("failed to insert"); - provider.commit().unwrap(); - } - - mod fork_choice_updated { - use super::*; - use alloy_primitives::U256; - use alloy_rpc_types_engine::ForkchoiceUpdateError; - use generators::BlockParams; - use reth_db::{tables, test_utils::create_test_static_files_dir, Database}; - use reth_db_api::transaction::DbTxMut; - use reth_provider::{providers::StaticFileProvider, test_utils::MockNodeTypesWithDB}; - use reth_testing_utils::generators::random_block; - - #[tokio::test] - async fn empty_head() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - let res = env.send_forkchoice_updated(ForkchoiceState::default()).await; - assert_matches!( - res, - Err(BeaconForkChoiceUpdateError::ForkchoiceUpdateError( - ForkchoiceUpdateError::InvalidState - )) - ); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn 
valid_forkchoice() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - env.db - .update(|tx| { - tx.put::( - StageId::Finish.to_string(), - StageCheckpoint::new(block1.number), - ) - }) - .unwrap() - .unwrap(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - let forkchoice = ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }; - - let result = env.send_forkchoice_updated(forkchoice).await.unwrap(); - let expected_result = ForkchoiceUpdated::new(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(block1.hash()), - )); - assert_eq!(result, expected_result); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn unknown_head_hash() { - let mut rng = generators::rng(); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .disable_blockchain_tree_sync() - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { parent: Some(genesis.hash()), ..Default::default() }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - let next_head = random_block( - &mut rng, - 2, - BlockParams { - parent: Some(block1.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let next_forkchoice_state = ForkchoiceState { - head_block_hash: next_head.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }; - - // if we `await` in the assert, the forkchoice will poll after we've inserted the block, - // and it will return VALID instead of SYNCING - let invalid_rx = env.send_forkchoice_updated(next_forkchoice_state).await; - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - // Insert next head immediately after sending forkchoice update - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - 
StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - std::iter::once(&next_head), - ); - - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing); - assert_matches!(invalid_rx, Ok(result) => assert_eq!(result, expected_result)); - - let result = env.send_forkchoice_retry_on_syncing(next_forkchoice_state).await.unwrap(); - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(next_head.hash()); - assert_eq!(result, expected_result); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn unknown_finalized_hash() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .disable_blockchain_tree_sync() - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - drop(engine); - } - - #[tokio::test] - async fn forkchoice_updated_pre_merge() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .paris_at_ttd(U256::from(3), 3) - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let mut block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block1.header.set_difficulty(U256::from(1)); - - // a second pre-merge block - let mut block2 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block2.header.set_difficulty(U256::from(1)); - - // a transition block - let mut block3 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block3.header.set_difficulty(U256::from(1)); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - 
chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1, &block2, &block3].into_iter(), - ); - - let _engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - - assert_matches!(res, Ok(result) => { - let ForkchoiceUpdated { payload_status, .. } = result; - assert_matches!(payload_status.status, PayloadStatusEnum::Invalid { .. }); - assert_eq!(payload_status.latest_valid_hash, Some(B256::ZERO)); - }); - } - - #[tokio::test] - async fn forkchoice_updated_invalid_pow() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_temp_dir, temp_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(temp_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let _engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - } - } - - mod new_payload { - use super::*; - use alloy_genesis::Genesis; - use alloy_primitives::U256; - use generators::BlockParams; - use reth_db::test_utils::create_test_static_files_dir; - use reth_primitives::EthereumHardfork; - use reth_provider::{ - providers::StaticFileProvider, - test_utils::{blocks::BlockchainTestData, MockNodeTypesWithDB}, - }; - use reth_testing_utils::{generators::random_block, GenesisAllocator}; - #[tokio::test] - async fn new_payload_before_forkchoice() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send new payload - let res = env - .send_new_payload( - block_to_payload_v1(random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - )), - ExecutionPayloadSidecar::none(), - ) - .await; - - // Invalid, because this is a genesis block - assert_matches!(res, Ok(result) => 
assert_matches!(result.status, PayloadStatusEnum::Invalid { .. })); - - // Send new payload - let res = env - .send_new_payload( - block_to_payload_v1(random_block( - &mut rng, - 1, - BlockParams { ommers_count: Some(0), ..Default::default() }, - )), - ExecutionPayloadSidecar::none(), - ) - .await; - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_known() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let block2 = random_block( - &mut rng, - 2, - BlockParams { - parent: Some(block1.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1, &block2].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block1.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let result = env - .send_new_payload_retry_on_syncing( - block_to_payload_v1(block2.clone()), - ExecutionPayloadSidecar::none(), - ) - .await - .unwrap(); - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block2.hash()); - assert_eq!(result, expected_result); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn simple_validate_block() { - let mut rng = generators::rng(); - let amount = U256::from(1000000000000000000u64); - let mut allocator = GenesisAllocator::default().with_rng(&mut rng); - for _ in 0..16 { - // add 16 new accounts - allocator.new_funded_account(amount); - } - - let alloc = allocator.build(); - - let genesis = Genesis::default().extend_accounts(alloc); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .shanghai_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_real_pipeline() - .with_real_executor() - .with_real_consensus() - .build(); - - let genesis = SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(chain_spec.genesis_hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - // TODO: add transactions that transfer from the alloc accounts, generating the new - // block tx and state root - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block1.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_parent_unknown() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - std::iter::once(&genesis), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: genesis.hash(), - finalized_block_hash: genesis.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(genesis.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let parent = rng.gen(); - let block = random_block( - &mut rng, - 2, - BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, - ); - let res = env - .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_pre_merge() { - let data = BlockchainTestData::default(); - let mut block1 = data.blocks[0].0.block.clone(); - block1.header.set_difficulty( - MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), - ); - block1 = block1.unseal::().seal_slow(); - let (block2, exec_result2) = data.blocks[1].clone(); - let mut block2 = block2.unseal().block; - block2.body.withdrawals = None; - block2.header.parent_hash = block1.hash(); - block2.header.base_fee_per_gas = Some(100); - block2.header.difficulty = U256::ZERO; - let block2 = block2.clone().seal_slow(); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .with_executor_results(Vec::from([exec_result2])) - .build(); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&data.genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - - 
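// (Context for the assertions that follow: the Engine API prescribes that a
// forkchoice head referencing a pre-merge, proof-of-work block is answered
// with INVALID and a zero `latestValidHash`, hence the `BlockPreMerge`
// validation error and the `B256::ZERO` hash expected below.)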
let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let result = env - .send_new_payload_retry_on_syncing( - block_to_payload_v1(block2.clone()), - ExecutionPayloadSidecar::none(), - ) - .await - .unwrap(); - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block2.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_eq!(result, expected_result); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - } -} diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs deleted file mode 100644 index adbb531b22fd..000000000000 --- a/crates/consensus/beacon/src/engine/sync.rs +++ /dev/null @@ -1,672 +0,0 @@ -//! Sync management for the engine implementation. - -use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, EthBeaconConsensus, -}; -use alloy_consensus::Header; -use alloy_primitives::{BlockNumber, B256}; -use futures::FutureExt; -use reth_network_p2p::{ - full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, -}; -use reth_node_types::{BodyTy, HeaderTy}; -use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; -use reth_provider::providers::ProviderNodeTypes; -use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; -use reth_tasks::TaskSpawner; -use reth_tokio_util::EventSender; -use std::{ - cmp::{Ordering, Reverse}, - collections::{binary_heap::PeekMut, BinaryHeap}, - sync::Arc, - task::{ready, Context, Poll}, -}; -use tokio::sync::oneshot; -use tracing::trace; - -/// Manages syncing under the control of the engine. -/// -/// This type controls the [Pipeline] and supports (single) full block downloads. -/// -/// Caution: If the pipeline is running, this type will not emit blocks downloaded from the network -/// [`EngineSyncEvent::FetchedFullBlock`] until the pipeline is idle to prevent commits to the -/// database while the pipeline is still active. -pub(crate) struct EngineSyncController -where - N: ProviderNodeTypes, - Client: BlockClient, -{ - /// A downloader that can download full blocks from the network. - full_block_client: FullBlockClient, - /// The type that can spawn the pipeline task. - pipeline_task_spawner: Box, - /// The current state of the pipeline. - /// The pipeline is used for large ranges. - pipeline_state: PipelineState, - /// Pending target block for the pipeline to sync - pending_pipeline_target: Option, - /// In-flight full block requests in progress. - inflight_full_block_requests: Vec>, - /// In-flight full block _range_ requests in progress. - inflight_block_range_requests: Vec>, - /// Sender for engine events. - event_sender: EventSender>, - /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for - /// ordering. This means the blocks will be popped from the heap with ascending block numbers. - range_buffered_blocks: BinaryHeap, BodyTy>>>, - /// Max block after which the consensus engine would terminate the sync. Used for debugging - /// purposes. 
-    max_block: Option<BlockNumber>,
-    /// Engine sync metrics.
-    metrics: EngineSyncMetrics,
-}
-
-impl<N, Client> EngineSyncController<N, Client>
-where
-    N: ProviderNodeTypes,
-    Client: BlockClient,
-{
-    /// Create a new instance
-    pub(crate) fn new(
-        pipeline: Pipeline<N>,
-        client: Client,
-        pipeline_task_spawner: Box<dyn TaskSpawner>,
-        max_block: Option<BlockNumber>,
-        chain_spec: Arc<N::ChainSpec>,
-        event_sender: EventSender<BeaconConsensusEngineEvent<N::Primitives>>,
-    ) -> Self {
-        Self {
-            full_block_client: FullBlockClient::new(
-                client,
-                Arc::new(EthBeaconConsensus::new(chain_spec)),
-            ),
-            pipeline_task_spawner,
-            pipeline_state: PipelineState::Idle(Some(pipeline)),
-            pending_pipeline_target: None,
-            inflight_full_block_requests: Vec::new(),
-            inflight_block_range_requests: Vec::new(),
-            range_buffered_blocks: BinaryHeap::new(),
-            event_sender,
-            max_block,
-            metrics: EngineSyncMetrics::default(),
-        }
-    }
-}
-
-impl<N, Client> EngineSyncController<N, Client>
-where
-    N: ProviderNodeTypes,
-    Client: BlockClient<Header = HeaderTy<N>, Body = BodyTy<N>> + 'static,
-{
-    /// Sets the metrics for the active downloads
-    fn update_block_download_metrics(&self) {
-        self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64);
-        // TODO: full block range metrics
-    }
-
-    /// Sets the max block value for testing
-    #[cfg(test)]
-    pub(crate) fn set_max_block(&mut self, block: BlockNumber) {
-        self.max_block = Some(block);
-    }
-
-    /// Cancels all download requests that are in progress and buffered blocks.
-    pub(crate) fn clear_block_download_requests(&mut self) {
-        self.inflight_full_block_requests.clear();
-        self.inflight_block_range_requests.clear();
-        self.range_buffered_blocks.clear();
-        self.update_block_download_metrics();
-    }
-
-    /// Cancels the full block request with the given hash.
-    pub(crate) fn cancel_full_block_request(&mut self, hash: B256) {
-        self.inflight_full_block_requests.retain(|req| *req.hash() != hash);
-        self.update_block_download_metrics();
-    }
-
-    /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`.
-    #[allow(dead_code)]
-    pub(crate) const fn is_pipeline_sync_pending(&self) -> bool {
-        self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle()
-    }
-
-    /// Returns `true` if the pipeline is idle.
-    pub(crate) const fn is_pipeline_idle(&self) -> bool {
-        self.pipeline_state.is_idle()
-    }
-
-    /// Returns `true` if the pipeline is active.
-    pub(crate) const fn is_pipeline_active(&self) -> bool {
-        !self.is_pipeline_idle()
-    }
-
-    /// Returns true if there's already a request for the given hash.
-    pub(crate) fn is_inflight_request(&self, hash: B256) -> bool {
-        self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash)
-    }
-
-    /// Starts requesting a range of blocks from the network, in reverse from the given hash.
-    ///
-    /// If the `count` is 1, this will use the `download_full_block` method instead, because it
-    /// downloads headers and bodies for the block concurrently.
-    pub(crate) fn download_block_range(&mut self, hash: B256, count: u64) {
-        if count == 1 {
-            self.download_full_block(hash);
-        } else {
-            trace!(
-                target: "consensus::engine",
-                ?hash,
-                ?count,
-                "start downloading full block range."
-            );
-
-            // notify listeners that we're downloading a block range
-            self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress(
-                ConsensusEngineLiveSyncProgress::DownloadingBlocks {
-                    remaining_blocks: count,
-                    target: hash,
-                },
-            ));
-            let request = self.full_block_client.get_full_block_range(hash, count);
-            self.inflight_block_range_requests.push(request);
-        }
-
-        // // TODO: need more metrics for block ranges
-        // self.update_block_download_metrics();
-    }
-
-    /// Starts requesting a full block from the network.
-    ///
-    /// Returns `true` if the request was started, `false` if there's already a request for the
-    /// given hash.
-    pub(crate) fn download_full_block(&mut self, hash: B256) -> bool {
-        if self.is_inflight_request(hash) {
-            return false
-        }
-        trace!(
-            target: "consensus::engine::sync",
-            ?hash,
-            "Start downloading full block"
-        );
-
-        // notify listeners that we're downloading a block
-        self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress(
-            ConsensusEngineLiveSyncProgress::DownloadingBlocks {
-                remaining_blocks: 1,
-                target: hash,
-            },
-        ));
-
-        let request = self.full_block_client.get_full_block(hash);
-        self.inflight_full_block_requests.push(request);
-
-        self.update_block_download_metrics();
-
-        true
-    }
-
-    /// Sets a new target to sync the pipeline to.
- /// - /// But ensures the target is not the zero hash. - pub(crate) fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { - if target.sync_target().is_some_and(|target| target.is_zero()) { - trace!( - target: "consensus::engine::sync", - "Pipeline target cannot be zero hash." - ); - // precaution to never sync to the zero hash - return - } - self.pending_pipeline_target = Some(target); - } - - /// Check if the engine reached max block as specified by `max_block` parameter. - /// - /// Note: this is mainly for debugging purposes. - pub(crate) fn has_reached_max_block(&self, progress: BlockNumber) -> bool { - let has_reached_max_block = self.max_block.is_some_and(|target| progress >= target); - if has_reached_max_block { - trace!( - target: "consensus::engine::sync", - ?progress, - max_block = ?self.max_block, - "Consensus engine reached max block" - ); - } - has_reached_max_block - } - - /// Advances the pipeline state. - /// - /// This checks for the result in the channel, or returns pending if the pipeline is idle. - fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll> { - let res = match self.pipeline_state { - PipelineState::Idle(_) => return Poll::Pending, - PipelineState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - let ev = match res { - Ok((pipeline, result)) => { - let minimum_block_number = pipeline.minimum_block_number(); - let reached_max_block = - self.has_reached_max_block(minimum_block_number.unwrap_or_default()); - self.pipeline_state = PipelineState::Idle(Some(pipeline)); - EngineSyncEvent::PipelineFinished { result, reached_max_block } - } - Err(_) => { - // failed to receive the pipeline - EngineSyncEvent::PipelineTaskDropped - } - }; - Poll::Ready(ev) - } - - /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to - /// run continuously. - fn try_spawn_pipeline(&mut self) -> Option> { - match &mut self.pipeline_state { - PipelineState::Idle(pipeline) => { - let target = self.pending_pipeline_target.take()?; - let (tx, rx) = oneshot::channel(); - - let pipeline = pipeline.take().expect("exists"); - self.pipeline_task_spawner.spawn_critical_blocking( - "pipeline task", - Box::pin(async move { - let result = pipeline.run_as_fut(Some(target)).await; - let _ = tx.send(result); - }), - ); - self.pipeline_state = PipelineState::Running(rx); - - // we also clear any pending full block requests because we expect them to be - // outdated (included in the range the pipeline is syncing anyway) - self.clear_block_download_requests(); - - Some(EngineSyncEvent::PipelineStarted(Some(target))) - } - PipelineState::Running(_) => None, - } - } - - /// Advances the sync process. 
- pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { - // try to spawn a pipeline if a target is set - if let Some(event) = self.try_spawn_pipeline() { - return Poll::Ready(event) - } - - // make sure we poll the pipeline if it's active, and return any ready pipeline events - if !self.is_pipeline_idle() { - // advance the pipeline - if let Poll::Ready(event) = self.poll_pipeline(cx) { - return Poll::Ready(event) - } - } - - // advance all full block requests - for idx in (0..self.inflight_full_block_requests.len()).rev() { - let mut request = self.inflight_full_block_requests.swap_remove(idx); - if let Poll::Ready(block) = request.poll_unpin(cx) { - trace!(target: "consensus::engine", block=?block.num_hash(), "Received single full block, buffering"); - self.range_buffered_blocks.push(Reverse(OrderedSealedBlock(block))); - } else { - // still pending - self.inflight_full_block_requests.push(request); - } - } - - // advance all full block range requests - for idx in (0..self.inflight_block_range_requests.len()).rev() { - let mut request = self.inflight_block_range_requests.swap_remove(idx); - if let Poll::Ready(blocks) = request.poll_unpin(cx) { - trace!(target: "consensus::engine", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering"); - self.range_buffered_blocks - .extend(blocks.into_iter().map(OrderedSealedBlock).map(Reverse)); - } else { - // still pending - self.inflight_block_range_requests.push(request); - } - } - - self.update_block_download_metrics(); - - // drain an element of the block buffer if there are any - if let Some(block) = self.range_buffered_blocks.pop() { - // peek ahead and pop duplicates - while let Some(peek) = self.range_buffered_blocks.peek_mut() { - if peek.0 .0.hash() == block.0 .0.hash() { - PeekMut::pop(peek); - } else { - break - } - } - return Poll::Ready(EngineSyncEvent::FetchedFullBlock(block.0 .0)) - } - - Poll::Pending - } -} - -/// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. -#[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); - -impl PartialOrd for OrderedSealedBlock -where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for OrderedSealedBlock -where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, -{ - fn cmp(&self, other: &Self) -> Ordering { - self.0.number().cmp(&other.0.number()) - } -} - -/// The event type emitted by the [`EngineSyncController`]. -#[derive(Debug)] -pub(crate) enum EngineSyncEvent { - /// A full block has been downloaded from the network. - FetchedFullBlock(SealedBlock), - /// Pipeline started syncing - /// - /// This is none if the pipeline is triggered without a specific target. - PipelineStarted(Option), - /// Pipeline finished - /// - /// If this is returned, the pipeline is idle. - PipelineFinished { - /// Final result of the pipeline run. - result: Result, - /// Whether the pipeline reached the configured `max_block`. - /// - /// Note: this is only relevant in debugging scenarios. - reached_max_block: bool, - }, - /// Pipeline task was dropped after it was started, unable to receive it because channel - /// closed. This would indicate a panicked pipeline task - PipelineTaskDropped, -} - -/// The possible pipeline states within the sync controller. 
-/// -/// [`PipelineState::Idle`] means that the pipeline is currently idle. -/// [`PipelineState::Running`] means that the pipeline is currently running. -/// -/// NOTE: The differentiation between these two states is important, because when the pipeline is -/// running, it acquires the write lock over the database. This means that we cannot forward to the -/// blockchain tree any messages that would result in database writes, since it would result in a -/// deadlock. -enum PipelineState { - /// Pipeline is idle. - Idle(Option>), - /// Pipeline is running and waiting for a response - Running(oneshot::Receiver>), -} - -impl PipelineState { - /// Returns `true` if the state matches idle. - const fn is_idle(&self) -> bool { - matches!(self, Self::Idle(_)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; - use assert_matches::assert_matches; - use futures::poll; - use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; - use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient, EthBlockClient}; - use reth_primitives::{BlockBody, SealedHeader}; - use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, - }; - use reth_prune_types::PruneModes; - use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; - use reth_stages_api::StageCheckpoint; - use reth_static_file::StaticFileProducer; - use reth_tasks::TokioTaskExecutor; - use std::{collections::VecDeque, future::poll_fn, ops::Range}; - use tokio::sync::watch; - - struct TestPipelineBuilder { - pipeline_exec_outputs: VecDeque>, - executor_results: Vec, - max_block: Option, - } - - impl TestPipelineBuilder { - /// Create a new [`TestPipelineBuilder`]. - const fn new() -> Self { - Self { - pipeline_exec_outputs: VecDeque::new(), - executor_results: Vec::new(), - max_block: None, - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. - fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_exec_outputs = pipeline_exec_outputs; - self - } - - /// Set the executor results to use for the test consensus engine. - #[allow(dead_code)] - fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_results = executor_results; - self - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Builds the pipeline. - fn build(self, chain_spec: Arc) -> Pipeline { - reth_tracing::init_test_tracing(); - - // Setup pipeline - let (tip_tx, _tip_rx) = watch::channel(B256::default()); - let mut pipeline = Pipeline::::builder() - .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) - .with_tip_sender(tip_tx); - - if let Some(max_block) = self.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - pipeline.build(provider_factory, static_file_producer) - } - } - - struct TestSyncControllerBuilder { - max_block: Option, - client: Option, - } - - impl TestSyncControllerBuilder { - /// Create a new [`TestSyncControllerBuilder`]. 
- const fn new() -> Self { - Self { max_block: None, client: None } - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Sets the client to use for network operations. - fn with_client(mut self, client: Client) -> Self { - self.client = Some(client); - self - } - - /// Builds the sync controller. - fn build( - self, - pipeline: Pipeline, - chain_spec: Arc, - ) -> EngineSyncController> - where - N: ProviderNodeTypes, - Client: EthBlockClient + 'static, - { - let client = self - .client - .map(Either::Left) - .unwrap_or_else(|| Either::Right(TestFullBlockClient::default())); - - EngineSyncController::new( - pipeline, - client, - Box::::default(), - self.max_block, - chain_spec, - Default::default(), - ) - } - } - - #[tokio::test] - async fn pipeline_started_after_setting_target() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let client = TestFullBlockClient::default(); - insert_headers_into_client(&client, SealedHeader::default(), 0..10); - // force the pipeline to be "done" after 5 blocks - let pipeline = TestPipelineBuilder::new() - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(5), - done: true, - })])) - .build(chain_spec.clone()); - - let mut sync_controller = TestSyncControllerBuilder::new() - .with_client(client.clone()) - .build(pipeline, chain_spec); - - let tip = client.highest_block().expect("there should be blocks here"); - sync_controller.set_pipeline_sync_target(tip.hash().into()); - - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_event = poll!(sync_future); - - // can assert that the first event here is PipelineStarted because we set the sync target, - // and we should get Ready because the pipeline should be spawned immediately - assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { - assert_eq!(target.sync_target().unwrap(), tip.hash()); - }); - - // the next event should be the pipeline finishing in a good state - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_ready = sync_future.await; - assert_matches!(next_ready, EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: 5 })); - // no max block configured - assert!(!reached_max_block); - }); - } - - fn insert_headers_into_client( - client: &TestFullBlockClient, - genesis_header: SealedHeader, - range: Range, - ) { - let mut sealed_header = genesis_header; - let body = BlockBody::default(); - for _ in range { - let (mut header, hash) = sealed_header.split(); - // update to the next header - header.parent_hash = hash; - header.number += 1; - header.timestamp += 1; - sealed_header = SealedHeader::seal(header); - client.insert(sealed_header.clone(), body.clone()); - } - } - - #[tokio::test] - async fn controller_sends_range_request() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let client = TestFullBlockClient::default(); - let header = Header { - base_fee_per_gas: Some(7), - gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, - ..Default::default() - }; - let header = SealedHeader::seal(header); - 
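// The "ensure they are in ascending order" assertions at the end of this test
// work because the controller buffers downloaded blocks in
// `range_buffered_blocks: BinaryHeap<Reverse<OrderedSealedBlock<...>>>`:
// wrapping entries in `Reverse` turns Rust's max-heap into a min-heap ordered
// by block number. A minimal standalone sketch of that mechanism (the sample
// numbers are made up for illustration):
//
//     use std::{cmp::Reverse, collections::BinaryHeap};
//
//     fn main() {
//         let mut heap = BinaryHeap::new();
//         for number in [9u64, 1, 5] {
//             heap.push(Reverse(number));
//         }
//         // pops the smallest first: 1, then 5, then 9
//         assert_eq!(heap.pop(), Some(Reverse(1)));
//     }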
insert_headers_into_client(&client, header, 0..10); - - // set up a pipeline - let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); - - let mut sync_controller = TestSyncControllerBuilder::new() - .with_client(client.clone()) - .build(pipeline, chain_spec); - - let tip = client.highest_block().expect("there should be blocks here"); - - // call the download range method - sync_controller.download_block_range(tip.hash(), tip.number); - - // ensure we have one in flight range request - assert_eq!(sync_controller.inflight_block_range_requests.len(), 1); - - // ensure the range request is made correctly - let first_req = sync_controller.inflight_block_range_requests.first().unwrap(); - assert_eq!(first_req.start_hash(), tip.hash()); - assert_eq!(first_req.count(), tip.number); - - // ensure they are in ascending order - for num in 1..=10 { - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_ready = sync_future.await; - assert_matches!(next_ready, EngineSyncEvent::FetchedFullBlock(block) => { - assert_eq!(block.number, num); - }); - } - } -} diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs deleted file mode 100644 index 100d9ff0f40d..000000000000 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ /dev/null @@ -1,466 +0,0 @@ -#![allow(missing_docs)] -use crate::{ - engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, - BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, -}; -use alloy_primitives::{BlockNumber, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; -use reth_blockchain_tree::{ - config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, -}; -use reth_chainspec::ChainSpec; -use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, FullConsensus}; -use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_engine_primitives::{BeaconOnNewPayloadError, EngineApiMessageVersion}; -use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_ethereum_evm::execute::EthExecutorProvider; -use reth_evm::{either::Either, test_utils::MockExecutorProvider}; -use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{ - sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, EthBlockClient, -}; -use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::SealedHeader; -use reth_provider::{ - providers::BlockchainProvider, - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, -}; -use reth_prune::Pruner; -use reth_prune_types::PruneModes; -use reth_stages::{sets::DefaultStages, test_utils::TestStages, ExecOutput, Pipeline, StageError}; -use reth_static_file::StaticFileProducer; -use reth_tasks::TokioTaskExecutor; -use std::{collections::VecDeque, sync::Arc}; -use tokio::sync::{oneshot, watch}; - -type DatabaseEnv = TempDatabase; - -type TestBeaconConsensusEngine = BeaconConsensusEngine< - MockNodeTypesWithDB, - BlockchainProvider, - Arc>, ->; - -#[derive(Debug)] -pub struct TestEnv { - pub db: DB, - // Keep the tip receiver around, so it's not dropped. 
- #[allow(dead_code)] - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, -} - -impl TestEnv { - const fn new( - db: DB, - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, - ) -> Self { - Self { db, tip_rx, engine_handle } - } - - pub async fn send_new_payload>( - &self, - payload: T, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - self.engine_handle.new_payload(payload.into(), sidecar).await - } - - /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine - /// is syncing. - pub async fn send_new_payload_retry_on_syncing>( - &self, - payload: T, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - let payload: ExecutionPayload = payload.into(); - loop { - let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?; - if !result.is_syncing() { - return Ok(result) - } - } - } - - pub async fn send_forkchoice_updated( - &self, - state: ForkchoiceState, - ) -> Result { - self.engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::default()) - .await - } - - /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine - /// is syncing. - pub async fn send_forkchoice_retry_on_syncing( - &self, - state: ForkchoiceState, - ) -> Result { - loop { - let result = self - .engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::default()) - .await?; - if !result.is_syncing() { - return Ok(result) - } - } - } -} - -// TODO: add with_consensus in case we want to use the TestConsensus purposeful failure - this -// would require similar patterns to how we use with_client and the downloader -/// Represents either a real consensus engine, or a test consensus engine. -#[derive(Debug, Default)] -enum TestConsensusConfig { - /// Test consensus engine - #[default] - Test, - /// Real consensus engine - Real, -} - -/// Represents either test pipeline outputs, or real pipeline configuration. -#[derive(Debug)] -enum TestPipelineConfig { - /// Test pipeline outputs. - Test(VecDeque>), - /// Real pipeline configuration. - Real, -} - -impl Default for TestPipelineConfig { - fn default() -> Self { - Self::Test(VecDeque::new()) - } -} - -/// Represents either test executor results, or real executor configuration. -#[derive(Debug)] -enum TestExecutorConfig { - /// Test executor results. - Test(Vec), - /// Real executor configuration. - Real, -} - -impl Default for TestExecutorConfig { - fn default() -> Self { - Self::Test(Vec::new()) - } -} - -/// The basic configuration for a `TestConsensusEngine`, without generics for the client or -/// consensus engine. -#[derive(Debug)] -pub struct TestConsensusEngineBuilder { - chain_spec: Arc, - pipeline_config: TestPipelineConfig, - executor_config: TestExecutorConfig, - pipeline_run_threshold: Option, - max_block: Option, - consensus: TestConsensusConfig, -} - -impl TestConsensusEngineBuilder { - /// Create a new `TestConsensusEngineBuilder` with the given `ChainSpec`. - pub fn new(chain_spec: Arc) -> Self { - Self { - chain_spec, - pipeline_config: Default::default(), - executor_config: Default::default(), - pipeline_run_threshold: None, - max_block: None, - consensus: Default::default(), - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. 
- pub fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - pub const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - pub fn with_real_pipeline(mut self) -> Self { - self.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. - pub fn with_real_executor(mut self) -> Self { - self.executor_config = TestExecutorConfig::Real; - self - } - - /// Uses a real consensus engine instead of a test consensus engine. - pub const fn with_real_consensus(mut self) -> Self { - self.consensus = TestConsensusConfig::Real; - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - pub const fn disable_blockchain_tree_sync(mut self) -> Self { - self.pipeline_run_threshold = Some(0); - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - pub const fn with_client( - self, - client: Client, - ) -> NetworkedTestConsensusEngineBuilder - where - Client: EthBlockClient + 'static, - { - NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - pub fn build( - self, - ) -> (TestBeaconConsensusEngine, TestEnv>) { - let networked = NetworkedTestConsensusEngineBuilder { base_config: self, client: None }; - - networked.build() - } -} - -/// A builder for `TestConsensusEngine`, allows configuration of mocked pipeline outputs and -/// mocked executor results. -/// -/// This optionally includes a client for network operations. -#[derive(Debug)] -pub struct NetworkedTestConsensusEngineBuilder { - base_config: TestConsensusEngineBuilder, - client: Option, -} - -impl NetworkedTestConsensusEngineBuilder -where - Client: EthBlockClient + 'static, -{ - /// Set the pipeline execution outputs to use for the test consensus engine. - #[allow(dead_code)] - pub fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.base_config.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - #[allow(dead_code)] - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.base_config.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - pub const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.base_config.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - #[allow(dead_code)] - pub fn with_real_pipeline(mut self) -> Self { - self.base_config.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. 
- #[allow(dead_code)] - pub fn with_real_executor(mut self) -> Self { - self.base_config.executor_config = TestExecutorConfig::Real; - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - #[allow(dead_code)] - pub const fn disable_blockchain_tree_sync(mut self) -> Self { - self.base_config.pipeline_run_threshold = Some(0); - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - pub fn with_client( - self, - client: ClientType, - ) -> NetworkedTestConsensusEngineBuilder - where - ClientType: EthBlockClient + 'static, - { - NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - pub fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { - reth_tracing::init_test_tracing(); - let provider_factory = - create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - - let consensus: Arc = match self.base_config.consensus { - TestConsensusConfig::Real => { - Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) - } - TestConsensusConfig::Test => Arc::new(TestConsensus::default()), - }; - let payload_builder = spawn_test_payload_service::(); - - // use either noop client or a user provided client (for example TestFullBlockClient) - let client = Arc::new( - self.client - .map(Either::Left) - .unwrap_or_else(|| Either::Right(NoopFullBlockClient::default())), - ); - - // use either test executor or real executor - let executor_factory = match self.base_config.executor_config { - TestExecutorConfig::Test(results) => { - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(results); - Either::Left(executor_factory) - } - TestExecutorConfig::Real => { - Either::Right(EthExecutorProvider::ethereum(self.base_config.chain_spec.clone())) - } - }; - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - // Setup pipeline - let (tip_tx, tip_rx) = watch::channel(B256::default()); - let mut pipeline = match self.base_config.pipeline_config { - TestPipelineConfig::Test(outputs) => Pipeline::::builder() - .add_stages(TestStages::new(outputs, Default::default())) - .with_tip_sender(tip_tx), - TestPipelineConfig::Real => { - let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone().as_header_validator()) - .into_task(); - - let body_downloader = BodiesDownloaderBuilder::default() - .build( - client.clone(), - consensus.clone().as_consensus(), - provider_factory.clone(), - ) - .into_task(); - - Pipeline::::builder().add_stages(DefaultStages::new( - provider_factory.clone(), - tip_rx.clone(), - consensus.clone().as_consensus(), - header_downloader, - body_downloader, - executor_factory.clone(), - StageConfig::default(), - PruneModes::default(), - )) - } - }; - - if let Some(max_block) = self.base_config.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let pipeline = pipeline.build(provider_factory.clone(), static_file_producer); - - // Setup blockchain tree - let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); - let tree = Arc::new(ShareableBlockchainTree::new( - BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) - .expect("failed to create tree"), - )); - let header = self.base_config.chain_spec.genesis_header().clone(); - let 
genesis_block = SealedHeader::seal(header); - - let blockchain_provider = BlockchainProvider::with_blocks( - provider_factory.clone(), - tree, - genesis_block, - None, - None, - ); - - let pruner = Pruner::new_with_factory( - provider_factory.clone(), - vec![], - 5, - self.base_config.chain_spec.prune_delete_limit, - None, - watch::channel(FinishedExExHeight::NoExExs).1, - ); - - let mut hooks = EngineHooks::new(); - hooks.add(PruneHook::new(pruner, Box::::default())); - - let (mut engine, handle) = BeaconConsensusEngine::new( - client, - pipeline, - blockchain_provider, - Box::::default(), - Box::::default(), - None, - payload_builder, - None, - self.base_config.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), - hooks, - ) - .expect("failed to create consensus engine"); - - if let Some(max_block) = self.base_config.max_block { - engine.sync.set_max_block(max_block) - } - - (engine, TestEnv::new(provider_factory.db_ref().clone(), tip_rx, handle)) - } -} - -pub fn spawn_consensus_engine( - engine: TestBeaconConsensusEngine, -) -> oneshot::Receiver> -where - Client: EthBlockClient + 'static, -{ - let (tx, rx) = oneshot::channel(); - tokio::spawn(async move { - let result = engine.await; - tx.send(result).expect("failed to forward consensus engine result"); - }); - rx -} diff --git a/crates/consensus/beacon/src/lib.rs b/crates/consensus/beacon/src/lib.rs deleted file mode 100644 index f62a75f94d51..000000000000 --- a/crates/consensus/beacon/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! Beacon consensus implementation. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -pub use reth_ethereum_consensus::EthBeaconConsensus; - -mod engine; -pub use engine::*; diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 584e90f04d93..2f3ad6560c3e 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,6 +1,6 @@ use alloy_consensus::constants::ETH_TO_WEI; use alloy_primitives::BlockNumber; -use reth_chainspec::{EthereumHardfork, EthereumHardforks, Hardforks}; +use reth_chainspec::EthereumHardforks; /// Calculates the base block reward. /// @@ -35,10 +35,13 @@ pub fn base_block_reward( /// Calculates the base block reward __before__ the merge (Paris hardfork). /// /// Caution: The caller must ensure that the block number is before the merge. 
-pub fn base_block_reward_pre_merge(chain_spec: impl Hardforks, block_number: BlockNumber) -> u128 { - if chain_spec.fork(EthereumHardfork::Constantinople).active_at_block(block_number) { +pub fn base_block_reward_pre_merge( + chain_spec: impl EthereumHardforks, + block_number: BlockNumber, +) -> u128 { + if chain_spec.is_constantinople_active_at_block(block_number) { ETH_TO_WEI * 2 - } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_block(block_number) { + } else if chain_spec.is_byzantium_active_at_block(block_number) { ETH_TO_WEI * 3 } else { ETH_TO_WEI * 5 diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 67ecc886ea67..9e6a2ad90173 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -25,8 +25,7 @@ pub fn validate_header_base_fee( header: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number()) && - header.base_fee_per_gas().is_none() + if chain_spec.is_london_active_at_block(header.number()) && header.base_fee_per_gas().is_none() { return Err(ConsensusError::BaseFeeMissing) } @@ -65,8 +64,7 @@ pub fn validate_cancun_gas( ) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx - let header_blob_gas_used = - block.header().blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let header_blob_gas_used = block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; let total_blob_gas = block.body().blob_gas_used(); if total_blob_gas != header_blob_gas_used { return Err(ConsensusError::BlobGasUsedDiff(GotExpected { @@ -253,23 +251,25 @@ pub fn validate_against_parent_eip1559_base_fee< parent: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number()) { + if chain_spec.is_london_active_at_block(header.number()) { let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - let expected_base_fee = - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) { - alloy_eips::eip1559::INITIAL_BASE_FEE - } else { - // This BaseFeeMissing will not happen as previous blocks are checked to have - // them. - let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - calc_next_block_base_fee( - parent.gas_used(), - parent.gas_limit(), - base_fee, - chain_spec.base_fee_params_at_timestamp(header.timestamp()), - ) - }; + let expected_base_fee = if chain_spec + .ethereum_fork_activation(EthereumHardfork::London) + .transitions_at_block(header.number()) + { + alloy_eips::eip1559::INITIAL_BASE_FEE + } else { + // This BaseFeeMissing will not happen as previous blocks are checked to have + // them. 
+ let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + base_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp()), + ) + }; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { expected: expected_base_fee, diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index ba1b1321e776..1de99d8278f5 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -66,12 +66,15 @@ pub trait FullConsensus: /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: AsHeaderValidator { + /// The error type related to consensus. + type Error; + /// Ensures that body field values match the header. fn validate_body_against_header( &self, body: &B, header: &SealedHeader, - ) -> Result<(), ConsensusError>; + ) -> Result<(), Self::Error>; /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. @@ -82,8 +85,7 @@ pub trait Consensus: AsHeaderValidator { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) - -> Result<(), ConsensusError>; + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error>; } /// HeaderValidator is a protocol that validates headers and their relationships. @@ -170,13 +172,13 @@ impl, H> AsHeaderValidator for T { /// Helper trait to cast `Arc` to `Arc` pub trait AsConsensus: Consensus { /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] - fn as_consensus<'a>(self: Arc) -> Arc + 'a> + fn as_consensus<'a>(self: Arc) -> Arc + 'a> where Self: 'a; } impl, H, B> AsConsensus for T { - fn as_consensus<'a>(self: Arc) -> Arc + 'a> + fn as_consensus<'a>(self: Arc) -> Arc + 'a> where Self: 'a, { diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index c56e9867a256..ea269c07dada 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -30,18 +30,17 @@ impl HeaderValidator for NoopConsensus { } impl Consensus for NoopConsensus { + type Error = ConsensusError; + fn validate_body_against_header( &self, _body: &B, _header: &SealedHeader, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { Ok(()) } - fn validate_block_pre_execution( - &self, - _block: &SealedBlock, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } @@ -51,7 +50,7 @@ impl FullConsensus for NoopConsensus { &self, _block: &BlockWithSenders, _input: PostExecutionInput<'_, N::Receipt>, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { Ok(()) } } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 082c8ca8bb5a..3f26222c4b90 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -61,11 +61,13 @@ impl FullConsensus for TestConsensus { } impl Consensus for TestConsensus { + type Error = ConsensusError; + fn validate_body_against_header( &self, _body: &B, _header: &SealedHeader, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { if self.fail_body_against_header() { 
Err(ConsensusError::BaseFeeMissing) } else { @@ -73,10 +75,7 @@ impl Consensus for TestConsensus { } } - fn validate_block_pre_execution( - &self, - _block: &SealedBlock, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 44e518eec5c8..091e871844bd 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -13,9 +13,7 @@ use reth_node_builder::{ PayloadTypes, }; use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; -use reth_provider::providers::{ - BlockchainProvider, BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree, -}; +use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider, NodeTypesForTree}; use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use std::sync::Arc; @@ -58,7 +56,10 @@ where TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, - N::AddOns: RethRpcAddOns>, + N::AddOns: RethRpcAddOns> + EngineValidatorAddOn>, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Engine as PayloadTypes>::PayloadAttributes, + >, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -113,24 +114,24 @@ pub async fn setup_engine( is_dev: bool, attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<( - Vec>>>, + Vec>>>, TaskManager, Wallet, )> where N: Default - + Node>>> + + Node>>> + NodeTypesWithEngine + NodeTypesForProvider, N::ComponentsBuilder: NodeComponentsBuilder< - TmpNodeAdapter>>, + TmpNodeAdapter>>, Components: NodeComponents< - TmpNodeAdapter>>, + TmpNodeAdapter>>, Network: PeersHandleProvider, >, >, - N::AddOns: RethRpcAddOns>>> - + EngineValidatorAddOn>>>, + N::AddOns: RethRpcAddOns>>> + + EngineValidatorAddOn>>>, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -163,7 +164,7 @@ where let node = N::default(); let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) .testing_node(exec.clone()) - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(node.components_builder()) .with_add_ons(node.add_ons()) .launch_with_fn(|builder| { diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 2b634ae5ce71..a0c986e4384a 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -95,7 +95,7 @@ where SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); // Apply pre-block system contract calls. - system_caller.apply_pre_execution_changes(&block.clone().unseal().block, &mut evm)?; + system_caller.apply_pre_execution_changes(block.header(), &mut evm)?; // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. 
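A note on the consensus changes above: the new associated `Error` type on `Consensus` means implementations are no longer hard-wired to `ConsensusError`; `NoopConsensus` and `TestConsensus` simply pin `type Error = ConsensusError`. A minimal sketch of the pattern with simplified stand-in types (the real trait also carries `SealedHeader`/`SealedBlock` parameters and an `AsHeaderValidator` bound):

```rust
use thiserror::Error;

/// Simplified stand-ins for the reth header/body types.
struct Header { gas_used: u64, gas_limit: u64 }
struct Body;

/// Reduced version of the reworked trait: the error type is chosen by the
/// implementation instead of being fixed to `ConsensusError`.
trait Consensus<B> {
    type Error;

    fn validate_body_against_header(&self, body: &B, header: &Header)
        -> Result<(), Self::Error>;
}

#[derive(Debug, Error)]
enum MyConsensusError {
    #[error("gas used {got} exceeds gas limit {limit}")]
    GasOverLimit { got: u64, limit: u64 },
}

struct MyConsensus;

impl Consensus<Body> for MyConsensus {
    // a custom error type; `type Error = ConsensusError` recovers the old behavior
    type Error = MyConsensusError;

    fn validate_body_against_header(&self, _body: &Body, header: &Header)
        -> Result<(), Self::Error> {
        if header.gas_used > header.gas_limit {
            return Err(MyConsensusError::GasOverLimit {
                got: header.gas_used,
                limit: header.gas_limit,
            });
        }
        Ok(())
    }
}
```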
diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 10837b174053..ad7657de8051 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -10,7 +10,6 @@ exclude.workspace = true [dependencies] # reth -reth-beacon-consensus.workspace = true reth-chainspec.workspace = true reth-consensus.workspace = true reth-engine-primitives.workspace = true diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 045f6fea02e2..088a42fbf96b 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -59,3 +59,19 @@ where } } } + +/// A temporary workaround to support local payload engine launcher for arbitrary payload +/// attributes. +// TODO(mattsse): This should be reworked so that LocalPayloadAttributesBuilder can be implemented +// for any +pub trait UnsupportedLocalAttributes: Send + Sync + 'static {} + +impl PayloadAttributesBuilder for LocalPayloadAttributesBuilder +where + ChainSpec: Send + Sync + 'static, + T: UnsupportedLocalAttributes, +{ + fn build(&self, _: u64) -> T { + panic!("Unsupported payload attributes") + } +} diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 3c7bc72baed5..77b61c8221a9 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -16,10 +16,9 @@ use std::{ use crate::miner::{LocalMiner, MiningMode}; use futures_util::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::FullConsensus; -use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; +use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator}; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, @@ -34,7 +33,10 @@ use reth_evm::execute::BlockExecutorProvider; use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; +use reth_provider::{ + providers::{BlockchainProvider, EngineNodeTypes}, + ChainSpecProvider, ProviderFactory, +}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; use tokio::sync::mpsc::UnboundedSender; @@ -64,10 +66,10 @@ where /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc>, + consensus: Arc>, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, - blockchain_db: BlockchainProvider2, + blockchain_db: BlockchainProvider, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, payload_validator: V, diff --git a/crates/engine/primitives/src/error.rs b/crates/engine/primitives/src/error.rs index b7deb607bcf9..18e72fe83e72 100644 --- a/crates/engine/primitives/src/error.rs +++ b/crates/engine/primitives/src/error.rs @@ -1,3 +1,5 @@ +use alloy_rpc_types_engine::ForkchoiceUpdateError; + /// Represents all error cases when handling a new payload. /// /// This represents all possible error cases that must be returned as JSON RPC errors back to the @@ -18,3 +20,27 @@ impl BeaconOnNewPayloadError { Self::Internal(Box::new(e)) } } + +/// Represents error cases for an applied forkchoice update.
+/// +/// This represents all possible error cases, that must be returned as JSON RPC errors back to the +/// beacon node. +#[derive(Debug, thiserror::Error)] +pub enum BeaconForkChoiceUpdateError { + /// Thrown when a forkchoice update resulted in an error. + #[error("forkchoice update error: {0}")] + ForkchoiceUpdateError(#[from] ForkchoiceUpdateError), + /// Thrown when the engine task is unavailable/stopped. + #[error("beacon consensus engine task stopped")] + EngineUnavailable, + /// An internal error occurred, not necessarily related to the update. + #[error(transparent)] + Internal(Box), +} + +impl BeaconForkChoiceUpdateError { + /// Create a new internal error. + pub fn internal(e: E) -> Self { + Self::Internal(Box::new(e)) + } +} diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/engine/primitives/src/event.rs similarity index 91% rename from crates/consensus/beacon/src/engine/event.rs rename to crates/engine/primitives/src/event.rs index acf056b3ff47..fdf5b73f1eca 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -1,15 +1,18 @@ +//! Events emitted by the beacon consensus engine. + +use crate::ForkchoiceStatus; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; -use reth_engine_primitives::ForkchoiceStatus; -use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlockFor, SealedHeader}; +use reth_primitives::{EthPrimitives, SealedBlockFor}; +use reth_primitives_traits::{NodePrimitives, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, time::Duration, }; -/// Events emitted by [`crate::BeaconConsensusEngine`]. +/// Events emitted by the consensus engine. #[derive(Clone, Debug)] pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 9921023c4a1d..e0b465e98596 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -14,13 +14,16 @@ use core::fmt; use alloy_consensus::BlockHeader; use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; -pub use error::BeaconOnNewPayloadError; +pub use error::*; mod forkchoice; pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; mod message; -pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; +pub use message::*; + +mod event; +pub use event::*; mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; diff --git a/crates/engine/primitives/src/message.rs b/crates/engine/primitives/src/message.rs index d8a4c1322ad0..6e4f4629276b 100644 --- a/crates/engine/primitives/src/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,9 +1,12 @@ -use crate::{BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, ForkchoiceStatus}; +use crate::{ + error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EngineApiMessageVersion, + EngineTypes, ForkchoiceStatus, +}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; -use futures::{future::Either, FutureExt}; +use futures::{future::Either, FutureExt, TryFutureExt}; use reth_errors::RethResult; use reth_payload_builder_primitives::PayloadBuilderError; use std::{ @@ -12,7 +15,7 @@ use std::{ pin::Pin, task::{ready, Context, Poll}, }; 
-use tokio::sync::oneshot; +use tokio::sync::{mpsc::UnboundedSender, oneshot}; /// Represents the outcome of forkchoice update. /// @@ -191,3 +194,82 @@ impl Display for BeaconEngineMessage { } } } + +/// A clonable sender type that can be used to send engine API messages. +/// +/// This type mirrors consensus related functions of the engine API. +#[derive(Debug, Clone)] +pub struct BeaconConsensusEngineHandle +where + Engine: EngineTypes, +{ + to_engine: UnboundedSender>, +} + +impl BeaconConsensusEngineHandle +where + Engine: EngineTypes, +{ + /// Creates a new beacon consensus engine handle. + pub const fn new(to_engine: UnboundedSender>) -> Self { + Self { to_engine } + } + + /// Sends a new payload message to the beacon consensus engine and waits for a response. + /// + /// See also + pub async fn new_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); + rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? + } + + /// Sends a forkchoice update message to the beacon consensus engine and waits for a response. + /// + /// See also + pub async fn fork_choice_updated( + &self, + state: ForkchoiceState, + payload_attrs: Option, + version: EngineApiMessageVersion, + ) -> Result { + Ok(self + .send_fork_choice_updated(state, payload_attrs, version) + .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) + .await? + .map_err(BeaconForkChoiceUpdateError::internal)? + .await?) + } + + /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to + /// wait for a response. + fn send_fork_choice_updated( + &self, + state: ForkchoiceState, + payload_attrs: Option, + version: EngineApiMessageVersion, + ) -> oneshot::Receiver> { + let (tx, rx) = oneshot::channel(); + let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }); + rx + } + + /// Sends a transition configuration exchange message to the beacon consensus engine. + /// + /// See also + /// + /// This only notifies about the exchange. The actual exchange is done by the engine API impl + /// itself. 
+ pub fn transition_configuration_exchanged(&self) { + let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); + } +} diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index a1f0dbf299bf..b8f20bee4435 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -12,7 +12,6 @@ workspace = true [dependencies] # reth -reth-beacon-consensus.workspace = true reth-consensus.workspace = true reth-engine-tree.workspace = true reth-evm.workspace = true @@ -36,6 +35,7 @@ thiserror.workspace = true [dev-dependencies] reth-engine-tree = { workspace = true, features = ["test-utils"] } +reth-ethereum-consensus.workspace = true reth-ethereum-engine-primitives.workspace = true reth-ethereum-evm.workspace = true reth-exex-types.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 89d61e066bd4..4f50f031bafb 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -1,9 +1,8 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::FullConsensus; -use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; +use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, @@ -20,7 +19,10 @@ use reth_network_p2p::BlockClient; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; -use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; +use reth_provider::{ + providers::{BlockchainProvider, EngineNodeTypes}, + ProviderFactory, +}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; use reth_tasks::TaskSpawner; @@ -69,7 +71,7 @@ where /// Constructor for `EngineService`. 
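The `BeaconConsensusEngineHandle` added above is a thin request/response layer in a common Tokio shape: the handle owns a clonable `UnboundedSender`, every request packs its own `oneshot` reply channel, and a vanished engine task surfaces as an engine-unavailable error. A stripped-down sketch of that pattern (all names here are illustrative, not the reth types):

```rust
use tokio::sync::{mpsc, oneshot};

enum EngineMsg {
    // each request carries the sender half of a oneshot for the reply
    NewPayload { payload: String, tx: oneshot::Sender<Result<(), String>> },
}

#[derive(Clone)]
struct EngineHandle {
    to_engine: mpsc::UnboundedSender<EngineMsg>,
}

impl EngineHandle {
    async fn new_payload(&self, payload: String) -> Result<(), String> {
        let (tx, rx) = oneshot::channel();
        // a send failure means the engine task is gone; the same condition is
        // caught below when the reply channel comes back closed
        let _ = self.to_engine.send(EngineMsg::NewPayload { payload, tx });
        rx.await.map_err(|_| "engine unavailable".to_string())?
    }
}
```

`fork_choice_updated` above follows the same pattern with one extra layer: the oneshot delivers a `RethResult` whose payload is itself awaited again for the final `ForkchoiceUpdated`, hence the chained `map_err(..).await?` followed by a second `await?`.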
#[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc>, + consensus: Arc>, executor_factory: E, chain_spec: Arc, client: Client, @@ -77,7 +79,7 @@ where pipeline: Pipeline, pipeline_task_spawner: Box, provider: ProviderFactory, - blockchain_db: BlockchainProvider2, + blockchain_db: BlockchainProvider, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, payload_validator: V, @@ -150,17 +152,17 @@ pub struct EngineServiceError {} #[cfg(test)] mod tests { use super::*; - use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; + use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_ethereum_evm::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, }; use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; @@ -190,7 +192,7 @@ mod tests { let executor_factory = EthExecutorProvider::ethereum(chain_spec.clone()); let blockchain_db = - BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) + BlockchainProvider::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 0d2284e39b3e..822780657d8f 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -12,8 +12,6 @@ workspace = true [dependencies] # reth -reth-beacon-consensus.workspace = true -reth-blockchain-tree-api.workspace = true reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true @@ -51,12 +49,14 @@ revm-primitives.workspace = true futures.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } +moka = { workspace = true, features = ["sync"] } # metrics metrics.workspace = true reth-metrics = { workspace = true, features = ["common"] } # misc +schnellru.workspace = true rayon.workspace = true tracing.workspace = true derive_more.workspace = true @@ -73,6 +73,7 @@ reth-chain-state = { workspace = true, features = ["test-utils"] } reth-chainspec.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true +reth-ethereum-consensus.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 1e42e25477b1..26c5b405de06 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -4,7 +4,7 @@ use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics}; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::FutureExt; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ 
full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, BlockClient, @@ -84,7 +84,7 @@ where /// Create a new instance pub fn new( client: Client, - consensus: Arc>, + consensus: Arc>, ) -> Self { Self { full_block_client: FullBlockClient::new(client, consensus), @@ -323,8 +323,8 @@ mod tests { use alloy_consensus::Header; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use assert_matches::assert_matches; - use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_ethereum_consensus::EthBeaconConsensus; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; use std::{future::poll_fn, sync::Arc}; diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index dfc68fb73b39..fa92cba28f8d 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -7,9 +7,8 @@ use crate::{ }; use alloy_primitives::B256; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; -use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; +use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineTypes}; use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; use reth_primitives_traits::Block; use std::{ diff --git a/crates/engine/tree/src/lib.rs b/crates/engine/tree/src/lib.rs index 19eecf8d6c88..f197dd764aab 100644 --- a/crates/engine/tree/src/lib.rs +++ b/crates/engine/tree/src/lib.rs @@ -92,9 +92,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -/// Re-export of the blockchain tree API. -pub use reth_blockchain_tree_api::*; - /// Support for backfill sync mode. pub mod backfill; /// The type that drives the chain forward. diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs new file mode 100644 index 000000000000..84d2c8a09225 --- /dev/null +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -0,0 +1,288 @@ +//! Implements a state provider that has a shared cache in front of it. +use alloy_primitives::{map::B256HashMap, Address, StorageKey, StorageValue, B256}; +use metrics::Gauge; +use moka::sync::CacheBuilder; +use reth_errors::ProviderResult; +use reth_metrics::Metrics; +use reth_primitives::{Account, Bytecode}; +use reth_provider::{ + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, +}; +use reth_trie::{ + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, +}; +use revm_primitives::map::DefaultHashBuilder; + +type Cache = moka::sync::Cache; + +/// A wrapper of a state provider and a shared cache. +pub(crate) struct CachedStateProvider { + /// The state provider + state_provider: S, + + /// The caches used for the provider + caches: ProviderCaches, + + /// Metrics for the cached state provider + metrics: CachedStateMetrics, +} + +impl CachedStateProvider +where + S: StateProvider, +{ + /// Creates a new [`CachedStateProvider`] from a [`ProviderCaches`], state provider, and + /// [`CachedStateMetrics`]. 
+ pub(crate) const fn new_with_caches( + state_provider: S, + caches: ProviderCaches, + metrics: CachedStateMetrics, + ) -> Self { + Self { state_provider, caches, metrics } + } +} + +/// Metrics for the cached state provider, showing hits / misses for each cache +#[derive(Metrics, Clone)] +#[metrics(scope = "sync.caching")] +pub(crate) struct CachedStateMetrics { + /// Code cache hits + code_cache_hits: Gauge, + + /// Code cache misses + code_cache_misses: Gauge, + + /// Storage cache hits + storage_cache_hits: Gauge, + + /// Storage cache misses + storage_cache_misses: Gauge, + + /// Account cache hits + account_cache_hits: Gauge, + + /// Account cache misses + account_cache_misses: Gauge, +} + +impl CachedStateMetrics { + /// Sets all values to zero, indicating that a new block is being executed. + pub(crate) fn reset(&self) { + // code cache + self.code_cache_hits.set(0); + self.code_cache_misses.set(0); + + // storage cache + self.storage_cache_hits.set(0); + self.storage_cache_misses.set(0); + + // account cache + self.account_cache_hits.set(0); + self.account_cache_misses.set(0); + } + + /// Returns a new zeroed-out instance of [`CachedStateMetrics`]. + pub(crate) fn zeroed() -> Self { + let zeroed = Self::default(); + zeroed.reset(); + zeroed + } +} + +impl AccountReader for CachedStateProvider { + fn basic_account(&self, address: &Address) -> ProviderResult> { + if let Some(res) = self.caches.account_cache.get(address) { + self.metrics.account_cache_hits.increment(1); + return Ok(res) + } + + self.metrics.account_cache_misses.increment(1); + + let res = self.state_provider.basic_account(address)?; + self.caches.account_cache.insert(*address, res); + Ok(res) + } +} + +impl StateProvider for CachedStateProvider { + fn storage( + &self, + account: Address, + storage_key: StorageKey, + ) -> ProviderResult> { + if let Some(res) = self.caches.storage_cache.get(&(account, storage_key)) { + self.metrics.storage_cache_hits.increment(1); + return Ok(res) + } + + self.metrics.storage_cache_misses.increment(1); + + let final_res = self.state_provider.storage(account, storage_key)?; + self.caches.storage_cache.insert((account, storage_key), final_res); + Ok(final_res) + } + + fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { + if let Some(res) = self.caches.code_cache.get(code_hash) { + self.metrics.code_cache_hits.increment(1); + return Ok(res) + } + + self.metrics.code_cache_misses.increment(1); + + let final_res = self.state_provider.bytecode_by_hash(code_hash)?; + self.caches.code_cache.insert(*code_hash, final_res.clone()); + Ok(final_res) + } +} + +impl StateRootProvider for CachedStateProvider { + fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { + self.state_provider.state_root(hashed_state) + } + + fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult { + self.state_provider.state_root_from_nodes(input) + } + + fn state_root_from_nodes_with_updates( + &self, + input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_from_nodes_with_updates(input) + } + + fn state_root_with_updates( + &self, + hashed_state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_with_updates(hashed_state) + } +} + +impl StateProofProvider for CachedStateProvider { + fn proof( + &self, + input: TrieInput, + address: Address, + slots: &[B256], + ) -> ProviderResult { + self.state_provider.proof(input, address, slots) + } + + fn multiproof( + &self, + input: TrieInput, + 
targets: MultiProofTargets, + ) -> ProviderResult { + self.state_provider.multiproof(input, targets) + } + + fn witness( + &self, + input: TrieInput, + target: HashedPostState, + ) -> ProviderResult> { + self.state_provider.witness(input, target) + } +} + +impl StorageRootProvider for CachedStateProvider { + fn storage_root( + &self, + address: Address, + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.state_provider.storage_root(address, hashed_storage) + } + + fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.state_provider.storage_proof(address, slot, hashed_storage) + } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.state_provider.storage_multiproof(address, slots, hashed_storage) + } +} + +impl BlockHashReader for CachedStateProvider { + fn block_hash(&self, number: alloy_primitives::BlockNumber) -> ProviderResult> { + self.state_provider.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: alloy_primitives::BlockNumber, + end: alloy_primitives::BlockNumber, + ) -> ProviderResult> { + self.state_provider.canonical_hashes_range(start, end) + } +} + +impl HashedPostStateProvider for CachedStateProvider { + fn hashed_post_state(&self, bundle_state: &reth_revm::db::BundleState) -> HashedPostState { + self.state_provider.hashed_post_state(bundle_state) + } +} + +/// The set of caches that are used in the [`CachedStateProvider`]. +#[derive(Debug, Clone)] +pub(crate) struct ProviderCaches { + /// The cache for bytecode + code_cache: Cache>, + + /// The cache for storage + storage_cache: Cache<(Address, StorageKey), Option>, + + /// The cache for basic accounts + account_cache: Cache>, +} + +/// A builder for [`ProviderCaches`]. +#[derive(Debug)] +pub(crate) struct ProviderCacheBuilder { + /// Code cache size + code_cache_size: u64, + + /// Storage cache size + storage_cache_size: u64, + + /// Account cache size + account_cache_size: u64, +} + +impl ProviderCacheBuilder { + /// Build a [`ProviderCaches`] struct, so that provider caches can be easily cloned. + pub(crate) fn build_caches(self) -> ProviderCaches { + ProviderCaches { + code_cache: CacheBuilder::new(self.code_cache_size) + .build_with_hasher(DefaultHashBuilder::default()), + storage_cache: CacheBuilder::new(self.storage_cache_size) + .build_with_hasher(DefaultHashBuilder::default()), + account_cache: CacheBuilder::new(self.account_cache_size) + .build_with_hasher(DefaultHashBuilder::default()), + } + } +} + +impl Default for ProviderCacheBuilder { + fn default() -> Self { + // moka caches have been benchmarked up to 800k entries, so we just use 1M, optimizing for + // hitrate over memory consumption. + // + // See: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies + Self { code_cache_size: 1000000, storage_cache_size: 1000000, account_cache_size: 1000000 } + } +} diff --git a/crates/engine/tree/src/tree/config.rs b/crates/engine/tree/src/tree/config.rs index 34a6e4d0095f..c0c68799aee0 100644 --- a/crates/engine/tree/src/tree/config.rs +++ b/crates/engine/tree/src/tree/config.rs @@ -1,5 +1,14 @@ //! Engine tree configuration. +use alloy_eips::merge::EPOCH_SLOTS; + +/// The largest gap for which the tree will be used for sync. See docs for `pipeline_run_threshold` +/// for more information. +/// +/// This is the default threshold, the distance to the head that the tree will be used for sync. 
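Stepping back to `CachedStateProvider` above: every account, storage, and bytecode read is a plain read-through cache in front of the wrapped provider, and the cached value is an `Option`, so negative lookups ("this account does not exist") are cached too. The shape, reduced to a self-contained sketch (the `CachedReader` type and the `u64`/`String` payloads are stand-ins, not reth types):

```rust
use moka::sync::Cache;

/// Hypothetical read-through wrapper mirroring the shape of
/// `CachedStateProvider::basic_account`.
struct CachedReader {
    cache: Cache<u64, Option<String>>,
}

impl CachedReader {
    fn new(capacity: u64) -> Self {
        Self { cache: Cache::new(capacity) }
    }

    fn get(&self, key: u64) -> Option<String> {
        // 1. consult the shared cache (the real code bumps a hit gauge here)
        if let Some(hit) = self.cache.get(&key) {
            return hit;
        }
        // 2. miss: fall through to the underlying lookup...
        let value = self.load_from_backend(key);
        // 3. ...and populate the cache, `None` results included
        self.cache.insert(key, value.clone());
        value
    }

    fn load_from_backend(&self, _key: u64) -> Option<String> {
        None // stand-in for the real state provider read
    }
}
```

Caching the `Option` rather than only present values means "account absent" is itself a hit on the next lookup, which matters for freshly created accounts.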
+/// If the distance exceeds this threshold, the pipeline will be used for sync. +pub(crate) const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; + /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs new file mode 100644 index 000000000000..54c274abbf68 --- /dev/null +++ b/crates/engine/tree/src/tree/error.rs @@ -0,0 +1,199 @@ +//! Internal errors for the tree module. + +use alloy_consensus::BlockHeader; +use reth_consensus::ConsensusError; +use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; +use reth_evm::execute::InternalBlockExecutionError; +use reth_primitives::SealedBlockFor; +use reth_primitives_traits::{Block, BlockBody}; +use tokio::sync::oneshot::error::TryRecvError; + +/// This is an error that can come from advancing persistence. Either this can be a +/// [`TryRecvError`], or this can be a [`ProviderError`] +#[derive(Debug, thiserror::Error)] +pub enum AdvancePersistenceError { + /// An error that can be from failing to receive a value from persistence + #[error(transparent)] + RecvError(#[from] TryRecvError), + /// A provider error + #[error(transparent)] + Provider(#[from] ProviderError), +} + +#[derive(thiserror::Error)] +#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {}", + .block.hash(), + .block.number(), + .block.parent_hash(), + .kind)] +struct InsertBlockErrorData { + block: SealedBlockFor, + #[source] + kind: InsertBlockErrorKind, +} + +impl std::fmt::Debug for InsertBlockErrorData { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("InsertBlockError") + .field("error", &self.kind) + .field("hash", &self.block.hash()) + .field("number", &self.block.number()) + .field("parent_hash", &self.block.parent_hash()) + .field("num_txs", &self.block.body().transactions().len()) + .finish_non_exhaustive() + } +} + +impl InsertBlockErrorData { + const fn new(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Self { + Self { block, kind } + } + + fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Box { + Box::new(Self::new(block, kind)) + } +} + +/// Error thrown when inserting a block failed because the block is considered invalid. 
+#[derive(thiserror::Error)] +#[error(transparent)] +pub struct InsertBlockError { + inner: Box>, +} + +// === impl InsertBlockError === + +impl InsertBlockError { + /// Create a new `InsertBlockError` + pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Self { + Self { inner: InsertBlockErrorData::boxed(block, kind) } + } + + /// Create a new `InsertBlockError` from a consensus error + pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { + Self::new(block, InsertBlockErrorKind::Consensus(error)) + } + + /// Create a new `InsertBlockError` from a sender recovery error + pub fn sender_recovery_error(block: SealedBlockFor) -> Self { + Self::new(block, InsertBlockErrorKind::SenderRecovery) + } + + /// Consumes the error and returns the block that resulted in the error + #[inline] + pub fn into_block(self) -> SealedBlockFor { + self.inner.block + } + + /// Returns the error kind + #[inline] + pub const fn kind(&self) -> &InsertBlockErrorKind { + &self.inner.kind + } + + /// Returns the block that resulted in the error + #[inline] + pub const fn block(&self) -> &SealedBlockFor { + &self.inner.block + } + + /// Consumes the type and returns the block and error kind. + #[inline] + pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKind) { + let inner = *self.inner; + (inner.block, inner.kind) + } +} + +impl std::fmt::Debug for InsertBlockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Debug::fmt(&self.inner, f) + } +} + +/// All error variants possible when inserting a block +#[derive(Debug, thiserror::Error)] +pub enum InsertBlockErrorKind { + /// Failed to recover senders for the block + #[error("failed to recover senders for block")] + SenderRecovery, + /// Block violated consensus rules. + #[error(transparent)] + Consensus(#[from] ConsensusError), + /// Block execution failed. + #[error(transparent)] + Execution(#[from] BlockExecutionError), + /// Provider error. + #[error(transparent)] + Provider(#[from] ProviderError), + /// Other errors. + #[error(transparent)] + Other(#[from] Box), +} + +impl InsertBlockErrorKind { + /// Returns an [`InsertBlockValidationError`] if the error is caused by an invalid block. + /// + /// Returns an [`InsertBlockFatalError`] if the error is caused by an error that is not + /// validation related or is otherwise fatal. + /// + /// This is intended to be used to determine if we should respond `INVALID` when + /// processing a new block.
+ pub fn ensure_validation_error( + self, + ) -> Result { + match self { + Self::SenderRecovery => Ok(InsertBlockValidationError::SenderRecovery), + Self::Consensus(err) => Ok(InsertBlockValidationError::Consensus(err)), + // other execution errors that are considered internal errors + Self::Execution(err) => { + match err { + BlockExecutionError::Validation(err) => { + Ok(InsertBlockValidationError::Validation(err)) + } + BlockExecutionError::Consensus(err) => { + Ok(InsertBlockValidationError::Consensus(err)) + } + // these are internal errors, not caused by an invalid block + BlockExecutionError::Internal(error) => { + Err(InsertBlockFatalError::BlockExecutionError(error)) + } + } + } + Self::Provider(err) => Err(InsertBlockFatalError::Provider(err)), + Self::Other(err) => Err(InternalBlockExecutionError::Other(err).into()), + } + } +} + +/// Error variants that are not caused by invalid blocks +#[derive(Debug, thiserror::Error)] +pub enum InsertBlockFatalError { + /// A provider error + #[error(transparent)] + Provider(#[from] ProviderError), + /// An internal / fatal block execution error + #[error(transparent)] + BlockExecutionError(#[from] InternalBlockExecutionError), +} + +/// Error variants that are caused by invalid blocks +#[derive(Debug, thiserror::Error)] +pub enum InsertBlockValidationError { + /// Failed to recover senders for the block + #[error("failed to recover senders for block")] + SenderRecovery, + /// Block violated consensus rules. + #[error(transparent)] + Consensus(#[from] ConsensusError), + /// Validation error, transparently wrapping [`BlockValidationError`] + #[error(transparent)] + Validation(#[from] BlockValidationError), +} + +impl InsertBlockValidationError { + /// Returns true if this is a block pre merge error. + pub const fn is_block_pre_merge(&self) -> bool { + matches!(self, Self::Validation(BlockValidationError::BlockPreMerge { .. })) + } +} diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/engine/tree/src/tree/invalid_headers.rs similarity index 93% rename from crates/consensus/beacon/src/engine/invalid_headers.rs rename to crates/engine/tree/src/tree/invalid_headers.rs index 384820ca9f3f..8472d44a3238 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/engine/tree/src/tree/invalid_headers.rs @@ -16,7 +16,7 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. #[derive(Debug)] -pub struct InvalidHeaderCache { +pub(super) struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. headers: LruMap, /// Metrics for the cache. @@ -25,7 +25,7 @@ pub struct InvalidHeaderCache { impl InvalidHeaderCache { /// Invalid header cache constructor. - pub fn new(max_length: u32) -> Self { + pub(super) fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } @@ -37,7 +37,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option { + pub(super) fn get(&mut self, hash: &B256) -> Option { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -52,7 +52,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. 
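The `InvalidHeaderCache` being moved above adds a twist to a plain LRU: each `get` bumps a per-entry hit counter, and once an entry has been served `INVALID_HEADER_HIT_EVICTION_THRESHOLD` (128) times it is evicted rather than returned, so a cached "invalid" verdict cannot be replayed forever. A self-contained sketch of that eviction idea using `schnellru`, with simplified types (`HitBoundedCache` is ours, not reth's):

```rust
use schnellru::{ByLength, LruMap};

const HIT_EVICTION_THRESHOLD: u8 = 128;

struct Entry<T> {
    value: T,
    hit_count: u8,
}

struct HitBoundedCache<T> {
    map: LruMap<u64, Entry<T>>,
}

impl<T: Clone> HitBoundedCache<T> {
    fn new(max_len: u32) -> Self {
        Self { map: LruMap::new(ByLength::new(max_len)) }
    }

    /// Returns the value and counts the lookup; entries hit too often are
    /// evicted instead of served again.
    fn get(&mut self, key: &u64) -> Option<T> {
        {
            let entry = self.map.get(key)?;
            entry.hit_count = entry.hit_count.saturating_add(1);
            if entry.hit_count < HIT_EVICTION_THRESHOLD {
                return Some(entry.value.clone());
            }
        }
        // served too many times: evict so the entry can be re-derived later
        self.map.remove(key);
        None
    }
}
```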
- pub fn insert_with_invalid_ancestor( + pub(super) fn insert_with_invalid_ancestor( &mut self, header_hash: B256, invalid_ancestor: BlockWithParent, @@ -68,7 +68,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: BlockWithParent) { + pub(super) fn insert(&mut self, invalid_ancestor: BlockWithParent) { if self.get(&invalid_ancestor.block.hash).is_none() { warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash"); self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5b65e49f3923..03b9f0ab50b6 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -3,7 +3,10 @@ use crate::{ chain::FromOrchestrator, engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, - tree::metrics::EngineApiMetrics, + tree::{ + cached_state::{CachedStateMetrics, CachedStateProvider, ProviderCacheBuilder}, + metrics::EngineApiMetrics, + }, }; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; @@ -16,20 +19,16 @@ use alloy_rpc_types_engine::{ PayloadValidationError, }; use block_buffer::BlockBuffer; -use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, InvalidHeaderCache, MIN_BLOCKS_FOR_PIPELINE_RUN, -}; -use reth_blockchain_tree_api::{ - error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}, - BlockStatus2, InsertPayloadOk2, -}; +use error::{InsertBlockError, InsertBlockErrorKind, InsertBlockFatalError}; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; +pub use reth_engine_primitives::InvalidBlockHook; use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, + BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, + EngineApiMessageVersion, EngineTypes, EngineValidator, ForkchoiceStateTracker, + OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::{execute::BlockExecutorProvider, system_calls::OnStateHook}; @@ -78,16 +77,24 @@ use tokio::sync::{ use tracing::*; mod block_buffer; +mod cached_state; pub mod config; +pub mod error; mod invalid_block_hook; +mod invalid_headers; mod metrics; mod persistence_state; +pub mod root; +mod trie_updates; + +use crate::tree::{ + config::MIN_BLOCKS_FOR_PIPELINE_RUN, error::AdvancePersistenceError, + invalid_headers::InvalidHeaderCache, +}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; -pub use reth_engine_primitives::InvalidBlockHook; - -pub mod root; +use trie_updates::compare_trie_updates; /// Keeps track of the state of the tree. /// @@ -494,7 +501,7 @@ where { provider: P, executor_provider: E, - consensus: Arc>, + consensus: Arc>, payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. 
state: EngineApiTreeState, @@ -583,7 +590,7 @@ where pub fn new( provider: P, executor_provider: E, - consensus: Arc>, + consensus: Arc>, payload_validator: V, outgoing: UnboundedSender>, state: EngineApiTreeState, @@ -641,7 +648,7 @@ where pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc>, + consensus: Arc>, payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, @@ -843,17 +850,17 @@ where match self.insert_block_without_senders(block) { Ok(status) => { let status = match status { - InsertPayloadOk2::Inserted(BlockStatus2::Valid) => { + InsertPayloadOk::Inserted(BlockStatus::Valid) => { latest_valid_hash = Some(block_hash); self.try_connect_buffered_blocks(num_hash)?; PayloadStatusEnum::Valid } - InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid) => { + InsertPayloadOk::AlreadySeen(BlockStatus::Valid) => { latest_valid_hash = Some(block_hash); PayloadStatusEnum::Valid } - InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { .. }) | - InsertPayloadOk2::AlreadySeen(BlockStatus2::Disconnected { .. }) => { + InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | + InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { // not known to be invalid, but we don't know anything else PayloadStatusEnum::Syncing } @@ -934,7 +941,7 @@ where while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); - old_hash = block.block.header.parent_hash(); + old_hash = block.block.parent_hash(); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -950,7 +957,7 @@ where // a common ancestor (fork block) is reached. while old_hash != current_hash { if let Some(block) = self.executed_block_by_hash(old_hash)? { - old_hash = block.block.header.parent_hash(); + old_hash = block.block.parent_hash(); old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain @@ -1080,7 +1087,7 @@ where // 2. ensure we can apply a new chain update for the head block if let Some(chain_update) = self.on_new_head(state.head_block_hash)? 
{ - let tip = chain_update.tip().header.clone(); + let tip = chain_update.tip().clone_sealed_header(); self.on_canonical_chain_update(chain_update); // update the safe and finalized blocks and ensure their values are valid @@ -1615,8 +1622,11 @@ where hash: B256, ) -> ProviderResult>> { // check memory first - let block = - self.state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone().header); + let block = self + .state + .tree_state + .block_by_hash(hash) + .map(|block| block.as_ref().clone_sealed_header()); if block.is_some() { Ok(block) @@ -1799,7 +1809,7 @@ where return Err(e) } - if let Err(e) = self.consensus.validate_header(block) { + if let Err(e) = self.consensus.validate_header(block.sealed_header()) { error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } @@ -1833,7 +1843,7 @@ where Ok(res) => { debug!(target: "engine::tree", child =?child_num_hash, ?res, "connected buffered block"); if self.is_sync_target_head(child_num_hash.hash) && - matches!(res, InsertPayloadOk2::Inserted(BlockStatus2::Valid)) + matches!(res, InsertPayloadOk::Inserted(BlockStatus::Valid)) { self.make_canonical(child_num_hash.hash)?; } @@ -1858,10 +1868,10 @@ where fn buffer_block_without_senders( &mut self, block: SealedBlockFor, - ) -> Result<(), InsertBlockErrorTwo> { + ) -> Result<(), InsertBlockError> { match block.try_seal_with_senders() { Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), } } @@ -1869,9 +1879,9 @@ where fn buffer_block( &mut self, block: SealedBlockWithSenders, - ) -> Result<(), InsertBlockErrorTwo> { + ) -> Result<(), InsertBlockError> { if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockErrorTwo::consensus_error(err, block.block)) + return Err(InsertBlockError::consensus_error(err, block.block)) } self.state.buffer.insert_block(block); Ok(()) @@ -2029,7 +2039,7 @@ where // update the tracked canonical head self.state.tree_state.set_canonical_head(chain_update.tip().num_hash()); - let tip = chain_update.tip().header.clone(); + let tip = chain_update.tip().clone_sealed_header(); let notification = chain_update.to_chain_notification(); // reinsert any missing reorged blocks @@ -2143,7 +2153,7 @@ where // try to append the block match self.insert_block(block) { - Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid)) => { + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) => { if self.is_sync_target_head(block_num_hash.hash) { trace!(target: "engine::tree", "appended downloaded sync target block"); @@ -2156,10 +2166,7 @@ where trace!(target: "engine::tree", "appended downloaded block"); self.try_connect_buffered_blocks(block_num_hash)?; } - Ok(InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { - head, - missing_ancestor, - })) => { + Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { head, missing_ancestor })) => { // block is not connected to the canonical head, we need to download // its missing branch first return Ok(self.on_disconnected_downloaded_block( @@ -2168,7 +2175,7 @@ where head, )) } - Ok(InsertPayloadOk2::AlreadySeen(_)) => { + Ok(InsertPayloadOk::AlreadySeen(_)) => { trace!(target: "engine::tree", "downloaded block already executed"); } Err(err) => { @@ -2185,29 +2192,29 @@ where fn insert_block_without_senders( &mut self, block: SealedBlockFor, - ) -> Result> { + ) -> Result> { match block.try_seal_with_senders() { Ok(block) => 
self.insert_block(block), - Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), } } fn insert_block( &mut self, block: SealedBlockWithSenders, - ) -> Result> { + ) -> Result> { self.insert_block_inner(block.clone()) - .map_err(|kind| InsertBlockErrorTwo::new(block.block, kind)) + .map_err(|kind| InsertBlockError::new(block.block, kind)) } fn insert_block_inner( &mut self, block: SealedBlockWithSenders, - ) -> Result { + ) -> Result { debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); if self.block_by_hash(block.hash())?.is_some() { - return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) } let start = Instant::now(); @@ -2229,7 +2236,7 @@ where self.state.buffer.insert_block(block); - return Ok(InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { + return Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { head: self.state.tree_state.current_canonical_head, missing_ancestor, })) @@ -2237,15 +2244,24 @@ where // now validate against the parent let parent_block = self.sealed_header_by_hash(block.parent_hash())?.ok_or_else(|| { - InsertBlockErrorKindTwo::Provider(ProviderError::HeaderNotFound( + InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( block.parent_hash().into(), )) })?; - if let Err(e) = self.consensus.validate_header_against_parent(&block, &parent_block) { + if let Err(e) = + self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) + { warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } + // Use cached state provider before executing, this does nothing currently, will be used in + // prewarming + let caches = ProviderCacheBuilder::default().build_caches(); + let cache_metrics = CachedStateMetrics::zeroed(); + let state_provider = + CachedStateProvider::new_with_caches(state_provider, caches, cache_metrics); + trace!(target: "engine::tree", block=?block.num_hash(), "Executing block"); let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); @@ -2265,7 +2281,7 @@ where let state_root_config = StateRootConfig::new_from_input( consistent_view.clone(), self.compute_trie_input(consistent_view.clone(), block.header().parent_hash()) - .map_err(|e| InsertBlockErrorKindTwo::Other(Box::new(e)))?, + .map_err(|e| InsertBlockErrorKind::Other(Box::new(e)))?, ); let provider_ro = consistent_view.provider_ro()?; @@ -2352,6 +2368,19 @@ where task_elapsed = ?time_from_last_update, "Task state root finished" ); + + if task_state_root != block.header().state_root() { + debug!(target: "engine::tree", "Task state root does not match block state root"); + let (regular_root, regular_updates) = + state_provider.state_root_with_updates(hashed_state.clone())?; + + if regular_root == block.header().state_root() { + compare_trie_updates(&task_trie_updates, ®ular_updates); + } else { + debug!(target: "engine::tree", "Regular state root does not match block state root"); + } + } + (task_state_root, task_trie_updates, time_from_last_update) } Err(error) => { @@ -2383,7 +2412,7 @@ where state_provider.state_root_with_updates(hashed_state.clone())?; (root, updates, root_time.elapsed()) } - Err(error) => return Err(InsertBlockErrorKindTwo::Other(Box::new(error))), + Err(error) => return 
Err(InsertBlockErrorKind::Other(Box::new(error))), } } } else { @@ -2393,7 +2422,7 @@ (root, updates, root_time.elapsed()) }; - Result::<_, InsertBlockErrorKindTwo>::Ok(( + Result::<_, InsertBlockErrorKind>::Ok(( state_root, trie_updates, hashed_state, @@ -2448,7 +2477,7 @@ self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); debug!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Finished inserting block"); - Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid)) + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) } /// Compute state root for the given hashed post state in parallel. @@ -2508,7 +2537,7 @@ /// Returns the proper payload status response if the block is invalid. fn on_insert_block_error( &mut self, - error: InsertBlockErrorTwo, + error: InsertBlockError, ) -> Result { let (block, error) = error.split(); @@ -2528,7 +2557,7 @@ }; // keep track of the invalid header - self.state.invalid_headers.insert(block.header.block_with_parent()); + self.state.invalid_headers.insert(block.block_with_parent()); Ok(PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: validation_err.to_string() }, latest_valid_hash, @@ -2746,16 +2775,34 @@ } } -/// This is an error that can come from advancing persistence. Either this can be a -/// [`TryRecvError`], or this can be a [`ProviderError`] -#[derive(Debug, thiserror::Error)] -pub enum AdvancePersistenceError { - /// An error that can be from failing to receive a value from persistence - #[error(transparent)] - RecvError(#[from] TryRecvError), - /// A provider error - #[error(transparent)] - Provider(#[from] ProviderError), +/// Block inclusion can be valid, disconnected, or invalid. Invalid blocks are returned as an error +/// variant. +/// +/// If we don't know the block's parent, we return `Disconnected`, as we can't tell whether the block +/// is valid or not. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum BlockStatus { + /// The block is valid and extends the canonical chain. + Valid, + /// The block may be valid and has an unknown missing ancestor. + Disconnected { + /// Current canonical head. + head: BlockNumHash, + /// The lowest ancestor block that is not connected to the canonical chain. + missing_ancestor: BlockNumHash, + }, +} + +/// How a payload was inserted if it was valid. +/// +/// If the payload was valid, but has already been seen, [`InsertPayloadOk::AlreadySeen(_)`] is +/// returned, otherwise [`InsertPayloadOk::Inserted(_)`] is returned. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum InsertPayloadOk { + /// The payload was valid, but we have already seen it. + AlreadySeen(BlockStatus), + /// The payload was valid and inserted into the tree.
+ Inserted(BlockStatus), } #[cfg(test)] @@ -2767,10 +2814,10 @@ mod tests { use super::*; use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; - use reth_beacon_consensus::EthBeaconConsensus; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_engine_primitives::ForkchoiceStatus; + use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; use reth_primitives::{Block, BlockExt, EthPrimitives}; @@ -2980,7 +3027,7 @@ fn insert_block( &mut self, block: SealedBlockWithSenders, - ) -> Result> { + ) -> Result> { let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); self.extend_execution_outcome([execution_outcome]); self.tree.provider.add_state_root(block.state_root); @@ -3337,7 +3384,7 @@ let outcome = test_harness.tree.insert_block_without_senders(sealed.clone()).unwrap(); assert_eq!( outcome, - InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { + InsertPayloadOk::Inserted(BlockStatus::Disconnected { head: test_harness.tree.state.tree_state.current_canonical_head, missing_ancestor: sealed.parent_num_hash() }) diff --git a/crates/engine/tree/src/tree/trie_updates.rs b/crates/engine/tree/src/tree/trie_updates.rs new file mode 100644 index 000000000000..ea78aca13b87 --- /dev/null +++ b/crates/engine/tree/src/tree/trie_updates.rs @@ -0,0 +1,208 @@ +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; +use reth_trie::{ + updates::{StorageTrieUpdates, TrieUpdates}, + BranchNodeCompact, Nibbles, +}; +use tracing::debug; + +#[derive(Debug, Default)] +struct TrieUpdatesDiff { + account_nodes: HashMap, Option)>, + removed_nodes: HashMap, + storage_tries: HashMap, +} + +impl TrieUpdatesDiff { + fn has_differences(&self) -> bool { + !self.account_nodes.is_empty() || + !self.removed_nodes.is_empty() || + !self.storage_tries.is_empty() + } + + pub(super) fn log_differences(mut self) { + if self.has_differences() { + for (path, (task, regular)) in &mut self.account_nodes { + debug!(target: "engine::tree", ?path, ?task, ?regular, "Difference in account trie updates"); + } + + for (path, (task, regular)) in &self.removed_nodes { + debug!(target: "engine::tree", ?path, ?task, ?regular, "Difference in removed account trie nodes"); + } + + for (address, storage_diff) in self.storage_tries { + storage_diff.log_differences(address); + } + } + } +} + +#[derive(Debug)] +enum StorageTrieDiffEntry { + /// Storage Trie entry exists for one of the task or regular trie updates, but not the other. + Existence(bool, bool), + /// Storage Trie entries exist for both task and regular trie updates, but their values + /// differ.
+ Value(StorageTrieUpdatesDiff), +} + +impl StorageTrieDiffEntry { + fn log_differences(self, address: B256) { + match self { + Self::Existence(task, regular) => { + debug!(target: "engine::tree", ?address, ?task, ?regular, "Difference in storage trie existence"); + } + Self::Value(mut storage_diff) => { + if let Some((task, regular)) = storage_diff.is_deleted { + debug!(target: "engine::tree", ?address, ?task, ?regular, "Difference in storage trie deletion"); + } + + for (path, (task, regular)) in &mut storage_diff.storage_nodes { + debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, "Difference in storage trie updates"); + } + + for (path, (task, regular)) in &storage_diff.removed_nodes { + debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, "Difference in removed storage trie nodes"); + } + } + } + } +} + +#[derive(Debug, Default)] +struct StorageTrieUpdatesDiff { + is_deleted: Option<(bool, bool)>, + storage_nodes: HashMap, Option)>, + removed_nodes: HashMap, +} + +impl StorageTrieUpdatesDiff { + fn has_differences(&self) -> bool { + self.is_deleted.is_some() || + !self.storage_nodes.is_empty() || + !self.removed_nodes.is_empty() + } +} + +/// Compares the trie updates from state root task and regular state root calculation, and logs +/// the differences if there are any. +pub(super) fn compare_trie_updates(task: &TrieUpdates, regular: &TrieUpdates) { + let mut diff = TrieUpdatesDiff::default(); + + // compare account nodes + for key in task + .account_nodes + .keys() + .chain(regular.account_nodes.keys()) + .cloned() + .collect::>() + { + let (left, right) = (task.account_nodes.get(&key), regular.account_nodes.get(&key)); + + if !branch_nodes_equal(left, right) { + diff.account_nodes.insert(key, (left.cloned(), right.cloned())); + } + } + + // compare removed nodes + for key in task + .removed_nodes + .iter() + .chain(regular.removed_nodes.iter()) + .cloned() + .collect::>() + { + let (left, right) = + (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); + if left != right { + diff.removed_nodes.insert(key, (left, right)); + } + } + + // compare storage tries + for key in task + .storage_tries + .keys() + .chain(regular.storage_tries.keys()) + .copied() + .collect::>() + { + let (left, right) = (task.storage_tries.get(&key), regular.storage_tries.get(&key)); + if left != right { + if let Some((left, right)) = left.zip(right) { + let storage_diff = compare_storage_trie_updates(left, right); + if storage_diff.has_differences() { + diff.storage_tries.insert(key, StorageTrieDiffEntry::Value(storage_diff)); + } + } else { + diff.storage_tries + .insert(key, StorageTrieDiffEntry::Existence(left.is_some(), right.is_some())); + } + } + } + + // log differences + diff.log_differences(); +} + +fn compare_storage_trie_updates( + task: &StorageTrieUpdates, + regular: &StorageTrieUpdates, +) -> StorageTrieUpdatesDiff { + let mut diff = StorageTrieUpdatesDiff { + is_deleted: (task.is_deleted != regular.is_deleted) + .then_some((task.is_deleted, regular.is_deleted)), + ..Default::default() + }; + + // compare storage nodes + for key in task + .storage_nodes + .keys() + .chain(regular.storage_nodes.keys()) + .cloned() + .collect::>() + { + let (left, right) = (task.storage_nodes.get(&key), regular.storage_nodes.get(&key)); + if !branch_nodes_equal(left, right) { + diff.storage_nodes.insert(key, (left.cloned(), right.cloned())); + } + } + + // compare removed nodes + for key in task + .removed_nodes + .iter() + .chain(regular.removed_nodes.iter())
+fn compare_storage_trie_updates(
+    task: &StorageTrieUpdates,
+    regular: &StorageTrieUpdates,
+) -> StorageTrieUpdatesDiff {
+    let mut diff = StorageTrieUpdatesDiff {
+        is_deleted: (task.is_deleted != regular.is_deleted)
+            .then_some((task.is_deleted, regular.is_deleted)),
+        ..Default::default()
+    };
+
+    // compare storage nodes
+    for key in task
+        .storage_nodes
+        .keys()
+        .chain(regular.storage_nodes.keys())
+        .cloned()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) = (task.storage_nodes.get(&key), regular.storage_nodes.get(&key));
+        if !branch_nodes_equal(left, right) {
+            diff.storage_nodes.insert(key, (left.cloned(), right.cloned()));
+        }
+    }
+
+    // compare removed nodes
+    for key in task
+        .removed_nodes
+        .iter()
+        .chain(regular.removed_nodes.iter())
+        .cloned()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) =
+            (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key));
+        if left != right {
+            diff.removed_nodes.insert(key, (left, right));
+        }
+    }
+
+    diff
+}
+
+/// Compares the branch nodes from state root task and regular state root calculation.
+///
+/// Returns `true` if they are equal.
+fn branch_nodes_equal(
+    task: Option<&BranchNodeCompact>,
+    regular: Option<&BranchNodeCompact>,
+) -> bool {
+    if let (Some(task), Some(regular)) = (task.as_ref(), regular.as_ref()) {
+        task.state_mask == regular.state_mask &&
+            // We do not compare the tree mask because it is known to be mismatching
+            task.hash_mask == regular.hash_mask &&
+            task.hashes == regular.hashes &&
+            task.root_hash == regular.root_hash
+    } else {
+        task == regular
+    }
+}
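Note that `branch_nodes_equal` is deliberately not `==`: the tree mask is skipped because the two computations are known to disagree on it. A toy illustration of this equality-modulo-known-divergent-fields idea (hypothetical `Node` type, not reth's `BranchNodeCompact`):

```rust
/// Hypothetical node type with one field (`tree_mask`) that two
/// implementations are known to compute differently.
#[derive(Debug, PartialEq)]
struct Node {
    state_mask: u16,
    tree_mask: u16,
    hash_mask: u16,
}

/// Compares two optional nodes, ignoring the known-divergent `tree_mask`.
fn nodes_equal(a: Option<&Node>, b: Option<&Node>) -> bool {
    match (a, b) {
        (Some(a), Some(b)) => a.state_mask == b.state_mask && a.hash_mask == b.hash_mask,
        // Both absent is equal; one absent is a difference.
        _ => a == b,
    }
}

fn main() {
    let a = Node { state_mask: 0b1010, tree_mask: 0b0001, hash_mask: 0b1000 };
    let b = Node { state_mask: 0b1010, tree_mask: 0b0111, hash_mask: 0b1000 };
    // They differ only in tree_mask, so they are treated as equal.
    assert!(nodes_equal(Some(&a), Some(&b)));
    // A node missing on one side is always a difference.
    assert!(!nodes_equal(Some(&a), None));
}
```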
diff --git a/crates/errors/Cargo.toml b/crates/errors/Cargo.toml
index bb56a8bace56..11ecd708449d 100644
--- a/crates/errors/Cargo.toml
+++ b/crates/errors/Cargo.toml
@@ -11,7 +11,6 @@ repository.workspace = true
 workspace = true
 
 [dependencies]
-reth-blockchain-tree-api.workspace = true
 reth-consensus.workspace = true
 reth-execution-errors.workspace = true
 reth-fs-util.workspace = true
diff --git a/crates/errors/src/error.rs b/crates/errors/src/error.rs
index 2d97572f529a..5141a7457f45 100644
--- a/crates/errors/src/error.rs
+++ b/crates/errors/src/error.rs
@@ -1,4 +1,3 @@
-use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError};
 use reth_consensus::ConsensusError;
 use reth_execution_errors::BlockExecutionError;
 use reth_fs_util::FsPathError;
@@ -31,10 +30,6 @@ pub enum RethError {
     #[error(transparent)]
     Provider(#[from] ProviderError),
 
-    /// Canonical errors encountered.
-    #[error(transparent)]
-    Canonical(#[from] CanonicalError),
-
     /// Any other error.
     #[error(transparent)]
     Other(Box<dyn core::error::Error + Send + Sync>),
 
@@ -55,12 +50,6 @@ impl RethError {
     }
 }
 
-impl From<BlockchainTreeError> for RethError {
-    fn from(error: BlockchainTreeError) -> Self {
-        Self::Canonical(CanonicalError::BlockchainTree(error))
-    }
-}
-
 impl From<FsPathError> for RethError {
     fn from(err: FsPathError) -> Self {
         Self::other(err)
@@ -78,10 +67,9 @@ mod size_asserts {
         };
     }
 
-    static_assert_size!(RethError, 64);
+    static_assert_size!(RethError, 56);
     static_assert_size!(BlockExecutionError, 56);
     static_assert_size!(ConsensusError, 48);
     static_assert_size!(DatabaseError, 32);
     static_assert_size!(ProviderError, 48);
-    static_assert_size!(CanonicalError, 56);
 }
diff --git a/crates/errors/src/lib.rs b/crates/errors/src/lib.rs
index 9dc0ce0ca5bc..fc464eb98cbd 100644
--- a/crates/errors/src/lib.rs
+++ b/crates/errors/src/lib.rs
@@ -15,7 +15,6 @@ mod error;
 pub use error::{RethError, RethResult};
 
-pub use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError};
 pub use reth_consensus::ConsensusError;
 pub use reth_execution_errors::{BlockExecutionError, BlockValidationError};
 pub use reth_storage_errors::{
diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml
index 2d65949df452..938934d20e52 100644
--- a/crates/ethereum-forks/Cargo.toml
+++ b/crates/ethereum-forks/Cargo.toml
@@ -16,11 +16,11 @@ workspace = true
 alloy-chains.workspace = true
 alloy-eip2124.workspace = true
 alloy-primitives = { workspace = true, features = ["serde", "rlp"] }
-once_cell.workspace = true
 
 # misc
 serde = { workspace = true, features = ["derive"], optional = true }
 dyn-clone.workspace = true
+once_cell.workspace = true
 rustc-hash = { workspace = true, optional = true }
 
 # arbitrary utils
@@ -48,8 +48,8 @@ std = [
     "alloy-chains/std",
     "alloy-primitives/std",
     "rustc-hash/std",
-    "once_cell/std",
     "serde?/std",
-    "alloy-eip2124/std"
+    "alloy-eip2124/std",
+    "once_cell/std"
 ]
 rustc-hash = ["dep:rustc-hash"]
diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs
index 8a0510a97985..225263ffed60 100644
--- a/crates/ethereum-forks/src/hardfork/dev.rs
+++ b/crates/ethereum-forks/src/hardfork/dev.rs
@@ -8,7 +8,7 @@ use once_cell::sync::Lazy as LazyLock;
 #[cfg(feature = "std")]
 use std::sync::LazyLock;
 
-use crate::{ChainHardforks, EthereumHardfork, ForkCondition};
+use crate::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork};
 
 /// Dev hardforks
 pub static DEV_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
diff --git a/crates/ethereum-forks/src/hardfork/macros.rs b/crates/ethereum-forks/src/hardfork/macros.rs
index 780c15f6e6b9..dae98248bf13 100644
--- a/crates/ethereum-forks/src/hardfork/macros.rs
+++ b/crates/ethereum-forks/src/hardfork/macros.rs
@@ -17,11 +17,6 @@ macro_rules! hardfork {
                     $( $enum::$variant => stringify!($variant), )*
                 }
             }
-
-            /// Boxes `self` and returns it as `Box<dyn Hardfork>`.
-            pub fn boxed(self) -> Box<dyn Hardfork> {
-                Box::new(self)
-            }
         }
 
         impl FromStr for $enum {
diff --git a/crates/ethereum-forks/src/hardfork/mod.rs b/crates/ethereum-forks/src/hardfork/mod.rs
index f77d06cbf768..c939e2912c1d 100644
--- a/crates/ethereum-forks/src/hardfork/mod.rs
+++ b/crates/ethereum-forks/src/hardfork/mod.rs
@@ -6,6 +6,7 @@ pub use ethereum::EthereumHardfork;
 mod dev;
 pub use dev::DEV_HARDFORKS;
 
+use alloc::boxed::Box;
 use core::{
     any::Any,
     hash::{Hash, Hasher},
@@ -17,6 +18,11 @@ use dyn_clone::DynClone;
 pub trait Hardfork: Any + DynClone + Send + Sync + 'static {
     /// Fork name.
    fn name(&self) -> &'static str;
+
+    /// Returns boxed value.
+    fn boxed(&self) -> Box<dyn Hardfork + '_> {
+        Box::new(self)
+    }
 }
 
 dyn_clone::clone_trait_object!(Hardfork);
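With `boxed` now a default method, `Hardfork` stays object-safe, and `DynClone` plus `clone_trait_object!` is what makes `Box<dyn Hardfork>` cloneable even though the trait cannot require `Clone` directly. A minimal sketch of the same pattern (toy `Fork` trait; assumes the `dyn-clone` crate as a dependency):

```rust
use dyn_clone::DynClone;

/// A clonable, object-safe trait in the same shape as `Hardfork`.
trait Fork: DynClone + Send + Sync + 'static {
    fn name(&self) -> &'static str;
}

// Generates `impl Clone for Box<dyn Fork>` by delegating to `DynClone`.
dyn_clone::clone_trait_object!(Fork);

#[derive(Clone)]
struct Shanghai;

impl Fork for Shanghai {
    fn name(&self) -> &'static str {
        "Shanghai"
    }
}

fn main() {
    let fork: Box<dyn Fork> = Box::new(Shanghai);
    // `Box<dyn Fork>` is `Clone` even though requiring `Clone` on the
    // trait itself would make it unusable as a trait object.
    let copy = fork.clone();
    assert_eq!(fork.name(), copy.name());
}
```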
diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs
index c62c6a91a355..c9b1a115d23b 100644
--- a/crates/ethereum-forks/src/hardforks/ethereum.rs
+++ b/crates/ethereum-forks/src/hardforks/ethereum.rs
@@ -1,54 +1,80 @@
 use alloy_primitives::U256;
 
-use crate::{hardforks::Hardforks, EthereumHardfork, ForkCondition};
+use crate::{EthereumHardfork, ForkCondition};
 
 /// Helper methods for Ethereum forks.
 #[auto_impl::auto_impl(&, Arc)]
-pub trait EthereumHardforks: Hardforks {
+pub trait EthereumHardforks: Clone {
+    /// Retrieves [`ForkCondition`] by an [`EthereumHardfork`]. If `fork` is not present, returns
+    /// [`ForkCondition::Never`].
+    fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition;
+
+    /// Convenience method to check if an [`EthereumHardfork`] is active at a given timestamp.
+    fn is_ethereum_fork_active_at_timestamp(&self, fork: EthereumHardfork, timestamp: u64) -> bool {
+        self.ethereum_fork_activation(fork).active_at_timestamp(timestamp)
+    }
+
+    /// Convenience method to check if an [`EthereumHardfork`] is active at a given block number.
+    fn is_ethereum_fork_active_at_block(&self, fork: EthereumHardfork, block_number: u64) -> bool {
+        self.ethereum_fork_activation(fork).active_at_block(block_number)
+    }
+
     /// Convenience method to check if [`EthereumHardfork::Shanghai`] is active at a given
     /// timestamp.
     fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool {
-        self.is_fork_active_at_timestamp(EthereumHardfork::Shanghai, timestamp)
+        self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Shanghai, timestamp)
     }
 
     /// Convenience method to check if [`EthereumHardfork::Cancun`] is active at a given timestamp.
     fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool {
-        self.is_fork_active_at_timestamp(EthereumHardfork::Cancun, timestamp)
+        self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Cancun, timestamp)
    }
 
     /// Convenience method to check if [`EthereumHardfork::Prague`] is active at a given timestamp.
     fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool {
-        self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp)
+        self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp)
     }
 
     /// Convenience method to check if [`EthereumHardfork::Osaka`] is active at a given timestamp.
     fn is_osaka_active_at_timestamp(&self, timestamp: u64) -> bool {
-        self.is_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp)
+        self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp)
     }
 
     /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block
     /// number.
     fn is_byzantium_active_at_block(&self, block_number: u64) -> bool {
-        self.fork(EthereumHardfork::Byzantium).active_at_block(block_number)
+        self.is_ethereum_fork_active_at_block(EthereumHardfork::Byzantium, block_number)
     }
 
     /// Convenience method to check if [`EthereumHardfork::SpuriousDragon`] is active at a given
     /// block number.
     fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool {
-        self.fork(EthereumHardfork::SpuriousDragon).active_at_block(block_number)
+        self.is_ethereum_fork_active_at_block(EthereumHardfork::SpuriousDragon, block_number)
     }
 
     /// Convenience method to check if [`EthereumHardfork::Homestead`] is active at a given block
     /// number.
     fn is_homestead_active_at_block(&self, block_number: u64) -> bool {
-        self.fork(EthereumHardfork::Homestead).active_at_block(block_number)
+        self.is_ethereum_fork_active_at_block(EthereumHardfork::Homestead, block_number)
+    }
+
+    /// Convenience method to check if [`EthereumHardfork::London`] is active at a given block
+    /// number.
+    fn is_london_active_at_block(&self, block_number: u64) -> bool {
+        self.is_ethereum_fork_active_at_block(EthereumHardfork::London, block_number)
+    }
+
+    /// Convenience method to check if [`EthereumHardfork::Constantinople`] is active at a given
+    /// block number.
+    fn is_constantinople_active_at_block(&self, block_number: u64) -> bool {
+        self.is_ethereum_fork_active_at_block(EthereumHardfork::Constantinople, block_number)
     }
 
     /// The Paris hardfork (merge) is activated via block number. If we have knowledge of the block,
     /// this function will return true if the block number is greater than or equal to the Paris
     /// (merge) block.
     fn is_paris_active_at_block(&self, block_number: u64) -> Option<bool> {
-        match self.fork(EthereumHardfork::Paris) {
+        match self.ethereum_fork_activation(EthereumHardfork::Paris) {
             ForkCondition::TTD { activation_block_number, .. } => {
                 Some(block_number >= activation_block_number)
             }
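The `EthereumHardforks` rewrite follows a common trait-design move: shrink the required surface to a single activation lookup and derive every convenience check from it as a default method, so implementors no longer need the full `Hardforks` machinery. A self-contained sketch of the shape (toy types with mainnet-like activation heights, not reth's API):

```rust
/// Toy activation condition in the spirit of `ForkCondition`.
#[derive(Clone, Copy)]
enum Condition {
    /// Active from this block number on.
    Block(u64),
    /// Never active on this chain (or not answerable by block number).
    Never,
}

impl Condition {
    fn active_at_block(self, n: u64) -> bool {
        matches!(self, Self::Block(b) if n >= b)
    }
}

#[derive(Clone, Copy)]
enum Fork {
    Homestead,
    London,
    Shanghai,
}

/// One required method; every convenience check is a default method on top,
/// so implementors only have to supply the activation lookup.
trait Forks {
    fn activation(&self, fork: Fork) -> Condition;

    fn is_active_at_block(&self, fork: Fork, n: u64) -> bool {
        self.activation(fork).active_at_block(n)
    }

    fn is_london_active_at_block(&self, n: u64) -> bool {
        self.is_active_at_block(Fork::London, n)
    }
}

/// Mainnet-like toy spec: only the lookup is written by hand.
struct Spec;

impl Forks for Spec {
    fn activation(&self, fork: Fork) -> Condition {
        match fork {
            Fork::Homestead => Condition::Block(1_150_000),
            Fork::London => Condition::Block(12_965_000),
            // Timestamp-activated in reality, so a block query has no answer.
            Fork::Shanghai => Condition::Never,
        }
    }
}

fn main() {
    assert!(Spec.is_london_active_at_block(12_965_000));
    assert!(!Spec.is_london_active_at_block(12_964_999));
    assert!(Spec.is_active_at_block(Fork::Homestead, 1_150_000));
    assert!(!Spec.is_active_at_block(Fork::Shanghai, u64::MAX));
}
```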
diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs
index c1ba56b8c624..b81ee1d5c448 100644
--- a/crates/ethereum/consensus/src/lib.rs
+++ b/crates/ethereum/consensus/src/lib.rs
@@ -11,7 +11,7 @@
 use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH};
 use alloy_eips::{eip7840::BlobParams, merge::ALLOWED_FUTURE_BLOCK_TIME_SECONDS};
 use alloy_primitives::U256;
-use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks};
+use reth_chainspec::{EthChainSpec, EthereumHardforks};
 use reth_consensus::{
     Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput,
 };
@@ -56,16 +56,16 @@ impl EthBeaconConsensus
         parent: &SealedHeader,
     ) -> Result<(), ConsensusError> {
         // Determine the parent gas limit, considering elasticity multiplier on the London fork.
-        let parent_gas_limit =
-            if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number())
-            {
-                parent.gas_limit() *
-                    self.chain_spec
-                        .base_fee_params_at_timestamp(header.timestamp())
-                        .elasticity_multiplier as u64
-            } else {
-                parent.gas_limit()
-            };
+        let parent_gas_limit = if !self.chain_spec.is_london_active_at_block(parent.number()) &&
+            self.chain_spec.is_london_active_at_block(header.number())
+        {
+            parent.gas_limit() *
+                self.chain_spec
+                    .base_fee_params_at_timestamp(header.timestamp())
+                    .elasticity_multiplier as u64
+        } else {
+            parent.gas_limit()
+        };
 
         // Check for an increase in gas limit beyond the allowed threshold.
if header.gas_limit() > parent_gas_limit { @@ -116,18 +116,17 @@ where H: BlockHeader, B: BlockBody, { + type Error = ConsensusError; + fn validate_body_against_header( &self, body: &B, header: &SealedHeader, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { validate_body_against_header(body, header.header()) } - fn validate_block_pre_execution( - &self, - block: &SealedBlock, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error> { validate_block_pre_execution(block, &self.chain_spec) } } @@ -210,12 +209,10 @@ where fn validate_header_with_total_difficulty( &self, header: &H, - total_difficulty: U256, + _total_difficulty: U256, ) -> Result<(), ConsensusError> { - let is_post_merge = self - .chain_spec - .fork(EthereumHardfork::Paris) - .active_at_ttd(total_difficulty, header.difficulty()); + let is_post_merge = + self.chain_spec.is_paris_active_at_block(header.number()).is_some_and(|active| active); if is_post_merge { // TODO: add `is_zero_difficulty` to `alloy_consensus::BlockHeader` trait diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index d1301882c638..350780d0bdad 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -9,8 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod payload; -use std::sync::Arc; - use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -26,6 +24,7 @@ use reth_payload_primitives::{ use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{Block, NodePrimitives, SealedBlock, SealedBlockFor}; use reth_rpc_types_compat::engine::payload::block_to_payload; +use std::sync::Arc; /// The types used in the default mainnet ethereum beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 0470a283ed6e..3d8fed3b5cc0 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -5,7 +5,7 @@ use crate::{ EthEvmConfig, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction as _; +use alloy_consensus::Transaction; use alloy_eips::{eip6110, eip7685::Requests}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; @@ -116,14 +116,14 @@ where impl EthExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm, { /// Configures a new evm configuration and block environment for the given block. /// /// # Caution /// /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &alloy_consensus::Header) -> EnvWithHandlerCfg { + fn evm_env_for_block(&self, header: &EvmConfig::Header) -> EnvWithHandlerCfg { let EvmEnv { cfg_env_with_handler_cfg, block_env } = self.evm_config.cfg_and_block_env(header); EnvWithHandlerCfg::new_with_cfg_env(cfg_env_with_handler_cfg, block_env, Default::default()) @@ -156,7 +156,7 @@ where let env = self.evm_env_for_block(&block.header); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - self.system_caller.apply_pre_execution_changes(&block.block, &mut evm)?; + self.system_caller.apply_pre_execution_changes(&block.header, &mut evm)?; Ok(()) } diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 08b51a998f84..d472de9aa9bf 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -16,6 +16,7 @@ reth-payload-builder.workspace = true reth-ethereum-engine-primitives.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true +reth-ethereum-consensus.workspace = true reth-node-builder.workspace = true reth-tracing.workspace = true reth-provider.workspace = true @@ -24,7 +25,6 @@ reth-network.workspace = true reth-evm.workspace = true reth-ethereum-evm.workspace = true reth-consensus.workspace = true -reth-beacon-consensus.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index a51886dd1c41..0db3f2d17a11 100644 --- a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -21,6 +21,7 @@ pub use evm::{ BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthExecutorProvider, }; +pub use reth_ethereum_consensus as consensus; pub mod node; pub use node::EthereumNode; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index c7cec2eaee14..390e5e1e68f8 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -1,8 +1,11 @@ //! Ethereum Node types config. +pub use crate::payload::EthereumPayloadBuilder; use crate::{EthEngineTypes, EthEvmConfig}; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; +use reth_consensus::{ConsensusError, FullConsensus}; +use reth_ethereum_consensus::EthBeaconConsensus; +pub use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; @@ -29,9 +32,6 @@ use reth_transaction_pool::{ use reth_trie_db::MerklePatriciaTrie; use std::sync::Arc; -pub use crate::payload::EthereumPayloadBuilder; -pub use reth_ethereum_engine_primitives::EthereumEngineValidator; - /// Type configuration for a regular Ethereum node. 
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -252,7 +252,7 @@ impl ConsensusBuilder for EthereumConsensusBuilder where Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc>; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 325575998c26..cb8eb1556a4c 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -9,7 +9,7 @@ use reth_node_builder::{ }; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; -use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; use reth_rpc_eth_api::helpers::EthTransactions; use reth_tasks::TaskManager; use std::sync::Arc; @@ -25,7 +25,7 @@ async fn can_run_dev_node() -> eyre::Result<()> { .with_dev(DevArgs { dev: true, ..Default::default() }); let NodeHandle { node, .. } = NodeBuilder::new(node_config.clone()) .testing_node(exec.clone()) - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(EthereumNode::components()) .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index 218839fbe019..e3d78182ed52 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -9,7 +9,7 @@ use reth_db::{ use reth_node_api::NodeTypesWithDBAdapter; use reth_node_builder::{EngineNodeLauncher, FullNodeComponents, NodeBuilder, NodeConfig}; use reth_node_ethereum::node::{EthereumAddOns, EthereumNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; #[test] @@ -50,7 +50,7 @@ async fn test_eth_launcher() { let _builder = NodeBuilder::new(config) .with_database(db) - .with_types_and_provider::>>, >>() .with_components(EthereumNode::components()) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 67046266aecc..fadb37b49ce1 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -490,7 +490,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.header, "sealed built block"); + debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.sealed_header(), "sealed built block"); // create the executed block data let executed = ExecutedBlock { diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index f49fa693f241..cc723fa110ff 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -11,7 +11,10 @@ extern crate alloc; -use alloc::{boxed::Box, string::String}; +use alloc::{ + boxed::Box, + string::{String, ToString}, +}; use alloy_eips::BlockNumHash; use alloy_primitives::B256; use reth_consensus::ConsensusError; @@ -134,7 +137,6 @@ pub enum BlockExecutionError { impl BlockExecutionError { /// Create a new [`BlockExecutionError::Internal`] variant, containing a /// [`InternalBlockExecutionError::Other`] error. 
- #[cfg(feature = "std")] pub fn other(error: E) -> Self where E: core::error::Error + Send + Sync + 'static, @@ -144,8 +146,7 @@ impl BlockExecutionError { /// Create a new [`BlockExecutionError::Internal`] variant, containing a /// [`InternalBlockExecutionError::Other`] error with the given message. - #[cfg(feature = "std")] - pub fn msg(msg: impl std::fmt::Display) -> Self { + pub fn msg(msg: impl core::fmt::Display) -> Self { Self::Internal(InternalBlockExecutionError::msg(msg)) } @@ -195,7 +196,6 @@ pub enum InternalBlockExecutionError { impl InternalBlockExecutionError { /// Create a new [`InternalBlockExecutionError::Other`] variant. - #[cfg(feature = "std")] pub fn other(error: E) -> Self where E: core::error::Error + Send + Sync + 'static, @@ -204,8 +204,7 @@ impl InternalBlockExecutionError { } /// Create a new [`InternalBlockExecutionError::Other`] from a given message. - #[cfg(feature = "std")] - pub fn msg(msg: impl std::fmt::Display) -> Self { + pub fn msg(msg: impl core::fmt::Display) -> Self { Self::Other(msg.to_string().into()) } } diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 91badacc187c..43b5269bef3b 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -91,7 +91,7 @@ impl Chain { /// Returns an iterator over all headers in the block with increasing block numbers. pub fn headers(&self) -> impl Iterator> + '_ { - self.blocks.values().map(|block| block.header.clone()) + self.blocks.values().map(|block| block.clone_sealed_header()) } /// Get cached trie updates for this chain. @@ -858,8 +858,8 @@ mod tests { let mut block2 = block; // Set the hashes of block1 and block2 - block1.block.header.set_hash(block1_hash); - block2.block.header.set_hash(block2_hash); + block1.block.set_hash(block1_hash); + block2.block.set_hash(block2_hash); // Create a random receipt object, receipt1 let receipt1 = Receipt { diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 9a9f65375918..8bf40d38caa0 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -6,7 +6,6 @@ pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; pub use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives_traits::Block as _; pub use reth_storage_errors::provider::ProviderError; use crate::{system_calls::OnStateHook, TxEnvOverrides}; @@ -19,6 +18,7 @@ use alloy_primitives::{ use core::fmt::Display; use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; +use reth_primitives_traits::Block; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; use revm::{ diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 5758bdd5855e..4d0fc8041d45 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -94,26 +94,25 @@ where Chainspec: EthereumHardforks, { /// Apply pre execution changes. - pub fn apply_pre_execution_changes( + pub fn apply_pre_execution_changes( &mut self, - block: &Block, + header: &EvmConfig::Header, evm: &mut Evm<'_, Ext, DB>, ) -> Result<(), BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, - Block: reth_primitives_traits::Block
, { self.apply_blockhashes_contract_call( - block.header().timestamp(), - block.header().number(), - block.header().parent_hash(), + header.timestamp(), + header.number(), + header.parent_hash(), evm, )?; self.apply_beacon_root_contract_call( - block.header().timestamp(), - block.header().number(), - block.header().parent_beacon_block_root(), + header.timestamp(), + header.number(), + header.parent_beacon_block_root(), evm, )?; diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index feb84e0493f8..9aadecf1afda 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -108,7 +108,7 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); @@ -235,7 +235,7 @@ mod tests { use reth_ethereum_evm::execute::EthExecutorProvider; use reth_primitives_traits::crypto::secp256k1::public_key_to_address; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, }; use reth_testing_utils::generators; use secp256k1::Keypair; @@ -253,7 +253,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let blocks_and_execution_outputs = blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; @@ -289,7 +289,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let blocks_and_execution_outcomes = blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index fc452d17f6db..f9d607a925f0 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -246,7 +246,7 @@ mod tests { use reth_ethereum_evm::execute::EthExecutorProvider; use reth_primitives_traits::crypto::secp256k1::public_key_to_address; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, }; use reth_stages_api::ExecutionStageThresholds; use reth_testing_utils::generators; @@ -265,7 +265,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; // Create first 2 blocks let blocks_and_execution_outcomes = @@ -303,7 +303,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let 
provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; // Create first 2 blocks let (blocks, execution_outcome) = diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index a874de4834d9..3cf538d06c28 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -80,7 +80,7 @@ where let block = block.clone().seal_slow(); provider_rw.append_blocks_with_state( vec![block], - execution_outcome, + &execution_outcome, Default::default(), Default::default(), )?; @@ -214,7 +214,7 @@ where let provider_rw = provider_factory.provider_rw()?; provider_rw.append_blocks_with_state( vec![block1.clone(), block2.clone()], - execution_outcome.clone(), + &execution_outcome, Default::default(), Default::default(), )?; diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index fb296c9ba9a6..1bbd099abe9d 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -666,7 +666,7 @@ mod tests { use reth_evm::test_utils::MockExecutorProvider; use reth_primitives::SealedBlockWithSenders; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, + providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockReader, BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -767,8 +767,8 @@ mod tests { // Define the notification for testing let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.block.set_hash(B256::new([0x01; 32])); + block1.block.set_block_number(10); let notification1 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), @@ -785,8 +785,8 @@ mod tests { // Push another notification let mut block2: SealedBlockWithSenders = Default::default(); - block2.block.header.set_hash(B256::new([0x02; 32])); - block2.block.header.set_block_number(20); + block2.block.set_hash(B256::new([0x02; 32])); + block2.block.set_block_number(20); let notification2 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block2.clone()], Default::default(), Default::default())), @@ -828,8 +828,8 @@ mod tests { // Push some notifications to fill part of the buffer let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.set_hash(B256::new([0x01; 32])); + block1.set_block_number(10); let notification1 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), @@ -1098,7 +1098,7 @@ mod tests { async fn exex_handle_new() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1117,12 +1117,12 @@ mod tests { // Setup two blocks for the chain commit notification let mut 
block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.block.set_hash(B256::new([0x01; 32])); + block1.block.set_block_number(10); let mut block2: SealedBlockWithSenders = Default::default(); - block2.block.header.set_hash(B256::new([0x02; 32])); - block2.block.header.set_block_number(11); + block2.block.set_hash(B256::new([0x02; 32])); + block2.block.set_block_number(11); // Setup a notification let notification = ExExNotification::ChainCommitted { @@ -1153,7 +1153,7 @@ mod tests { async fn test_notification_if_finished_height_gt_chain_tip() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1170,8 +1170,8 @@ mod tests { exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.block.set_hash(B256::new([0x01; 32])); + block1.block.set_block_number(10); let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), @@ -1203,7 +1203,7 @@ mod tests { async fn test_sends_chain_reorged_notification() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1246,7 +1246,7 @@ mod tests { async fn test_sends_chain_reverted_notification() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1306,7 +1306,7 @@ mod tests { provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); provider_rw.commit().unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1327,7 +1327,7 @@ mod tests { }; let (finalized_headers_tx, rx) = watch::channel(None); - finalized_headers_tx.send(Some(genesis_block.header.clone()))?; + finalized_headers_tx.send(Some(genesis_block.clone_sealed_header()))?; let finalized_header_stream = ForkChoiceStream::new(rx); let mut exex_manager = std::pin::pin!(ExExManager::new( @@ -1361,7 +1361,7 @@ mod tests { [notification.clone()] ); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event assert_eq!( @@ -1374,7 +1374,7 @@ mod tests { .send(ExExEvent::FinishedHeight((rng.gen::(), rng.gen::()).into())) .unwrap(); - finalized_headers_tx.send(Some(block.header.clone()))?; + 
finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a // non-canonical block @@ -1386,7 +1386,7 @@ mod tests { // Send a `FinishedHeight` event with a canonical block events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL is finalized assert_eq!(exex_manager.wal.iter_notifications()?.next().transpose()?, None); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 6c4f485495db..ae2b6b08f1a8 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -446,7 +446,7 @@ mod tests { use reth_ethereum_evm::execute::EthExecutorProvider; use reth_primitives::{Block, BlockExt}; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, + providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -465,7 +465,7 @@ mod tests { .block(genesis_hash.into())? .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory.clone())?; + let provider = BlockchainProvider::new(provider_factory.clone())?; let node_head_block = random_block( &mut rng, @@ -547,7 +547,7 @@ mod tests { .block(genesis_hash.into())? .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory)?; + let provider = BlockchainProvider::new(provider_factory)?; let node_head = Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; @@ -604,7 +604,7 @@ mod tests { .block(genesis_hash.into())? .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory)?; + let provider = BlockchainProvider::new(provider_factory)?; let node_head_block = random_block( &mut rng, @@ -704,7 +704,7 @@ mod tests { .block(genesis_hash.into())? 
.ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory)?; + let provider = BlockchainProvider::new(provider_factory)?; let exex_head_block = random_block( &mut rng, diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 77289a73ca72..bbb8c6710edc 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -37,7 +37,7 @@ use reth_node_builder::{ Components, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NodeComponentsBuilder, PoolBuilder, }, - BuilderContext, Node, NodeAdapter, RethFullAdapter2, + BuilderContext, Node, NodeAdapter, RethFullAdapter, }; use reth_node_core::node_config::NodeConfig; use reth_node_ethereum::{ @@ -50,7 +50,7 @@ use reth_provider::{providers::StaticFileProvider, BlockReader, EthStorage, Prov use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use tempfile::TempDir; use thiserror::Error; use tokio::sync::mpsc::{Sender, UnboundedReceiver}; @@ -169,14 +169,14 @@ pub type TmpDB = Arc>; /// The [`NodeAdapter`] for the [`TestExExContext`]. Contains type necessary to /// boot the testing environment pub type Adapter = NodeAdapter< - RethFullAdapter2, + RethFullAdapter, <>, + BlockchainProvider>, >, - >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, + >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, >; /// An [`ExExContext`] using the [`Adapter`] type. pub type TestExExContext = ExExContext; @@ -271,7 +271,7 @@ pub async fn test_exex_context_with_chain_spec( ); let genesis_hash = init_genesis(&provider_factory)?; - let provider = BlockchainProvider2::new(provider_factory.clone())?; + let provider = BlockchainProvider::new(provider_factory.clone())?; let network_manager = NetworkManager::new( NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng())) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 47a816f4ce6b..9aed7d3b698a 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -5,7 +5,7 @@ use alloy_primitives::BlockNumber; use futures::Stream; use futures_util::StreamExt; use reth_config::BodiesConfig; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::{ client::BodiesClient, @@ -39,7 +39,7 @@ pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc>, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -579,7 +579,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc>, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where @@ -677,7 +677,7 @@ mod tests { BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..2, ..Default::default() }, ); - let headers = blocks.iter().map(|block| block.header.clone()).collect::>(); + let headers = blocks.iter().map(|block| block.clone_sealed_header()).collect::>(); let bodies = blocks .into_iter() .map(|block| (block.hash(), block.into_body())) diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 892eae14cbb1..b9f63b143ac2 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -4,7 +4,7 
@@ use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::{client::BodiesClient, response::BlockResponse}, error::DownloadResult, @@ -59,7 +59,7 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc>, + consensus: Arc>, request: Vec>, ) { // Set last max requested block number diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index a3ad1f3b9dc2..f8c93a2a78e3 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -2,7 +2,7 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::{Future, FutureExt}; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::{client::BodiesClient, response::BlockResponse}, error::{DownloadError, DownloadResult}, @@ -40,7 +40,7 @@ use std::{ /// and eventually disconnected. pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. @@ -62,7 +62,7 @@ where /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -194,7 +194,7 @@ where // Body is invalid, put the header back and return an error let hash = block.hash(); let number = block.number(); - self.pending_headers.push_front(block.header); + self.pending_headers.push_front(block.into_sealed_header()); return Err(DownloadError::BodyValidation { hash, number, diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 9377be78676c..863c889532c3 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -43,7 +43,7 @@ impl TaskDow /// # Example /// /// ``` - /// use reth_consensus::Consensus; + /// use reth_consensus::{Consensus, ConsensusError}; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; /// use reth_primitives_traits::InMemorySize; @@ -55,7 +55,7 @@ impl TaskDow /// Provider: HeaderProvider
+ Unpin + 'static, /// >( /// client: Arc, - /// consensus: Arc>, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 0529b78a2b20..698f30faee5b 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -28,7 +28,7 @@ pub(crate) fn generate_bodies( BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() }, ); - let headers = blocks.iter().map(|block| block.header.clone()).collect(); + let headers = blocks.iter().map(|block| block.clone_sealed_header()).collect(); let bodies = blocks.into_iter().map(|block| (block.hash(), block.into_body())).collect(); (headers, bodies) diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 956057d98bff..517c5b879835 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -2,7 +2,6 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use reth_primitives_traits::InMemorySize; - /// The block response #[derive(PartialEq, Eq, Debug, Clone)] pub enum BlockResponse { @@ -19,7 +18,7 @@ where /// Return the reference to the response header pub const fn header(&self) -> &SealedHeader { match self { - Self::Full(block) => &block.header, + Self::Full(block) => block.sealed_header(), Self::Empty(header) => header, } } diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 62981ad5d9ab..309252bb8f26 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; use reth_primitives::{SealedBlock, SealedHeader}; @@ -30,7 +30,7 @@ where Client: BlockClient, { client: Client, - consensus: Arc>, + consensus: Arc>, } impl FullBlockClient @@ -40,7 +40,7 @@ where /// Creates a new instance of `FullBlockClient`. pub fn new( client: Client, - consensus: Arc>, + consensus: Arc>, ) -> Self { Self { client, consensus } } @@ -118,7 +118,7 @@ where Client: BlockClient, { client: Client, - consensus: Arc>, + consensus: Arc>, hash: B256, request: FullBlockRequest, header: Option>, @@ -330,7 +330,7 @@ where /// The client used to fetch headers and bodies. client: Client, /// The consensus instance used to validate the blocks. - consensus: Arc>, + consensus: Arc>, /// The block hash to start fetching from (inclusive). 
start_hash: B256, /// How many blocks to fetch: `len([start_hash, ..]) == count` @@ -429,7 +429,7 @@ where // put response hashes back into bodies map since we aren't returning them as a // response for block in valid_responses { - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); self.bodies.insert(header, BodyResponse::Validated(body)); } diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 5809ad6bdd40..6e20b335a107 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -12,7 +12,7 @@ use crate::{ }; use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; -use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_consensus::{test_utils::TestConsensus, Consensus, ConsensusError}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::SealedHeader; @@ -147,7 +147,11 @@ impl Stream for TestDownload { let empty: SealedHeader = SealedHeader::default(); if let Err(error) = - >::validate_header_against_parent(&this.consensus, &empty, &empty) + >::validate_header_against_parent( + &this.consensus, + &empty, + &empty, + ) { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index d952a8abfaae..fa323108dc66 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth reth-db-api.workspace = true -reth-beacon-consensus.workspace = true reth-consensus.workspace = true reth-evm.workspace = true reth-provider.workspace = true diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 8db75480d11a..498297c2db8b 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -2,12 +2,12 @@ use crate::ConfigureEvm; use alloy_rpc_types_engine::JwtSecret; -use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; +use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; @@ -58,7 +58,10 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static; /// Network API. 
type Network: FullNetwork; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index ff07d5ee26e1..9f08507f9f21 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -13,8 +13,6 @@ workspace = true [dependencies] ## reth -reth-beacon-consensus.workspace = true -reth-blockchain-tree.workspace = true reth-chain-state.workspace = true reth-chainspec.workspace = true reth-cli-util.workspace = true @@ -41,7 +39,6 @@ reth-node-core.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true @@ -99,7 +96,6 @@ default = [] js-tracer = ["reth-rpc/js-tracer"] test-utils = [ "reth-db/test-utils", - "reth-blockchain-tree/test-utils", "reth-chain-state/test-utils", "reth-chainspec/test-utils", "reth-consensus/test-utils", diff --git a/crates/node/builder/docs/mermaid/builder.mmd b/crates/node/builder/docs/mermaid/builder.mmd index aa56bfe736d2..96282d3fd9fd 100644 --- a/crates/node/builder/docs/mermaid/builder.mmd +++ b/crates/node/builder/docs/mermaid/builder.mmd @@ -9,7 +9,7 @@ graph TD; end NodeBuilderC--"launch"-->launch subgraph launch - database("database init")-->tree("blockchain tree init") + database("database init")-->tree("blockchain provider init") tree--BuilderContext-->components{"build_components"} subgraph components ComponentsBuilder--"first creates"-->Pool diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 47d9a54572e9..82e81209a057 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -7,11 +7,10 @@ use crate::{ components::NodeComponentsBuilder, node::FullNode, rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, - BlockReaderFor, DefaultNodeLauncher, LaunchNode, Node, NodeHandle, + BlockReaderFor, EngineNodeLauncher, LaunchNode, Node, }; use alloy_eips::eip4844::env_settings::EnvKzgSettings; use futures::Future; -use reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; use reth_db_api::{ @@ -34,7 +33,7 @@ use reth_node_core::{ primitives::Head, }; use reth_provider::{ - providers::{BlockchainProvider, BlockchainProvider2, NodeTypesForProvider}, + providers::{BlockchainProvider, NodeTypesForProvider, NodeTypesForTree}, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; @@ -53,11 +52,6 @@ pub use states::*; pub type RethFullAdapter = FullNodeTypesAdapter>>; -/// The adapter type for a reth node with the builtin provider type -// Note: we need to hardcode this because custom components might depend on it in associated types. -pub type RethFullAdapter2 = - FullNodeTypesAdapter>>; - #[allow(clippy::doc_markdown)] #[cfg_attr(doc, aquamarine::aquamarine)] /// Declaratively construct a node. @@ -346,18 +340,14 @@ where /// /// This bootstraps the node internals, creates all the components with the given [Node] /// - /// Returns a [`NodeHandle`] that can be used to interact with the node. + /// Returns a [`NodeHandle`](crate::NodeHandle) that can be used to interact with the node. 
pub async fn launch_node( self, node: N, ) -> eyre::Result< - NodeHandle< - NodeAdapter< - RethFullAdapter, - >>::Components, - >, - N::AddOns, - >, + , N::ComponentsBuilder, N::AddOns>, + >>::Node, > where N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, @@ -368,6 +358,9 @@ where >, >, N::Primitives: FullNodePrimitives, + EngineNodeLauncher: LaunchNode< + NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, + >, { self.node(node).launch().await } @@ -558,14 +551,20 @@ where T: NodeTypesWithEngine + NodeTypesForTree, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, + EngineNodeLauncher: LaunchNode, CB, AO>>, { - /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc + /// Launches the node with the [`EngineNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( self, - ) -> eyre::Result, CB::Components>, AO>> { + ) -> eyre::Result< + , CB, AO>, + >>::Node, + > { let Self { builder, task_executor } = self; - let launcher = DefaultNodeLauncher::new(task_executor, builder.config.datadir()); + let launcher = + EngineNodeLauncher::new(task_executor, builder.config.datadir(), Default::default()); builder.launch_with(launcher).await } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index ce24c8bff8df..977381b6582b 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,7 +7,7 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkPrimitives; use reth_node_api::{BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; @@ -402,7 +402,10 @@ where + 'static, EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, - Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, + Cons: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static, { type Components = Components; diff --git a/crates/node/builder/src/components/consensus.rs b/crates/node/builder/src/components/consensus.rs index 074080d337b1..0620b2507d2a 100644 --- a/crates/node/builder/src/components/consensus.rs +++ b/crates/node/builder/src/components/consensus.rs @@ -1,4 +1,5 @@ //! Consensus component for the node builder. +use reth_consensus::{ConsensusError, FullConsensus}; use reth_node_api::NodeTypes; use crate::{BuilderContext, FullNodeTypes}; @@ -7,7 +8,7 @@ use std::future::Future; /// A type that knows how to build the consensus implementation. pub trait ConsensusBuilder: Send { /// The consensus implementation to build. - type Consensus: reth_consensus::FullConsensus<::Primitives> + type Consensus: FullConsensus<::Primitives, Error = ConsensusError> + Clone + Unpin + 'static; @@ -22,7 +23,7 @@ pub trait ConsensusBuilder: Send { impl ConsensusBuilder for F where Node: FullNodeTypes, - Consensus: reth_consensus::FullConsensus<::Primitives> + Consensus: FullConsensus<::Primitives, Error = ConsensusError> + Clone + Unpin + 'static, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 892380a4c6ca..c5ac67e5cbc7 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -23,7 +23,7 @@ pub use pool::*; use reth_network_p2p::BlockClient; use crate::{ConfigureEvm, FullNodeTypes}; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::execute::BlockExecutorProvider; use reth_network::{NetworkHandle, NetworkPrimitives}; use reth_network_api::FullNetwork; @@ -47,7 +47,10 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static; /// Network API. type Network: FullNetwork< @@ -106,7 +109,10 @@ where + 'static, EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, - Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, + Cons: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static, { type Pool = Pool; type Evm = EVM; diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index e4e247e239d3..31968197d818 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -2,10 +2,6 @@ use alloy_consensus::BlockHeader; use futures::{future::Either, stream, stream_select, StreamExt}; -use reth_beacon_consensus::{ - hooks::{EngineHooks, StaticFileHook}, - BeaconConsensusEngineHandle, -}; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; use reth_db_api::{ @@ -23,8 +19,8 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, - PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, + BeaconConsensusEngineHandle, BuiltPayload, FullNodeTypes, NodeTypesWithDBAdapter, + NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -33,7 +29,7 @@ use reth_node_core::{ }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::EthereumHardforks; -use reth_provider::providers::{BlockchainProvider2, NodeTypesForProvider}; +use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -79,7 +75,7 @@ where T: FullNodeTypes< Types = Types, DB = DB, - Provider = BlockchainProvider2>, + Provider = BlockchainProvider>, >, CB: NodeComponentsBuilder, AO: RethRpcAddOns> @@ -131,7 +127,7 @@ where // passing FullNodeTypes as type parameter here so that we can build // later the components. .with_blockchain_db::(move |provider_factory| { - Ok(BlockchainProvider2::new(provider_factory)?) + Ok(BlockchainProvider::new(provider_factory)?) })? 
.with_components(components_builder, on_component_initialized).await?; @@ -166,14 +162,9 @@ where .maybe_store_messages(node_config.debug.engine_api_store.clone()); let max_block = ctx.max_block(network_client.clone()).await?; - let mut hooks = EngineHooks::new(); let static_file_producer = ctx.static_file_producer(); let static_file_producer_events = static_file_producer.lock().events(); - hooks.add(StaticFileHook::new( - static_file_producer.clone(), - Box::new(ctx.task_executor().clone()), - )); info!(target: "reth::cli", "StaticFileProducer initialized"); let consensus = Arc::new(ctx.components().consensus().clone()); diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index c6a00a6eec8c..33e37c329ae1 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -6,50 +6,12 @@ mod exex; pub(crate) mod engine; pub use common::LaunchContext; -use common::{Attached, LaunchContextWith, WithConfigs}; pub use exex::ExExLauncher; -use reth_db_api::{ - database_metrics::{DatabaseMetadata, DatabaseMetrics}, - Database, -}; -use std::{future::Future, sync::Arc}; +use std::future::Future; -use futures::{future::Either, stream, stream_select, StreamExt}; -use reth_beacon_consensus::{ - hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensusEngine, -}; -use reth_blockchain_tree::{ - noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, - TreeExternals, -}; -use reth_chainspec::EthChainSpec; -use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; -use reth_engine_util::EngineMessageStreamExt; -use reth_exex::ExExManagerHandle; -use reth_network::BlockDownloaderProvider; -use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine}; -use reth_node_core::{ - dirs::{ChainPath, DataDirPath}, - exit::NodeExitFuture, -}; -use reth_node_events::{cl::ConsensusLayerHealthEvents, node, node::NodeEvent}; -use reth_provider::providers::{BlockchainProvider, NodeTypesForTree}; use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, info}; -use tokio::sync::{mpsc::unbounded_channel, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; - -use crate::{ - builder::{NodeAdapter, NodeTypesAdapter}, - components::{NodeComponents, NodeComponentsBuilder}, - hooks::NodeHooks, - node::FullNode, - rpc::{RethRpcAddOns, RpcHandle}, - AddOns, NodeBuilderWithComponents, NodeHandle, -}; /// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`RpcNodeCore`]. pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< @@ -68,7 +30,8 @@ pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< /// /// This is essentially the launch logic for a node. /// -/// See also [`DefaultNodeLauncher`] and [`NodeBuilderWithComponents::launch_with`] +/// See also [`EngineNodeLauncher`](crate::EngineNodeLauncher) and +/// [`NodeBuilderWithComponents::launch_with`](crate::NodeBuilderWithComponents) pub trait LaunchNode { /// The node type that is created. type Node; @@ -88,317 +51,3 @@ where self(target) } } - -/// The default launcher for a node. -#[derive(Debug)] -pub struct DefaultNodeLauncher { - /// The task executor for the node. - pub ctx: LaunchContext, -} - -impl DefaultNodeLauncher { - /// Create a new instance of the default node launcher. 
- pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { - Self { ctx: LaunchContext::new(task_executor, data_dir) } - } -} - -impl LaunchNode> for DefaultNodeLauncher -where - Types: NodeTypesWithEngine + NodeTypesForTree, - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: FullNodeTypes< - Provider = BlockchainProvider>, - Types = Types, - DB = DB, - >, - CB: NodeComponentsBuilder, - AO: RethRpcAddOns>, -{ - type Node = NodeHandle, AO>; - - async fn launch_node( - self, - target: NodeBuilderWithComponents, - ) -> eyre::Result { - let Self { ctx } = self; - let NodeBuilderWithComponents { - adapter: NodeTypesAdapter { database }, - components_builder, - add_ons: AddOns { hooks, exexs: installed_exex, add_ons }, - config, - } = target; - let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; - - // TODO: remove tree and move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - - let tree = Arc::new(NoopBlockchainTree::with_canon_state_notifications( - canon_state_notification_sender.clone(), - )); - - // setup the launch context - let mut ctx = ctx - .with_configured_globals() - // load the toml config - .with_loaded_toml_config(config)? - // add resolved peers - .with_resolved_peers().await? - // attach the database - .attach(database.clone()) - // ensure certain settings take effect - .with_adjusted_configs() - // Create the provider factory - .with_provider_factory().await? - .inspect(|_| { - info!(target: "reth::cli", "Database opened"); - }) - .with_prometheus_server().await? - .inspect(|this| { - debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); - }) - .with_genesis()? - .inspect(|this: &LaunchContextWith, _>>| { - info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); - }) - .with_metrics_task() - // passing FullNodeTypes as type parameter here so that we can build - // later the components. - .with_blockchain_db::(move |provider_factory| { - Ok(BlockchainProvider::new(provider_factory, tree)?) - })? - .with_components(components_builder, on_component_initialized).await?; - - let consensus = Arc::new(ctx.components().consensus().clone()); - - let tree_externals = TreeExternals::new( - ctx.provider_factory().clone(), - consensus.clone(), - ctx.components().block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, tree_config)? - .with_sync_metrics_tx(ctx.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. 
This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(canon_state_notification_sender); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - ctx.node_adapter_mut().provider = ctx.blockchain_db().clone().with_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); - - // spawn exexs - let exex_manager_handle = ExExLauncher::new( - ctx.head(), - ctx.node_adapter().clone(), - installed_exex, - ctx.configs().clone(), - ) - .launch() - .await?; - - // create pipeline - let network_client = ctx.components().network().fetch_client().await?; - let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); - - let node_config = ctx.node_config(); - let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) - .maybe_skip_fcu(node_config.debug.skip_fcu) - .maybe_skip_new_payload(node_config.debug.skip_new_payload) - .maybe_reorg( - ctx.blockchain_db().clone(), - ctx.components().evm_config().clone(), - reth_payload_validator::ExecutionPayloadValidator::new(ctx.chain_spec()), - node_config.debug.reorg_frequency, - node_config.debug.reorg_depth, - ) - // Store messages _after_ skipping so that `replay-engine` command - // would replay only the messages that were observed by the engine - // during this run. - .maybe_store_messages(node_config.debug.engine_api_store.clone()); - - let max_block = ctx.max_block(network_client.clone()).await?; - let mut hooks = EngineHooks::new(); - - let static_file_producer = ctx.static_file_producer(); - let static_file_producer_events = static_file_producer.lock().events(); - hooks.add(StaticFileHook::new( - static_file_producer.clone(), - Box::new(ctx.task_executor().clone()), - )); - info!(target: "reth::cli", "StaticFileProducer initialized"); - - // Configure the pipeline - let pipeline_exex_handle = - exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (pipeline, client) = if ctx.is_dev() { - eyre::bail!("Dev mode is not supported for legacy engine") - } else { - let pipeline = crate::setup::build_networked_pipeline( - &ctx.toml_config().stages, - network_client.clone(), - consensus.clone(), - ctx.provider_factory().clone(), - ctx.task_executor(), - ctx.sync_metrics_tx(), - ctx.prune_config(), - max_block, - static_file_producer, - ctx.components().block_executor().clone(), - pipeline_exex_handle, - )?; - - (pipeline, network_client.clone()) - }; - - let pipeline_events = pipeline.events(); - - let initial_target = ctx.node_config().debug.tip; - - let mut pruner_builder = ctx.pruner_builder(); - if let Some(exex_manager_handle) = &exex_manager_handle { - pruner_builder = - pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); - } - let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - - let pruner_events = pruner.events(); - info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); - hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); - - // Configure the consensus engine - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( - client, - pipeline, - ctx.blockchain_db().clone(), - Box::new(ctx.task_executor().clone()), - Box::new(ctx.components().network().clone()), - max_block, - ctx.components().payload_builder().clone(), - initial_target, - reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, - 
consensus_engine_tx, - Box::pin(consensus_engine_stream), - hooks, - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - let events = stream_select!( - pipeline_events.map(Into::>::into), - if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { - Either::Left( - ConsensusLayerHealthEvents::new(Box::new(ctx.blockchain_db().clone())) - .map(Into::into), - ) - } else { - Either::Right(stream::empty()) - }, - pruner_events.map(Into::into), - static_file_producer_events.map(Into::into), - ); - ctx.task_executor().spawn_critical( - "events task", - node::handle_events( - Some(Box::new(ctx.components().network().clone())), - Some(ctx.head().number), - events, - ), - ); - - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter().clone(), - config: ctx.node_config(), - beacon_engine_handle, - jwt_secret, - }; - - let RpcHandle { rpc_server_handles, rpc_registry } = - add_ons.launch_add_ons(add_ons_ctx).await?; - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - ctx.task_executor().spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - if let Some(maybe_custom_etherscan_url) = ctx.node_config().debug.etherscan.clone() { - info!(target: "reth::cli", "Using etherscan as consensus client"); - - let chain = ctx.node_config().chain.chain(); - let etherscan_url = maybe_custom_etherscan_url.map(Ok).unwrap_or_else(|| { - // If URL isn't provided, use default Etherscan URL for the chain if it is known - chain - .etherscan_urls() - .map(|urls| urls.0.to_string()) - .ok_or_else(|| eyre::eyre!("failed to get etherscan url for chain: {chain}")) - })?; - - let block_provider = EtherscanBlockProvider::new( - etherscan_url, - chain.etherscan_api_key().ok_or_else(|| { - eyre::eyre!( - "etherscan api key not found for rpc consensus client for chain: {chain}" - ) - })?, - ); - let rpc_consensus_client = DebugConsensusClient::new( - rpc_server_handles.auth.clone(), - Arc::new(block_provider), - ); - ctx.task_executor().spawn_critical("etherscan consensus client", async move { - rpc_consensus_client.run::().await - }); - } - - if let Some(rpc_ws_url) = ctx.node_config().debug.rpc_consensus_ws.clone() { - info!(target: "reth::cli", "Using rpc provider as consensus client"); - - let block_provider = RpcBlockProvider::new(rpc_ws_url); - let rpc_consensus_client = DebugConsensusClient::new( - rpc_server_handles.auth.clone(), - Arc::new(block_provider), - ); - ctx.task_executor().spawn_critical("rpc consensus client", async move { - rpc_consensus_client.run::().await - }); - } - - let full_node = FullNode { - evm_config: ctx.components().evm_config().clone(), - block_executor: ctx.components().block_executor().clone(), - pool: ctx.components().pool().clone(), - network: ctx.components().network().clone(), - provider: ctx.node_adapter().provider.clone(), - payload_builder: ctx.components().payload_builder().clone(), - task_executor: ctx.task_executor().clone(), - config: ctx.node_config().clone(), - data_dir: ctx.data_dir().clone(), - add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, - }; - // Notify on node started - on_node_started.on_event(FullNode::clone(&full_node))?; - - let handle = NodeHandle { - node_exit_future: NodeExitFuture::new( - async { Ok(rx.await??) 
}, - full_node.config.debug.terminate, - ), - node: full_node, - }; - - Ok(handle) - } -} diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 62cfbac9bea8..610ca7bbc799 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy_primitives::{BlockNumber, B256}; use reth_config::{config::StageConfig, PruneConfig}; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -27,7 +27,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc>, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -75,7 +75,7 @@ pub fn build_pipeline( stage_config: &StageConfig, header_downloader: H, body_downloader: B, - consensus: Arc>, + consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index 1db9c1f6b9ff..31d847da7fbd 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -5,7 +5,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; @@ -72,7 +72,7 @@ where pub async fn get_single_body( client: Client, header: SealedHeader, - consensus: impl Consensus, + consensus: impl Consensus, ) -> Result> where Client: BodiesClient, diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 9629aecef9a2..0b57fa0110c6 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth reth-storage-api.workspace = true -reth-beacon-consensus.workspace = true reth-network-api.workspace = true reth-stages.workspace = true reth-prune-types.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 129fe20ea785..00817b6a8834 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -5,8 +5,9 @@ use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; -use reth_engine_primitives::ForkchoiceStatus; +use reth_engine_primitives::{ + BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, +}; use reth_network_api::PeersInfo; use reth_primitives_traits::{format_gas, format_gas_throughput, BlockBody, NodePrimitives}; use reth_prune_types::PrunerEvent; @@ -255,12 +256,12 @@ impl NodeState { hash=?block.hash(), peers=self.num_connected_peers(), txs=block.body().transactions().len(), - gas=%format_gas(block.header.gas_used()), - gas_throughput=%format_gas_throughput(block.header.gas_used(), elapsed), - full=%format!("{:.1}%", block.header.gas_used() as f64 * 100.0 / block.header.gas_limit() as f64), - base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas().unwrap_or(0) as f64 / GWEI_TO_WEI as f64), - 
blobs=block.header.blob_gas_used().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, - excess_blobs=block.header.excess_blob_gas().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, + gas=%format_gas(block.gas_used()), + gas_throughput=%format_gas_throughput(block.gas_used(), elapsed), + full=%format!("{:.1}%", block.gas_used() as f64 * 100.0 / block.gas_limit() as f64), + base_fee=%format!("{:.2}gwei", block.base_fee_per_gas().unwrap_or(0) as f64 / GWEI_TO_WEI as f64), + blobs=block.blob_gas_used().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, + excess_blobs=block.excess_blob_gas().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, ?elapsed, "Block added to canonical chain" ); diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index db4fd9ec01f2..55eb923470c4 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -6,7 +6,7 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher, Node}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; use reth_optimism_node::{args::RollupArgs, OpNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use tracing as _; @@ -29,7 +29,7 @@ fn main() { let op_node = OpNode::new(rollup_args.clone()); let handle = builder - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(op_node.components()) .with_add_ons(op_node.add_ons()) .launch_with_fn(|builder| { diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs index ab24ecf16c40..8282c58f6551 100644 --- a/crates/optimism/chainspec/src/base.rs +++ b/crates/optimism/chainspec/src/base.rs @@ -5,7 +5,7 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; -use reth_ethereum_forks::EthereumHardfork; +use reth_ethereum_forks::{EthereumHardfork, Hardfork}; use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs index 4ebf4d9a81e7..2b5434754501 100644 --- a/crates/optimism/chainspec/src/base_sepolia.rs +++ b/crates/optimism/chainspec/src/base_sepolia.rs @@ -4,7 +4,7 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; -use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OpHardfork; diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 47610bdfc416..e8f8a084e3c2 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -26,8 +26,6 @@ pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; use derive_more::{Constructor, Deref, Display, From, Into}; pub use dev::OP_DEV; -#[cfg(not(feature = "std"))] -pub(crate) use once_cell::sync::Lazy as LazyLock; pub use op::OP_MAINNET; use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; pub use op_sepolia::OP_SEPOLIA; @@ -38,8 +36,7 @@ use reth_chainspec::{ use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; use reth_optimism_forks::{OpHardfork, OpHardforks}; -#[cfg(feature = "std")] -pub(crate) use 
std::sync::LazyLock; +use reth_primitives_traits::sync::LazyLock; /// Chain spec builder for a OP stack chain. #[derive(Debug, Default, From)] @@ -316,6 +313,10 @@ impl Hardforks for OpChainSpec { } impl EthereumHardforks for OpChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.fork(fork) + } + fn get_final_paris_total_difficulty(&self) -> Option { self.inner.get_final_paris_total_difficulty() } @@ -325,7 +326,11 @@ impl EthereumHardforks for OpChainSpec { } } -impl OpHardforks for OpChainSpec {} +impl OpHardforks for OpChainSpec { + fn op_fork_activation(&self, fork: OpHardfork) -> ForkCondition { + self.fork(fork) + } +} impl From for OpChainSpec { fn from(genesis: Genesis) -> Self { diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 20a2ac60e220..9b2c98e61808 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -4,7 +4,7 @@ use crate::{LazyLock, OpChainSpec}; use alloc::{sync::Arc, vec}; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; -use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OpHardfork; diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index 3a60d49ed120..99702e6a17ac 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -4,7 +4,7 @@ use crate::{LazyLock, OpChainSpec}; use alloc::{sync::Arc, vec}; use alloy_chains::{Chain, NamedChain}; use alloy_primitives::{b256, U256}; -use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OpHardfork; diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 040ecdc00357..e564982cfd57 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -223,7 +223,7 @@ where // finally, write the receipts provider.write_state( - execution_outcome, + &execution_outcome, OriginalValuesKnown::Yes, StorageLocation::StaticFiles, )?; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 23f5206c6c03..024bb957f815 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -38,4 +38,19 @@ op-alloy-consensus.workspace = true reth-optimism-chainspec.workspace = true [features] +default = ["std"] +std = [ + "reth-chainspec/std", + "reth-consensus/std", + "reth-consensus-common/std", + "reth-primitives/std", + "reth-optimism-forks/std", + "reth-optimism-chainspec/std", + "reth-optimism-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "alloy-trie/std", + "op-alloy-consensus/std", +] optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 01f8f9a72f50..cedc8c462929 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -6,9 +6,13 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, 
feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +extern crate alloc; + +use alloc::sync::Arc; use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{B64, U256}; @@ -26,7 +30,6 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpBlock, OpBlockBody, OpPrimitives, OpReceipt}; use reth_primitives::{BlockWithSenders, GotExpected, SealedBlockFor, SealedHeader}; -use std::{sync::Arc, time::SystemTime}; mod proof; pub use proof::calculate_receipt_root_no_memo_optimism; @@ -61,6 +64,8 @@ impl FullConsensus for OpBeaconConsensus { } impl Consensus for OpBeaconConsensus { + type Error = ConsensusError; + fn validate_body_against_header( &self, body: &OpBlockBody, @@ -155,42 +160,32 @@ impl HeaderValidator for OpBeaconConsensus { _total_difficulty: U256, ) -> Result<(), ConsensusError> { // with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached. - let is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number); + debug_assert!( + self.chain_spec.is_bedrock_active_at_block(header.number), + "manually import OVM blocks" + ); - if is_post_merge { - if header.nonce != B64::ZERO { - return Err(ConsensusError::TheMergeNonceIsNotZero) - } + if header.nonce != B64::ZERO { + return Err(ConsensusError::TheMergeNonceIsNotZero) + } - if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { - return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) - } + if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { + return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) + } - // Post-merge, the consensus layer is expected to perform checks such that the block - // timestamp is a function of the slot. This is different from pre-merge, where blocks - // are only allowed to be in the future (compared to the system's clock) by a certain - // threshold. - // - // Block validation with respect to the parent should ensure that the block timestamp - // is greater than its parent timestamp. + // Post-merge, the consensus layer is expected to perform checks such that the block + // timestamp is a function of the slot. This is different from pre-merge, where blocks + // are only allowed to be in the future (compared to the system's clock) by a certain + // threshold. + // + // Block validation with respect to the parent should ensure that the block timestamp + // is greater than its parent timestamp. - // validate header extra data for all networks post merge - validate_header_extra_data(header)?; + // validate header extra data for all networks post merge + validate_header_extra_data(header)?; - // mixHash is used instead of difficulty inside EVM - // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty - } else { - // Check if timestamp is in the future. Clock can drift but this can be consensus issue. 
- let present_timestamp = - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); - - if header.exceeds_allowed_future_timestamp(present_timestamp) { - return Err(ConsensusError::TimestampIsInFuture { - timestamp: header.timestamp, - present_timestamp, - }) - } - } + // mixHash is used instead of difficulty inside EVM + // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty Ok(()) } diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index 6f86e70f9c33..e83990bdaba6 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,11 +1,12 @@ //! Helper function for Receipt root calculation for Optimism hardforks. +use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; -use reth_optimism_forks::OpHardfork; +use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_primitives::OpReceipt; use reth_primitives::ReceiptWithBloom; @@ -45,7 +46,7 @@ pub(crate) fn calculate_receipt_root_optimism( /// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized. pub fn calculate_receipt_root_no_memo_optimism( receipts: &[&OpReceipt], - chain_spec: impl reth_chainspec::Hardforks, + chain_spec: impl OpHardforks, timestamp: u64, ) -> B256 { // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, @@ -53,8 +54,8 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. 
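The gating below covers exactly that window between Regolith activation and the Canyon fix. As a standalone illustration (a minimal sketch, not part of this diff; the function name is hypothetical and `spec` is any `OpHardforks` implementor):

// Deposit nonces are stripped from receipts only while Regolith is active
// and Canyon is not yet, mirroring the op-geth/op-erigon quirk described above.
fn strips_deposit_nonce(spec: &impl OpHardforks, timestamp: u64) -> bool {
    spec.is_regolith_active_at_timestamp(timestamp) &&
        !spec.is_canyon_active_at_timestamp(timestamp)
}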
- if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) + if chain_spec.is_regolith_active_at_timestamp(timestamp) && + !chain_spec.is_canyon_active_at_timestamp(timestamp) { let receipts = receipts .iter() diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 9335917ddf9d..8aef0086375b 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,4 +1,5 @@ use crate::proof::calculate_receipt_root_optimism; +use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index e2ec79401c82..19b63d9fe033 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -74,7 +74,8 @@ std = [ "thiserror/std", "op-alloy-consensus/std", "reth-chainspec/std", - "reth-consensus-common/std" + "reth-optimism-consensus/std", + "reth-consensus-common/std", ] optimism = [ "reth-primitives/optimism", diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index 1ea23069a685..5ac5f6fe6ce4 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -23,6 +23,7 @@ alloy-primitives.workspace = true serde = { workspace = true, optional = true } # misc +auto_impl.workspace = true once_cell.workspace = true [features] diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 33877301c7d4..897ce510f170 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -1,6 +1,6 @@ use alloc::vec; use alloy_primitives::U256; -use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; +use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; #[cfg(not(feature = "std"))] use once_cell::sync::Lazy as LazyLock; diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 75d294c9b3e5..313fd67dde6f 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -1,6 +1,6 @@ //! Hard forks of optimism protocol. -use alloc::{boxed::Box, format, string::String, vec}; +use alloc::{format, string::String, vec}; use core::{ any::Any, fmt::{self, Display, Formatter}, diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 36f42155e942..fbe77aa20a7a 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -17,51 +17,56 @@ mod dev; pub use dev::DEV_HARDFORKS; pub use hardfork::OpHardfork; -use reth_ethereum_forks::EthereumHardforks; +use reth_ethereum_forks::{EthereumHardforks, ForkCondition}; /// Extends [`EthereumHardforks`] with optimism helper methods. +#[auto_impl::auto_impl(&, Arc)] pub trait OpHardforks: EthereumHardforks { + /// Retrieves [`ForkCondition`] by an [`OpHardfork`]. If `fork` is not present, returns + /// [`ForkCondition::Never`]. + fn op_fork_activation(&self, fork: OpHardfork) -> ForkCondition; + /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. 
fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(OpHardfork::Bedrock).active_at_block(block_number) + self.op_fork_activation(OpHardfork::Bedrock).active_at_block(block_number) } /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block /// timestamp. fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Regolith).active_at_timestamp(timestamp) } /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Canyon).active_at_timestamp(timestamp) } /// Returns `true` if [`Ecotone`](OpHardfork::Ecotone) is active at given block timestamp. fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Ecotone).active_at_timestamp(timestamp) } /// Returns `true` if [`Fjord`](OpHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Fjord).active_at_timestamp(timestamp) } /// Returns `true` if [`Granite`](OpHardfork::Granite) is active at given block timestamp. fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Granite).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Granite).active_at_timestamp(timestamp) } /// Returns `true` if [`Holocene`](OpHardfork::Holocene) is active at given block /// timestamp. fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Holocene).active_at_timestamp(timestamp) } /// Returns `true` if [`Isthmus`](OpHardfork::Isthmus) is active at given block /// timestamp. 
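Since every default helper in this trait routes through `op_fork_activation`, a downstream chain spec only has to supply that one method, just as the `OpChainSpec` impl earlier in this diff does. A minimal sketch, assuming a hypothetical `MyChainSpec` that already implements `EthereumHardforks` (so `Hardforks::fork` is available via the supertrait):

impl OpHardforks for MyChainSpec {
    fn op_fork_activation(&self, fork: OpHardfork) -> ForkCondition {
        // Forks absent from the configured list resolve to `ForkCondition::Never`,
        // so the `is_*_active_at_*` helpers simply return `false` for them.
        self.fork(fork)
    }
}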
fn is_isthmus_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Isthmus).active_at_timestamp(timestamp) } } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index cad466dfc8da..25adb3d10893 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -30,7 +30,6 @@ reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm.workspace = true reth-revm = { workspace = true, features = ["std"] } -reth-beacon-consensus.workspace = true reth-trie-db.workspace = true reth-rpc-server-types.workspace = true reth-rpc-types-compat.workspace = true @@ -94,7 +93,6 @@ optimism = [ "reth-provider/optimism", "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", "revm/optimism", "reth-optimism-rpc/optimism", "reth-engine-local/op", diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index c7482288f374..95875a767366 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -8,14 +8,14 @@ use reth_e2e_test_utils::{ use reth_node_api::NodeTypesWithDBAdapter; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type pub(crate) type OpNode = - NodeHelperType>>; + NodeHelperType>>; /// Creates the initial setup with `num_nodes` of the node config, started and connected. pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index fc0016fbcaf7..eba2aed422dc 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -5,7 +5,7 @@ use reth_node_api::{FullNodeComponents, NodeTypesWithDBAdapter}; use reth_node_builder::{Node, NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; use reth_optimism_node::{args::RollupArgs, OpNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; #[test] fn test_basic_setup() { @@ -16,7 +16,7 @@ fn test_basic_setup() { let op_node = OpNode::new(args); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types_and_provider::>>() + .with_types_and_provider::>>() .with_components(op_node.components()) .with_add_ons(op_node.add_ons()) .on_component_initialized(move |ctx| { diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index d34dac483672..defce4466267 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -29,7 +29,7 @@ use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::RecoveredTx; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; use reth_transaction_pool::{pool::BestPayloadTransactions, PoolTransaction}; use std::sync::Arc; @@ -148,7 +148,7 @@ async fn test_custom_block_priority_config() { let tasks = 
TaskManager::current(); let node_handle = NodeBuilder::new(config.clone()) .with_database(db) - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(build_components(config.chain.chain_id())) .with_add_ons(OpAddOns::default()) .launch_with_fn(|builder| { diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index b87b01b37b2b..7bf3f8015b71 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -434,7 +434,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header, "sealed built block"); + debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block"); // create the executed block data let executed: ExecutedBlock = ExecutedBlock { diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 3ba5df6968a8..5361d00be0d7 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -12,6 +12,7 @@ use op_alloy_network::Network; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; +use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpBlock, OpReceipt, OpTransactionSigned}; use reth_primitives::{logs_bloom, BlockBody, SealedBlockWithSenders}; use reth_provider::{ @@ -40,7 +41,7 @@ where Block = OpBlock, Receipt = OpReceipt, Header = reth_primitives::Header, - > + ChainSpecProvider + > + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, Evm: ConfigureEvm< diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 23589be58283..d017ea650e4a 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -34,11 +34,12 @@ secp256k1 = { workspace = true, features = [ k256.workspace = true # misc +auto_impl.workspace = true byteorder = { workspace = true, optional = true } bytes.workspace = true derive_more.workspace = true +once_cell.workspace = true serde_with = { workspace = true, optional = true } -auto_impl.workspace = true thiserror.workspace = true # required by reth-codecs @@ -87,6 +88,7 @@ std = [ "bytes/std", "derive_more/std", "k256/std", + "once_cell/std", "secp256k1?/std", "thiserror/std", "alloy-trie/std", diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 58fe3c4b43e9..279e7d45cc8d 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -13,9 +13,6 @@ pub trait FullBlockBody: BlockBody + MaybeSerdeBincod impl FullBlockBody for T where T: BlockBody + MaybeSerdeBincodeCompat {} -#[cfg(feature = "rayon")] -use rayon::prelude::*; - /// Abstraction for block's body. 
pub trait BlockBody: Send @@ -115,14 +112,7 @@ pub trait BlockBody: where Self::Transaction: SignedTransaction, { - #[cfg(feature = "rayon")] - { - self.transactions().into_par_iter().map(|tx| tx.recover_signer()).collect() - } - #[cfg(not(feature = "rayon"))] - { - self.transactions().iter().map(|tx| tx.recover_signer()).collect() - } + crate::transaction::recover::recover_signers(self.transactions()) } /// Recover signer addresses for all transactions in the block body _without ensuring that the @@ -133,14 +123,7 @@ pub trait BlockBody: where Self::Transaction: SignedTransaction, { - #[cfg(feature = "rayon")] - { - self.transactions().into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } - #[cfg(not(feature = "rayon"))] - { - self.transactions().iter().map(|tx| tx.recover_signer_unchecked()).collect() - } + crate::transaction::recover::recover_signers_unchecked(self.transactions()) } } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 4c98a94b318a..85e29995f4b6 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -88,3 +88,56 @@ where (self.header, self.body) } } + +/// An extension trait for [`Block`]s that allows for mutable access to the block's internals. +/// +/// This allows for modifying the block's header and body for testing purposes. +#[cfg(any(test, feature = "test-utils"))] +pub trait TestBlock: Block { + /// Returns mutable reference to block body. + fn body_mut(&mut self) -> &mut Self::Body; + + /// Returns mutable reference to block header. + fn header_mut(&mut self) -> &mut Self::Header; + + /// Updates the block header. + fn set_header(&mut self, header: Self::Header); + + /// Updates the parent block hash. + fn set_parent_hash(&mut self, hash: alloy_primitives::BlockHash) { + crate::header::test_utils::TestHeader::set_parent_hash(self.header_mut(), hash); + } + + /// Updates the block number. + fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { + crate::header::test_utils::TestHeader::set_block_number(self.header_mut(), number); + } + + /// Updates the block state root. + fn set_state_root(&mut self, state_root: alloy_primitives::B256) { + crate::header::test_utils::TestHeader::set_state_root(self.header_mut(), state_root); + } + + /// Updates the block difficulty. 
+ fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { + crate::header::test_utils::TestHeader::set_difficulty(self.header_mut(), difficulty); + } +} + +#[cfg(any(test, feature = "test-utils"))] +impl TestBlock for alloy_consensus::Block +where + T: SignedTransaction, +{ + fn body_mut(&mut self) -> &mut Self::Body { + &mut self.body + } + + fn header_mut(&mut self) -> &mut Self::Header { + &mut self.header + } + + fn set_header(&mut self, header: Self::Header) { + self.header = header + } +} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index ef8b5fde5e91..4b1a83fb50d7 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -119,10 +119,28 @@ impl Decodable for SealedHeader { } } +impl From> for Sealed { + fn from(value: SealedHeader) -> Self { + Self::new_unchecked(value.header, value.hash) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let header = H::arbitrary(u)?; + + Ok(Self::seal(header)) + } +} + #[cfg(any(test, feature = "test-utils"))] -impl SealedHeader { +impl SealedHeader { /// Updates the block header. - pub fn set_header(&mut self, header: Header) { + pub fn set_header(&mut self, header: H) { self.header = header } @@ -131,42 +149,29 @@ impl SealedHeader { self.hash = hash } + /// Returns a mutable reference to the header. + pub fn header_mut(&mut self) -> &mut H { + &mut self.header + } + /// Updates the parent block hash. pub fn set_parent_hash(&mut self, hash: BlockHash) { - self.header.parent_hash = hash + self.header.set_parent_hash(hash); } /// Updates the block number. pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { - self.header.number = number; + self.header.set_block_number(number); } /// Updates the block state root. pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { - self.header.state_root = state_root; + self.header.set_state_root(state_root); } /// Updates the block difficulty. pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { - self.header.difficulty = difficulty; - } -} - -impl From> for Sealed { - fn from(value: SealedHeader) -> Self { - Self::new_unchecked(value.header, value.hash) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader -where - H: for<'b> arbitrary::Arbitrary<'b> + Sealable, -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = H::arbitrary(u)?; - - Ok(Self::seal(header)) + self.header.set_difficulty(difficulty); } } diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index 0e79f6cb462f..58237fbca105 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,10 +1,45 @@ -//! Test utilities to generate random valid headers. +//! Test utilities for the block header. use alloy_consensus::Header; -use alloy_primitives::B256; +use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; +/// A helper trait for [`Header`]s that allows for mutable access to the headers values. +/// +/// This allows for modifying the header for testing purposes. 
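The generic `SealedHeader` setters above delegate to the `TestHeader` trait defined next. A minimal usage sketch under the `test-utils` feature; note that the setters mutate only the header, so the cached hash goes stale unless `set_hash` is also called, which is acceptable for tests:

#[cfg(test)]
mod tests {
    use alloy_consensus::Header;
    use alloy_primitives::U256;
    use reth_primitives_traits::SealedHeader;

    #[test]
    fn mutate_sealed_header() {
        // Seal a default header, then rewrite fields through the generic setters.
        let mut sealed = SealedHeader::seal(Header::default());
        sealed.set_block_number(100);
        sealed.set_difficulty(U256::from(1u64));
        assert_eq!(sealed.header().number, 100);
    }
}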
+pub trait TestHeader { + /// Updates the parent block hash. + fn set_parent_hash(&mut self, hash: BlockHash); + + /// Updates the block number. + fn set_block_number(&mut self, number: BlockNumber); + + /// Updates the block state root. + fn set_state_root(&mut self, state_root: B256); + + /// Updates the block difficulty. + fn set_difficulty(&mut self, difficulty: U256); +} + +impl TestHeader for Header { + fn set_parent_hash(&mut self, hash: BlockHash) { + self.parent_hash = hash + } + + fn set_block_number(&mut self, number: BlockNumber) { + self.number = number; + } + + fn set_state_root(&mut self, state_root: B256) { + self.state_root = state_root; + } + + fn set_difficulty(&mut self, difficulty: U256) { + self.difficulty = difficulty; + } +} + /// Generates a header which is valid __with respect to past and future forks__. This means, for /// example, that if the withdrawals root is present, the base fee per gas is also present. /// diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 74d10fc3c84f..bad587e0f67d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -16,6 +16,7 @@ //! - `serde`: Adds serde support for all types. //! - `secp256k1`: Adds secp256k1 support for transaction signing/recovery. (By default the no-std //! friendly `k256` is used) +//! - `rayon`: Uses `rayon` for parallel transaction sender recovery in [`BlockBody`] by default. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -69,10 +70,10 @@ pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; +pub mod sync; + /// Common header types pub mod header; -#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] -pub use header::test_utils; pub use header::{Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. @@ -132,3 +133,11 @@ pub trait MaybeSerdeBincodeCompat {} impl MaybeSerdeBincodeCompat for T where T: crate::serde_bincode_compat::SerdeBincodeCompat {} #[cfg(not(feature = "serde-bincode-compat"))] impl MaybeSerdeBincodeCompat for T {} + +/// Utilities for testing. +#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] +pub mod test_utils { + pub use crate::header::test_utils::{generate_valid_header, valid_header_strategy}; + #[cfg(feature = "test-utils")] + pub use crate::{block::TestBlock, header::test_utils::TestHeader}; +} diff --git a/crates/primitives-traits/src/sync.rs b/crates/primitives-traits/src/sync.rs new file mode 100644 index 000000000000..353278da7499 --- /dev/null +++ b/crates/primitives-traits/src/sync.rs @@ -0,0 +1,9 @@ +//! 
Lock synchronization primitives + +use once_cell as _; + +#[cfg(not(feature = "std"))] +pub use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; + +#[cfg(feature = "std")] +pub use std::sync::{LazyLock, OnceLock}; diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 15b3df7fdb8b..43fe7899d99b 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -5,6 +5,7 @@ pub mod signature; pub mod signed; pub mod error; +pub mod recover; pub use alloy_consensus::transaction::{TransactionInfo, TransactionMeta}; diff --git a/crates/primitives-traits/src/transaction/recover.rs b/crates/primitives-traits/src/transaction/recover.rs new file mode 100644 index 000000000000..cad57bc26607 --- /dev/null +++ b/crates/primitives-traits/src/transaction/recover.rs @@ -0,0 +1,68 @@ +//! Helpers for recovering signers from a set of transactions + +#[cfg(feature = "rayon")] +pub use rayon::*; + +#[cfg(not(feature = "rayon"))] +pub use iter::*; + +#[cfg(feature = "rayon")] +mod rayon { + use crate::SignedTransaction; + use alloc::vec::Vec; + use alloy_primitives::Address; + use rayon::prelude::{IntoParallelIterator, ParallelIterator}; + + /// Recovers a list of signers from a transaction list iterator. + /// + /// Returns `None`, if some transaction's signature is invalid + pub fn recover_signers<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, + { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } + + /// Recovers a list of signers from a transaction list iterator _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid. + pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, + { + txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} + +#[cfg(not(feature = "rayon"))] +mod iter { + use crate::SignedTransaction; + use alloc::vec::Vec; + use alloy_primitives::Address; + + /// Recovers a list of signers from a transaction list iterator. + /// + /// Returns `None`, if some transaction's signature is invalid + pub fn recover_signers<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoIterator + IntoIterator, + { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } + + /// Recovers a list of signers from a transaction list iterator _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid. 
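With the `sync` module above, downstream crates can drop the cfg-gated `once_cell`/`std` imports that this diff removes elsewhere: one import path yields `std::sync::LazyLock` under the `std` feature and `once_cell::sync::Lazy` without it. A minimal sketch (the static itself is hypothetical):

use reth_primitives_traits::sync::LazyLock;

// The same definition compiles for both std and no-std consumers.
static DEFAULT_CHAIN_ID: LazyLock<u64> = LazyLock::new(|| 1);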
+ pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoIterator + IntoIterator, + { + txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index e7036f1752bc..2ccaf4b0d067 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -53,7 +53,6 @@ derive_more.workspace = true modular-bitfield = { workspace = true, optional = true } once_cell.workspace = true rand = { workspace = true, optional = true } -rayon.workspace = true serde.workspace = true serde_with = { workspace = true, optional = true } @@ -64,7 +63,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } # eth reth-chainspec = { workspace = true, features = ["arbitrary"] } reth-codecs = { workspace = true, features = ["test-utils"] } -reth-primitives-traits = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary", "test-utils"] } reth-testing-utils.workspace = true reth-trie-common = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, features = ["arbitrary"] } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 3cff1646e435..fd0dc0cee40a 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -4,8 +4,10 @@ use crate::{ }; use alloc::vec::Vec; use alloy_consensus::Header; -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_eips::{ + eip1898::BlockWithParent, eip2718::Encodable2718, eip4895::Withdrawals, BlockNumHash, +}; +use alloy_primitives::{Address, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -162,12 +164,10 @@ impl BlockWithSenders { /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct SealedBlock { /// Locked block header. - #[deref] - #[deref_mut] - pub header: SealedHeader, + header: SealedHeader, /// Block body. body: B, } @@ -185,11 +185,34 @@ impl SealedBlock { self.header.hash() } + /// Returns reference to block header. + pub const fn header(&self) -> &H { + self.header.header() + } + /// Returns reference to block body. pub const fn body(&self) -> &B { &self.body } + /// Returns the Sealed header. + pub const fn sealed_header(&self) -> &SealedHeader { + &self.header + } + + /// Clones the wrapped header and returns a [`SealedHeader`] sealed with the hash. + pub fn clone_sealed_header(&self) -> SealedHeader + where + H: Clone, + { + self.header.clone() + } + + /// Consumes the block and returns the sealed header. + pub fn into_sealed_header(self) -> SealedHeader { + self.header + } + /// Consumes the block and returns the header. pub fn into_header(self) -> H { self.header.unseal() @@ -202,25 +225,11 @@ impl SealedBlock { /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components #[inline] - pub fn split_header_body(self) -> (SealedHeader, B) { + pub fn split(self) -> (SealedHeader, B) { (self.header, self.body) } } -impl SealedBlock { - /// Returns whether or not the block contains any blob transactions. 
- #[inline] - pub fn has_eip4844_transactions(&self) -> bool { - self.body.has_eip4844_transactions() - } - - /// Returns whether or not the block contains any eip-7702 transactions. - #[inline] - pub fn has_eip7702_transactions(&self) -> bool { - self.body.has_eip7702_transactions() - } -} - impl SealedBlock where B: reth_primitives_traits::BlockBody, @@ -248,6 +257,16 @@ where H: alloy_consensus::BlockHeader, B: reth_primitives_traits::BlockBody, { + /// Return the number hash tuple. + pub fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.number(), self.hash()) + } + + /// Return a [`BlockWithParent`] for this header. + pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } + } + /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -359,16 +378,6 @@ where { Block::new(self.header.unseal(), self.body) } - - /// Returns a vector of encoded 2718 transactions. - /// - /// This is also known as `raw transactions`. - /// - /// See also [`Encodable2718`]. - #[doc(alias = "raw_transactions")] - pub fn encoded_2718_transactions(&self) -> Vec { - self.body.encoded_2718_transactions() - } } impl InMemorySize for SealedBlock { @@ -394,6 +403,14 @@ where } } +impl Deref for SealedBlock { + type Target = H; + + fn deref(&self) -> &Self::Target { + self.header.header() + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock where @@ -405,6 +422,52 @@ where } } + +#[cfg(any(test, feature = "test-utils"))] +impl SealedBlock +where + H: reth_primitives_traits::test_utils::TestHeader, +{ + /// Returns a mutable reference to the header. + pub fn header_mut(&mut self) -> &mut H { + self.header.header_mut() + } + + /// Returns a mutable reference to the body. + pub fn body_mut(&mut self) -> &mut B { + &mut self.body + } + + /// Updates the block header. + pub fn set_header(&mut self, header: H) { + self.header.set_header(header) + } + + /// Updates the block hash. + pub fn set_hash(&mut self, hash: alloy_primitives::BlockHash) { + self.header.set_hash(hash); + } + + /// Updates the parent block hash. + pub fn set_parent_hash(&mut self, hash: alloy_primitives::BlockHash) { + self.header.set_parent_hash(hash); + } + + /// Updates the block number. + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { + self.header.set_block_number(number); + } + + /// Updates the block state root. + pub fn set_state_root(&mut self, state_root: B256) { + self.header.set_state_root(state_root); + } + + /// Updates the block difficulty. + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { + self.header.set_difficulty(difficulty); + } +} + /// A helper trait to construct [`SealedBlock`] from a [`reth_primitives_traits::Block`].
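With the `header` field no longer public, call sites migrate to the accessors added above, or lean on the new `Deref` impl whose target is now the unsealed header type `H`. A minimal before/after sketch, assuming `block: SealedBlock<Header, BlockBody>`:

// before: block.header.gas_used(), block.header.clone(), block.split_header_body()
let gas = block.header().gas_used(); // borrow the unsealed header (or rely on Deref)
let sealed = block.clone_sealed_header(); // owned SealedHeader<H> when needed
let (sealed_header, body) = block.split(); // replaces split_header_body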
pub type SealedBlockFor<B> = SealedBlock< <B as reth_primitives_traits::Block>::Header, <B as reth_primitives_traits::Block>::Body, >; @@ -447,7 +510,7 @@ impl SealedBlockWithSenders { #[inline] pub fn unseal(self) -> BlockWithSenders<B> { let (block, senders) = self.into_components(); - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); let header = header.unseal(); BlockWithSenders::new_unchecked(B::new(header, body), senders) } @@ -493,17 +556,22 @@ impl SealedBlockWithSenders { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { +impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlockWithSenders<B> +where + B: reth_primitives_traits::Block + arbitrary::Arbitrary<'a>, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { - let block: SealedBlock = SealedBlock::arbitrary(u)?; + let block = B::arbitrary(u)?; let senders = block - .body - .transactions + .body() + .transactions() .iter() .map(|tx| tx.recover_signer().unwrap()) .collect::<Vec<_>>(); + let (header, body) = block.split(); + let block = SealedBlock::new(SealedHeader::seal(header), body); Ok(Self { block, senders }) } } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index bee59a647e96..6189eb10c20c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -21,15 +21,13 @@ use alloy_rlp::{Decodable, Encodable, Header}; use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; use once_cell as _; -#[cfg(not(feature = "std"))] -use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; #[cfg(feature = "optimism")] use op_alloy_consensus::DepositTransaction; #[cfg(feature = "optimism")] use op_alloy_consensus::TxDeposit; pub use pooled::PooledTransactionsElementEcRecovered; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; pub use reth_primitives_traits::{ + sync::{LazyLock, OnceLock}, transaction::{ error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, @@ -42,8 +40,6 @@ use reth_primitives_traits::{InMemorySize, SignedTransaction}; use revm_primitives::{AuthorizationList, TxEnv}; use serde::{Deserialize, Serialize}; pub use signature::{recover_signer, recover_signer_unchecked}; -#[cfg(feature = "std")] -use std::sync::{LazyLock, OnceLock}; pub use tx_type::TxType; /// Handling transaction signature operations, including signature recovery, @@ -54,15 +50,6 @@ pub mod util; mod pooled; mod tx_type; -/// Expected number of transactions where we can expect a speed-up by recovering the senders in -/// parallel. -pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock<usize> = - LazyLock::new(|| match rayon::current_num_threads() { - 0..=1 => usize::MAX, - 2..=8 => 10, - _ => 5, - }); - /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -1727,37 +1714,6 @@ pub mod serde_bincode_compat { } } -/// Recovers a list of signers from a transaction list iterator. -/// -/// Returns `None`, if some transaction's signature is invalid -pub fn recover_signers<'a, I, T>(txes: I, num_txes: usize) -> Option<Vec<Address>> -where - T: SignedTransaction, - I: IntoParallelIterator<Item = &'a T> + IntoIterator<Item = &'a T> + Send, -{ - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer()).collect() - } -} - -/// Recovers a list of signers from a transaction list iterator _without ensuring that the -/// signature has a low `s` value_.
-/// -/// Returns `None`, if some transaction's signature is invalid. -pub fn recover_signers_unchecked<'a, I, T>(txes: I, num_txes: usize) -> Option<Vec<Address>> -where - T: SignedTransaction, - I: IntoParallelIterator<Item = &'a T> + IntoIterator<Item = &'a T> + Send, -{ - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } -} - #[cfg(test)] mod tests { use crate::{ diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 9f9e989dc06a..c1e23063fe6f 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -147,7 +147,7 @@ mod tests { use super::*; use alloy_primitives::B256; use reth_provider::{ - providers::BlockchainProvider2, + providers::BlockchainProvider, test_utils::{create_test_provider_factory, MockEthProvider}, }; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; @@ -198,7 +198,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Since there are no transactions, expected None let range = input.get_next_tx_num_range(&provider).expect("Expected range"); @@ -236,7 +236,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Get the next tx number range let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); @@ -282,7 +282,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Fetch the range and check if it is correct let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); @@ -318,7 +318,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Get the last tx number // Calculate the total number of transactions diff --git a/crates/prune/prune/src/segments/user/history.rs b/crates/prune/prune/src/segments/user/history.rs index e27884a92780..4e2218af23fb 100644 --- a/crates/prune/prune/src/segments/user/history.rs +++ b/crates/prune/prune/src/segments/user/history.rs @@ -125,7 +125,7 @@ where cursor.delete_current()?; // Upsert will replace the last shard for this sharded key with // the previous value.
- cursor.upsert(RawKey::new(key), prev_value)?; + cursor.upsert(RawKey::new(key), &prev_value)?; Ok(PruneShardOutcome::Updated) } // If there's no previous shard for this sharded key, @@ -151,7 +151,7 @@ where } else { cursor.upsert( RawKey::new(key), - RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)), + &RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)), )?; Ok(PruneShardOutcome::Updated) } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index ebdfeb6127f5..a4c33d8716e9 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -51,7 +51,6 @@ tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } [dev-dependencies] reth-chainspec.workspace = true -reth-beacon-consensus.workspace = true reth-network-api.workspace = true reth-network-peers.workspace = true reth-ethereum-evm.workspace = true diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 5e0111db68c3..bb56916249e6 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,6 +16,7 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` +//! use reth_consensus::{ConsensusError, FullConsensus}; //! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; @@ -67,7 +68,7 @@ //! CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Consensus: FullConsensus + Clone + 'static, //! Validator: PayloadValidator, //! { //! // configure the rpc module per transport @@ -99,6 +100,7 @@ //! //! //! ``` +//! use reth_consensus::{ConsensusError, FullConsensus}; //! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; @@ -159,7 +161,7 @@ //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Consensus: FullConsensus + Clone + 'static, //! Validator: PayloadValidator, //! { //! // configure the rpc module per transport @@ -226,7 +228,7 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; @@ -298,7 +300,7 @@ pub async fn launch, block_executor: BlockExecutor, - consensus: Arc>, + consensus: Arc>, payload_validator: Arc>, ) -> Result where @@ -684,7 +686,7 @@ where Transaction = ::SignedTx, >, BlockExecutor: BlockExecutorProvider, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -1347,7 +1349,8 @@ where /// Instantiates `ValidationApi` pub fn validation_api(&self) -> ValidationApi where - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: + FullConsensus + Clone + 'static, Provider: BlockReader::Block>, { ValidationApi::new( @@ -1379,7 +1382,7 @@ where >, >, BlockExecutor: BlockExecutorProvider, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 79c1d8b42f48..20de310cbcb2 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -4,9 +4,9 @@ use std::{ }; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; -use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; use reth_consensus::noop::NoopConsensus; +use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_ethereum_evm::{execute::EthExecutionStrategyFactory, EthEvmConfig}; use reth_evm::execute::BasicBlockExecutorProvider; diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index ae3fc490d5c7..2b4560028db5 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -14,10 +14,8 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives.workspace = true reth-rpc-api.workspace = true reth-storage-api.workspace = true -reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true @@ -50,9 +48,10 @@ parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +reth-primitives.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-testing-utils.workspace = true alloy-rlp.workspace = true -assert_matches.workspace = true \ No newline at end of file +assert_matches.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index db27d8a1e35d..fa3fba285745 100644 --- 
a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -16,15 +16,13 @@ use alloy_rpc_types_engine::{ use async_trait::async_trait; use jsonrpsee_core::RpcResult; use parking_lot::Mutex; -use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_chainspec::{EthereumHardforks, Hardforks}; -use reth_engine_primitives::{EngineTypes, EngineValidator}; +use reth_chainspec::{EthereumHardfork, EthereumHardforks}; +use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::EthereumHardfork; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::convert_to_payload_body_v1; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; @@ -616,7 +614,7 @@ where let merge_terminal_td = self .inner .chain_spec - .fork(EthereumHardfork::Paris) + .ethereum_fork_activation(EthereumHardfork::Paris) .ttd() .expect("the engine API should not be running for chains w/o paris"); @@ -1025,7 +1023,7 @@ mod tests { use super::*; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; - use reth_chainspec::{ChainSpec, MAINNET}; + use reth_chainspec::{ChainSpec, EthereumHardfork, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 4210d415bfed..f5c12f80053f 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -2,8 +2,7 @@ use alloy_primitives::{B256, U256}; use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; -use reth_beacon_consensus::BeaconForkChoiceUpdateError; -use reth_engine_primitives::BeaconOnNewPayloadError; +use reth_engine_primitives::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::EngineObjectValidationError; use thiserror::Error; diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 2aecb500dd5e..91bedbeb532e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -126,10 +126,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA base_fee_params, ) } else { - base_block - .header - .next_block_base_fee(base_fee_params) - .unwrap_or_default() + base_block.next_block_base_fee(base_fee_params).unwrap_or_default() }; block_env.basefee = U256::from(base_fee); } else { diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 66c6b5a27a92..bb4c9c5ebf58 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -496,12 +496,12 @@ pub trait Trace: db, cfg, block_env, - block.header.parent_beacon_block_root(), + block.parent_beacon_block_root(), ) .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?; system_caller - .pre_block_blockhashes_contract_call(db, cfg, block_env, block.header.parent_hash()) + .pre_block_blockhashes_contract_call(db, cfg, 
block_env, block.parent_hash()) .map_err(|_| { EthApiError::EvmCustom("failed to apply blockhashes system call".to_string()) })?; diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 75cfece9d564..8ee6d5c861a4 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -114,7 +114,10 @@ where .chain_spec .get_final_paris_total_difficulty() .is_some(), - terminal_total_difficulty: self.chain_spec.fork(EthereumHardfork::Paris).ttd(), + terminal_total_difficulty: self + .chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .ttd(), deposit_contract_address: self.chain_spec.deposit_contract().map(|dc| dc.address), ..self.chain_spec.genesis().config.clone() }; @@ -125,7 +128,7 @@ where $( // don't overwrite if already set if $config.$field.is_none() { - $config.$field = match self.chain_spec.fork(EthereumHardfork::$fork) { + $config.$field = match self.chain_spec.ethereum_fork_activation(EthereumHardfork::$fork) { ForkCondition::Block(block) => Some(block), ForkCondition::TTD { fork_block, .. } => fork_block, ForkCondition::Timestamp(ts) => Some(ts), diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 33e32aec0281..d906419021b9 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -314,7 +314,7 @@ where if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? { all_traces.extend( self.extract_reward_traces( - block.header.header(), + block.header(), block.body().ommers(), base_block_reward, ) @@ -391,11 +391,9 @@ where maybe_traces.map(|traces| traces.into_iter().flatten().collect::>()); if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) { - if let Some(base_block_reward) = - self.calculate_base_block_reward(block.header.header())? - { + if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? 
{ traces.extend(self.extract_reward_traces( - block.block.header(), + block.header(), block.body().ommers(), base_block_reward, )); @@ -490,7 +488,7 @@ where Ok(Some(BlockOpcodeGas { block_hash: block.hash(), - block_number: block.header.number(), + block_number: block.number(), transactions, })) } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index cb3ab4f296cf..1c40004f8bf3 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -43,7 +43,7 @@ where /// Create a new instance of the [`ValidationApi`] pub fn new( provider: Provider, - consensus: Arc>, + consensus: Arc>, executor_provider: E, config: ValidationApiConfig, task_spawner: Box, @@ -102,10 +102,10 @@ where message: BidTrace, registered_gas_limit: u64, ) -> Result<(), ValidationApiError> { - self.validate_message_against_header(&block.header, &message)?; + self.validate_message_against_header(block.sealed_header(), &message)?; - self.consensus.validate_header_with_total_difficulty(&block.header, U256::MAX)?; - self.consensus.validate_header(&block.header)?; + self.consensus.validate_header_with_total_difficulty(block.sealed_header(), U256::MAX)?; + self.consensus.validate_header(block.sealed_header())?; self.consensus.validate_block_pre_execution(&block)?; if !self.disallow.is_empty() { @@ -130,15 +130,14 @@ where let latest_header = self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; - if latest_header.hash() != block.header.parent_hash() { + if latest_header.hash() != block.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: block.header.parent_hash(), expected: latest_header.hash() } - .into(), + GotExpected { got: block.parent_hash(), expected: latest_header.hash() }.into(), ) .into()) } - self.consensus.validate_header_against_parent(&block.header, &latest_header)?; - self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; + self.consensus.validate_header_against_parent(block.sealed_header(), &latest_header)?; + self.validate_gas_limit(registered_gas_limit, &latest_header, block.sealed_header())?; let latest_header_hash = latest_header.hash(); let state_provider = self.provider.state_by_block_hash(latest_header_hash)?; @@ -461,7 +460,7 @@ pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. - consensus: Arc>, + consensus: Arc>, /// Execution payload validator. payload_validator: Arc::Block>>, /// Block executor factory. 
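The recurring bound rewrite in this section, `FullConsensus` picking up an `Error = ConsensusError` parameter, pins the trait's associated error type so that consensus implementations can be stored and shared as trait objects. A toy mirror of the pattern is sketched below; the trait, method, and types are simplified stand-ins that only shadow reth's names, not its real signatures.

```rust
use std::sync::Arc;

// Stand-in for reth's concrete consensus error type.
#[derive(Debug)]
struct ConsensusError(String);

// Toy consensus trait with an associated error, mirroring the shape of the
// bound change above.
trait FullConsensus {
    type Error;
    fn validate_header(&self, gas_used: u64, gas_limit: u64) -> Result<(), Self::Error>;
}

struct NoopConsensus;

impl FullConsensus for NoopConsensus {
    type Error = ConsensusError;
    fn validate_header(&self, gas_used: u64, gas_limit: u64) -> Result<(), ConsensusError> {
        if gas_used > gas_limit {
            return Err(ConsensusError(format!("gas used {gas_used} above limit {gas_limit}")));
        }
        Ok(())
    }
}

// Pinning `Error = ConsensusError` is what keeps the trait object usable:
// `dyn FullConsensus` without the associated type specified does not compile.
type SharedConsensus = Arc<dyn FullConsensus<Error = ConsensusError> + Send + Sync>;

fn main() {
    let consensus: SharedConsensus = Arc::new(NoopConsensus);
    assert!(consensus.validate_header(10, 100).is_ok());
    assert!(consensus.validate_header(200, 100).is_err());
}
```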
diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 2c1174d63292..d6bf4414450f 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -151,7 +151,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { .unwrap(); let second_block = blocks.get_mut(1).unwrap(); let cloned_second = second_block.clone(); - let mut updated_header = cloned_second.header.clone().unseal(); + let mut updated_header = cloned_second.header().clone(); updated_header.state_root = root; *second_block = SealedBlock::new(SealedHeader::seal(updated_header), cloned_second.into_body()); @@ -185,7 +185,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let last_block = blocks.last_mut().unwrap(); let cloned_last = last_block.clone(); - let mut updated_header = cloned_last.header.clone().unseal(); + let mut updated_header = cloned_last.header().clone(); updated_header.state_root = root; *last_block = SealedBlock::new(SealedHeader::seal(updated_header), cloned_last.into_body()); diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 0ee5f800ed1e..bfa66af2a4b2 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -30,11 +30,11 @@ //! # use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB}; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::StageConfig; -//! # use reth_consensus::Consensus; +//! # use reth_consensus::{Consensus, ConsensusError}; //! # use reth_consensus::test_utils::TestConsensus; //! # //! # let chain_spec = MAINNET.clone(); -//! # let consensus: Arc = Arc::new(TestConsensus::default()); +//! # let consensus: Arc> = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), //! # consensus.clone().as_header_validator() diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 0b52f2c5d304..16e91e132dfd 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -44,7 +44,7 @@ use crate::{ }; use alloy_primitives::B256; use reth_config::config::StageConfig; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; use reth_provider::HeaderSyncGapProvider; @@ -102,7 +102,7 @@ where pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc>, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -185,7 +185,7 @@ where /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. 
- consensus: Arc>, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -203,7 +203,7 @@ where pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc>, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -236,7 +236,7 @@ where provider: P, tip: watch::Receiver, header_downloader: H, - consensus: Arc>, + consensus: Arc>, stages_config: StageConfig, ) -> StageSetBuilder where diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index b17ad3562a09..51941183953e 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -582,7 +582,7 @@ mod tests { ..Default::default() }, ); - self.db.insert_headers_with_td(blocks.iter().map(|block| &block.header))?; + self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; if let Some(progress) = blocks.get(start as usize) { // Insert last progress data { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 71aff566481f..eb2d9d5b1c07 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -442,7 +442,7 @@ where let time = Instant::now(); // write output - provider.write_state(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; + provider.write_state(&state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 551c10d7711f..976c775d1ab1 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -100,7 +100,7 @@ impl AccountHashingStage { provider.tx_ref().cursor_write::()?; accounts.sort_by(|a, b| a.0.cmp(&b.0)); for (addr, acc) in &accounts { - account_cursor.append(*addr, *acc)?; + account_cursor.append(*addr, acc)?; } let mut acc_changeset_cursor = @@ -113,7 +113,7 @@ impl AccountHashingStage { bytecode_hash: None, }; let acc_before_tx = AccountBeforeTx { address: *addr, info: Some(prev_acc) }; - acc_changeset_cursor.append(t, acc_before_tx)?; + acc_changeset_cursor.append(t, &acc_before_tx)?; } } @@ -202,7 +202,7 @@ where let (key, value) = item?; hashed_account_cursor - .append(RawKey::::from_vec(key), RawValue::::from_vec(value))?; + .append(RawKey::::from_vec(key), &RawValue::::from_vec(value))?; } } else { // Aggregate all transition changesets and make a list of accounts that have been diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 6075e62158fd..c9b959e2595d 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -344,7 +344,7 @@ mod tests { BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..3, ..Default::default() }, ); - self.db.insert_headers(blocks.iter().map(|block| &block.header))?; + self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?; let iter = blocks.iter(); let mut next_tx_num = 0; @@ -373,7 +373,7 @@ mod tests { tx, (block_number, *addr).into(), new_entry, - progress.header.number == stage_progress, + progress.number == stage_progress, )?; } @@ -392,7 +392,7 @@ mod tests { key: keccak256("mining"), value: U256::from(rng.gen::()), }, - progress.header.number == stage_progress, + 
progress.number == stage_progress, )?; } @@ -533,7 +533,7 @@ mod tests { } if !entry.value.is_zero() { - storage_cursor.upsert(bn_address.address(), entry)?; + storage_cursor.upsert(bn_address.address(), &entry)?; } } Ok(()) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index bf6611d9ed88..f411060bcca3 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -184,12 +184,12 @@ where if first_sync { cursor_header_numbers.append( RawKey::::from_vec(hash), - RawValue::::from_vec(number), + &RawValue::::from_vec(number), )?; } else { cursor_header_numbers.insert( RawKey::::from_vec(hash), - RawValue::::from_vec(number), + &RawValue::::from_vec(number), )?; } } @@ -660,7 +660,7 @@ mod tests { provider .append_blocks_with_state( sealed_blocks, - ExecutionOutcome::default(), + &ExecutionOutcome::default(), HashedPostStateSorted::default(), TrieUpdates::default(), ) diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index f697ced2dc81..a2b4655835cc 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -525,7 +525,7 @@ mod tests { stage_progress, BlockParams { parent: preblocks.last().map(|b| b.hash()), ..Default::default() }, ) - .split_header_body(); + .split(); let mut header = header.unseal(); header.state_root = state_root( @@ -648,7 +648,7 @@ mod tests { if !value.is_zero() { let storage_entry = StorageEntry { key: hashed_slot, value }; - storage_cursor.upsert(hashed_address, storage_entry).unwrap(); + storage_cursor.upsert(hashed_address, &storage_entry).unwrap(); } } } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index fad86848fa77..9144d4dc8c44 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -357,7 +357,7 @@ mod tests { { let provider_rw = db.factory.provider_rw().unwrap(); let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); - cursor.insert(key, Default::default()).unwrap(); + cursor.insert(key, &Default::default()).unwrap(); provider_rw.commit().unwrap(); assert!(matches!( diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 8d768265465f..34598714a18b 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -207,7 +207,7 @@ where } } }; - senders_cursor.append(tx_id, sender)?; + senders_cursor.append(tx_id, &sender)?; processed_transactions += 1; } } diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index dd15c4f43fca..4e3f4a8776ed 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -164,9 +164,9 @@ where let key = RawKey::::from_vec(hash); if append_only { - txhash_cursor.append(key, RawValue::::from_vec(number))? + txhash_cursor.append(key, &RawValue::::from_vec(number))? } else { - txhash_cursor.insert(key, RawValue::::from_vec(number))? + txhash_cursor.insert(key, &RawValue::::from_vec(number))? 
} } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 169d556348b2..add013d40710 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -224,9 +224,9 @@ where let value = BlockNumberList::new_pre_sorted(chunk); if append_only { - cursor.append(key, value)?; + cursor.append(key, &value)?; } else { - cursor.upsert(key, value)?; + cursor.upsert(key, &value)?; } } } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 5e4c61b6fd36..59ba08df8aa0 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -235,7 +235,7 @@ impl TestStageDB { .then(|| provider.latest_writer(StaticFileSegment::Headers).unwrap()); blocks.iter().try_for_each(|block| { - Self::insert_header(headers_writer.as_mut(), &tx, &block.header, U256::ZERO) + Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header(), U256::ZERO) })?; if let Some(mut writer) = headers_writer { @@ -396,7 +396,7 @@ impl TestStageDB { { cursor.delete_current()?; } - cursor.upsert(address, entry)?; + cursor.upsert(address, &entry)?; let mut cursor = tx.cursor_dup_write::()?; if cursor @@ -406,7 +406,7 @@ impl TestStageDB { { cursor.delete_current()?; } - cursor.upsert(hashed_address, hashed_entry)?; + cursor.upsert(hashed_address, &hashed_entry)?; Ok(()) }) diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 7653d0d5af3d..fcbbb9e3b0a3 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -292,7 +292,7 @@ mod tests { let tx = db.factory.db_ref().tx_mut().expect("init tx"); for block in &blocks { - TestStageDB::insert_header(None, &tx, &block.header, U256::ZERO) + TestStageDB::insert_header(None, &tx, block.sealed_header(), U256::ZERO) .expect("insert block header"); } tx.commit().expect("commit tx"); diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 9297f738ab5a..4a7fccc1280a 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -104,17 +104,17 @@ pub trait DbDupCursorRO { pub trait DbCursorRW { /// Database operation that will update an existing row if a specified value already /// exists in a table, and insert a new row if the specified value doesn't already exist - fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + fn upsert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>; /// Database operation that will insert a row at a given key. If the key is already /// present, the operation will result in an error. - fn insert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + fn insert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>; /// Append value to next cursor item. /// /// This is efficient for pre-sorted data. If the data is not pre-sorted, use /// [`DbCursorRW::insert`]. 
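The signature change threaded through all of these call sites, `T::Value` becoming `&T::Value`, lets a caller write the same value under several keys without giving up ownership, since compressing a value only needs a borrow. The following is a simplified sketch of the idea; the `Compress` trait and cursor are toy stand-ins, not reth-db-api's real definitions.

```rust
use std::collections::BTreeMap;

// Toy version of the by-reference compression hook.
trait Compress {
    fn compress_to_buf(&self, buf: &mut Vec<u8>);
}

impl Compress for u64 {
    fn compress_to_buf(&self, buf: &mut Vec<u8>) {
        buf.extend_from_slice(&self.to_be_bytes());
    }
}

struct Cursor {
    table: BTreeMap<u64, Vec<u8>>,
}

impl Cursor {
    // Taking `&V` means the caller keeps ownership: the same value can be
    // upserted under several keys with no clone at each call site.
    fn upsert<V: Compress>(&mut self, key: u64, value: &V) {
        let mut buf = Vec::new();
        value.compress_to_buf(&mut buf);
        self.table.insert(key, buf);
    }
}

fn main() {
    let mut cursor = Cursor { table: BTreeMap::new() };
    let value: u64 = 42;
    cursor.upsert(1, &value);
    cursor.upsert(2, &value); // `value` is still owned and usable here
    assert_eq!(cursor.table.len(), 2);
}
```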
- fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + fn append(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>; /// Delete current value that cursor points to fn delete_current(&mut self) -> Result<(), DatabaseError>; diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 5580727fdbed..ece47f81ee5a 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -220,7 +220,7 @@ impl DbCursorRW for CursorMock { fn upsert( &mut self, _key: ::Key, - _value: ::Value, + _value: &::Value, ) -> Result<(), DatabaseError> { Ok(()) } @@ -228,7 +228,7 @@ impl DbCursorRW for CursorMock { fn insert( &mut self, _key: ::Key, - _value: ::Value, + _value: &::Value, ) -> Result<(), DatabaseError> { Ok(()) } @@ -236,7 +236,7 @@ impl DbCursorRW for CursorMock { fn append( &mut self, _key: ::Key, - _value: ::Value, + _value: &::Value, ) -> Result<(), DatabaseError> { Ok(()) } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index 5301ec303e50..c252d5ee0c87 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -165,7 +165,7 @@ impl Compress for IntegerList { self.to_bytes() } - fn compress_to_buf>(self, buf: &mut B) { + fn compress_to_buf>(&self, buf: &mut B) { self.to_mut_bytes(buf) } } diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index e818a1a478d0..232e257a1dc8 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -194,8 +194,8 @@ macro_rules! impl_compression_for_compact { impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Compress for $name$(<$($generic),*>)? { type Compressed = Vec; - fn compress_to_buf>(self, buf: &mut B) { - let _ = Compact::to_compact(&self, buf); + fn compress_to_buf>(&self, buf: &mut B) { + let _ = Compact::to_compact(self, buf); } } @@ -253,8 +253,8 @@ macro_rules! impl_compression_fixed_compact { Some(self.as_ref()) } - fn compress_to_buf>(self, buf: &mut B) { - let _ = Compact::to_compact(&self, buf); + fn compress_to_buf>(&self, buf: &mut B) { + let _ = Compact::to_compact(self, buf); } } diff --git a/crates/storage/db-api/src/scale.rs b/crates/storage/db-api/src/scale.rs index 591635be054e..2ab1c3b5e819 100644 --- a/crates/storage/db-api/src/scale.rs +++ b/crates/storage/db-api/src/scale.rs @@ -21,7 +21,7 @@ where parity_scale_codec::Encode::encode(&self) } - fn compress_to_buf>(self, buf: &mut B) { + fn compress_to_buf>(&self, buf: &mut B) { parity_scale_codec::Encode::encode_to(&self, OutputCompat::wrap_mut(buf)); } } diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index a4d3f87b40b5..5715852a5ddd 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -32,7 +32,7 @@ pub trait Compress: Send + Sync + Sized + Debug { } /// Compresses data to a given buffer. - fn compress_to_buf>(self, buf: &mut B); + fn compress_to_buf>(&self, buf: &mut B); } /// Trait that will transform the data to be read from the DB. @@ -132,7 +132,7 @@ pub trait TableImporter: DbTxMut { for kv in source_tx.cursor_read::()?.walk(None)? { let (k, v) = kv?; - destination_cursor.append(k, v)?; + destination_cursor.append(k, &v)?; } Ok(()) @@ -157,7 +157,7 @@ pub trait TableImporter: DbTxMut { }; for row in source_range? 
{ let (key, value) = row?; - destination_cursor.append(key, value)?; + destination_cursor.append(key, &value)?; } Ok(()) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 30b5bd2c885c..9cc1e8d2c05d 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -252,7 +252,11 @@ where Vec::new(), ); - provider.write_state(execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?; + provider.write_state( + &execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; trace!(target: "reth::cli", "Inserted state"); diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index b8102326d0a2..abfc8be33daa 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -137,7 +137,7 @@ where let tx = db.tx_mut().expect("tx"); let mut crsr = tx.cursor_write::().expect("cursor"); for (k, _, v, _) in input { - crsr.append(k, v).expect("submit"); + crsr.append(k, &v).expect("submit"); } tx.inner.commit().unwrap() }, @@ -157,7 +157,7 @@ where let mut crsr = tx.cursor_write::().expect("cursor"); for index in RANDOM_INDEXES { let (k, _, v, _) = input.get(index).unwrap().clone(); - crsr.insert(k, v).expect("submit"); + crsr.insert(k, &v).expect("submit"); } tx.inner.commit().unwrap() diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index e4e87014eb88..cb145789de9d 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -184,7 +184,7 @@ where let mut crsr = tx.cursor_write::().expect("cursor"); black_box({ for (k, v) in input { - crsr.append(k, v).expect("submit"); + crsr.append(k, &v).expect("submit"); } tx.inner.commit().unwrap() @@ -202,7 +202,7 @@ where let mut crsr = tx.cursor_write::().expect("cursor"); black_box({ for (k, v) in input { - crsr.insert(k, v).expect("submit"); + crsr.insert(k, &v).expect("submit"); } tx.inner.commit().unwrap() diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 756a622bcb03..ec5f3b7c2824 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -241,7 +241,7 @@ impl DbCursorRW for Cursor { /// it will append the value to the subkey, even if the subkeys are the same. So if you want /// to properly upsert, you'll need to `seek_exact` & `delete_current` if the key+subkey was /// found, before calling `upsert`. - fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + fn upsert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = compress_to_buf_or_ref!(self, value); self.execute_with_operation_metric( @@ -263,7 +263,7 @@ impl DbCursorRW for Cursor { ) } - fn insert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + fn insert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = compress_to_buf_or_ref!(self, value); self.execute_with_operation_metric( @@ -287,7 +287,7 @@ impl DbCursorRW for Cursor { /// Appends the data to the end of the table. 
Consequently, the append operation /// will fail if the inserted key is less than the last table key - fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + fn append(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = compress_to_buf_or_ref!(self, value); self.execute_with_operation_metric( diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 8c3d36308892..d2e0d91b1d23 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -479,7 +479,7 @@ impl DatabaseEnv { if Some(&version) != last_version.as_ref() { version_cursor.upsert( SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(), - version, + &version, )?; tx.commit()?; } @@ -580,8 +580,8 @@ mod tests { let entry_0 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(0) }; let entry_1 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(1) }; - dup_cursor.upsert(Address::with_last_byte(1), entry_0).expect(ERROR_UPSERT); - dup_cursor.upsert(Address::with_last_byte(1), entry_1).expect(ERROR_UPSERT); + dup_cursor.upsert(Address::with_last_byte(1), &entry_0).expect(ERROR_UPSERT); + dup_cursor.upsert(Address::with_last_byte(1), &entry_1).expect(ERROR_UPSERT); assert_eq!( dup_cursor.walk(None).unwrap().collect::, _>>(), @@ -910,12 +910,12 @@ mod tests { let mut cursor = tx.cursor_write::().unwrap(); // INSERT - assert_eq!(cursor.insert(key_to_insert, B256::ZERO), Ok(())); + assert_eq!(cursor.insert(key_to_insert, &B256::ZERO), Ok(())); assert_eq!(cursor.current(), Ok(Some((key_to_insert, B256::ZERO)))); // INSERT (failure) assert_eq!( - cursor.insert(key_to_insert, B256::ZERO), + cursor.insert(key_to_insert, &B256::ZERO), Err(DatabaseWriteError { info: Error::KeyExist.into(), operation: DatabaseWriteOperation::CursorInsert, @@ -947,11 +947,11 @@ mod tests { let subkey2 = B256::random(); let entry1 = StorageEntry { key: subkey1, value: U256::ZERO }; - assert!(dup_cursor.insert(key, entry1).is_ok()); + assert!(dup_cursor.insert(key, &entry1).is_ok()); // Can't insert let entry2 = StorageEntry { key: subkey2, value: U256::ZERO }; - assert!(dup_cursor.insert(key, entry2).is_err()); + assert!(dup_cursor.insert(key, &entry2).is_err()); } #[test] @@ -964,9 +964,9 @@ mod tests { let key3 = Address::with_last_byte(3); let mut cursor = tx.cursor_write::().unwrap(); - assert!(cursor.insert(key1, Account::default()).is_ok()); - assert!(cursor.insert(key2, Account::default()).is_ok()); - assert!(cursor.insert(key3, Account::default()).is_ok()); + assert!(cursor.insert(key1, &Account::default()).is_ok()); + assert!(cursor.insert(key2, &Account::default()).is_ok()); + assert!(cursor.insert(key3, &Account::default()).is_ok()); // Seek & delete key2 cursor.seek_exact(key2).unwrap(); @@ -1002,7 +1002,7 @@ mod tests { assert_eq!(cursor.current(), Ok(Some((9, B256::ZERO)))); for pos in (2..=8).step_by(2) { - assert_eq!(cursor.insert(pos, B256::ZERO), Ok(())); + assert_eq!(cursor.insert(pos, &B256::ZERO), Ok(())); assert_eq!(cursor.current(), Ok(Some((pos, B256::ZERO)))); } tx.commit().expect(ERROR_COMMIT); @@ -1031,7 +1031,7 @@ mod tests { let key_to_append = 5; let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); - assert_eq!(cursor.append(key_to_append, B256::ZERO), Ok(())); + assert_eq!(cursor.append(key_to_append, &B256::ZERO), Ok(())); 
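The cursor tests around this point exercise the ordering contract of the write API: `append` only accepts keys at or past the current end of the table, while `insert` rejects keys that already exist. A toy model of those semantics, under the simplifying assumption that appending an equal key is allowed:

```rust
use std::collections::BTreeMap;

// Stand-ins for the database write errors seen in the tests above.
#[derive(Debug, PartialEq)]
enum WriteError {
    KeyMismatch,
    KeyExist,
}

struct Cursor {
    table: BTreeMap<u64, u64>,
}

impl Cursor {
    // Append is only valid for pre-sorted input: a key behind the current
    // maximum is rejected, mirroring the KeyMismatch failures in the tests.
    fn append(&mut self, key: u64, value: &u64) -> Result<(), WriteError> {
        if self.table.keys().next_back().is_some_and(|last| *last > key) {
            return Err(WriteError::KeyMismatch);
        }
        self.table.insert(key, *value);
        Ok(())
    }

    // Insert fails if the key is already present, mirroring KeyExist.
    fn insert(&mut self, key: u64, value: &u64) -> Result<(), WriteError> {
        if self.table.contains_key(&key) {
            return Err(WriteError::KeyExist);
        }
        self.table.insert(key, *value);
        Ok(())
    }
}

fn main() {
    let mut cursor = Cursor { table: BTreeMap::new() };
    assert_eq!(cursor.append(5, &0), Ok(()));
    assert_eq!(cursor.append(3, &0), Err(WriteError::KeyMismatch)); // out of order
    assert_eq!(cursor.insert(5, &0), Err(WriteError::KeyExist)); // duplicate key
}
```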
tx.commit().expect(ERROR_COMMIT); // Confirm the result @@ -1059,7 +1059,7 @@ mod tests { let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); assert_eq!( - cursor.append(key_to_append, B256::ZERO), + cursor.append(key_to_append, &B256::ZERO), Err(DatabaseWriteError { info: Error::KeyMismatch.into(), operation: DatabaseWriteOperation::CursorAppend, @@ -1088,15 +1088,15 @@ mod tests { let key = Address::random(); let account = Account::default(); - cursor.upsert(key, account).expect(ERROR_UPSERT); + cursor.upsert(key, &account).expect(ERROR_UPSERT); assert_eq!(cursor.seek_exact(key), Ok(Some((key, account)))); let account = Account { nonce: 1, ..Default::default() }; - cursor.upsert(key, account).expect(ERROR_UPSERT); + cursor.upsert(key, &account).expect(ERROR_UPSERT); assert_eq!(cursor.seek_exact(key), Ok(Some((key, account)))); let account = Account { nonce: 2, ..Default::default() }; - cursor.upsert(key, account).expect(ERROR_UPSERT); + cursor.upsert(key, &account).expect(ERROR_UPSERT); assert_eq!(cursor.seek_exact(key), Ok(Some((key, account)))); let mut dup_cursor = tx.cursor_dup_write::().unwrap(); @@ -1104,12 +1104,12 @@ mod tests { let value = U256::from(1); let entry1 = StorageEntry { key: subkey, value }; - dup_cursor.upsert(key, entry1).expect(ERROR_UPSERT); + dup_cursor.upsert(key, &entry1).expect(ERROR_UPSERT); assert_eq!(dup_cursor.seek_by_key_subkey(key, subkey), Ok(Some(entry1))); let value = U256::from(2); let entry2 = StorageEntry { key: subkey, value }; - dup_cursor.upsert(key, entry2).expect(ERROR_UPSERT); + dup_cursor.upsert(key, &entry2).expect(ERROR_UPSERT); assert_eq!(dup_cursor.seek_by_key_subkey(key, subkey), Ok(Some(entry1))); assert_eq!(dup_cursor.next_dup_val(), Ok(Some(entry2))); } @@ -1127,7 +1127,7 @@ mod tests { .try_for_each(|val| { cursor.append( transition_id, - AccountBeforeTx { address: Address::with_last_byte(val), info: None }, + &AccountBeforeTx { address: Address::with_last_byte(val), info: None }, ) }) .expect(ERROR_APPEND); @@ -1153,7 +1153,7 @@ mod tests { assert_eq!( cursor.append( transition_id - 1, - AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } + &AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } ), Err(DatabaseWriteError { info: Error::KeyMismatch.into(), @@ -1166,7 +1166,7 @@ mod tests { assert_eq!( cursor.append( transition_id, - AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } + &AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } ), Ok(()) ); diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 453116ee5e35..18fe0da23cd8 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -168,7 +168,7 @@ impl Compress for RawValue { self.value } - fn compress_to_buf>(self, buf: &mut B) { + fn compress_to_buf>(&self, buf: &mut B) { buf.put_slice(self.value.as_slice()) } } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 84808ed7c381..0955821b4237 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } reth-primitives-traits = { workspace = true, features = ["reth-codec"] } @@ -43,7 
+42,7 @@ alloy-consensus.workspace = true revm.workspace = true # optimism -reth-optimism-primitives = { workspace = true, optional = true } +reth-optimism-primitives = { workspace = true, features = ["reth-codec"], optional = true } # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 20c371cdf1c5..1c19e8260b8d 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,6 +1,6 @@ #![allow(unused)] use crate::{ - providers::{ConsistentProvider, StaticFileProvider}, + providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, @@ -30,7 +30,7 @@ use reth_primitives::{ Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionSigned, }; -use reth_primitives_traits::BlockBody as _; +use reth_primitives_traits::BlockBody; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -51,15 +51,13 @@ use std::{ }; use tracing::trace; -use crate::providers::ProviderNodeTypes; - /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] -pub struct BlockchainProvider2 { +pub struct BlockchainProvider { /// Provider factory used to access the database. pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical @@ -67,7 +65,7 @@ pub struct BlockchainProvider2 { pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } -impl Clone for BlockchainProvider2 { +impl Clone for BlockchainProvider { fn clone(&self) -> Self { Self { database: self.database.clone(), @@ -76,8 +74,8 @@ impl Clone for BlockchainProvider2 { } } -impl BlockchainProvider2 { - /// Create a new [`BlockchainProvider2`] using only the storage, fetching the latest +impl BlockchainProvider { + /// Create a new [`BlockchainProvider`] using only the storage, fetching the latest /// header from the database to initialize the provider. 
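As the doc comment above notes, construction reads the latest stored header once and uses it to seed the in-memory canonical view. A stand-in sketch of that constructor shape; all types here are simplified placeholders rather than reth's real `ProviderFactory` and `CanonicalInMemoryState`.

```rust
// Stand-in header type.
#[derive(Clone, Debug, PartialEq)]
struct SealedHeader {
    number: u64,
}

// Stand-in for the database-backed provider factory.
struct ProviderFactory {
    headers: Vec<SealedHeader>,
}

impl ProviderFactory {
    fn latest_header(&self) -> Option<SealedHeader> {
        self.headers.last().cloned()
    }
}

struct BlockchainProvider {
    database: ProviderFactory,
    canonical_head: Option<SealedHeader>,
}

impl BlockchainProvider {
    // Mirrors the documented behavior of `BlockchainProvider::new`: fetch the
    // latest header from storage once and initialize the in-memory state.
    fn new(database: ProviderFactory) -> Self {
        let canonical_head = database.latest_header();
        Self { database, canonical_head }
    }
}

fn main() {
    let factory = ProviderFactory { headers: vec![SealedHeader { number: 7 }] };
    let provider = BlockchainProvider::new(factory);
    assert_eq!(provider.canonical_head, Some(SealedHeader { number: 7 }));
    assert_eq!(provider.database.headers.len(), 1);
}
```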
pub fn new(storage: ProviderFactory) -> ProviderResult { let provider = storage.provider()?; @@ -160,11 +158,11 @@ impl BlockchainProvider2 { } } -impl NodePrimitivesProvider for BlockchainProvider2 { +impl NodePrimitivesProvider for BlockchainProvider { type Primitives = N::Primitives; } -impl DatabaseProviderFactory for BlockchainProvider2 { +impl DatabaseProviderFactory for BlockchainProvider { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; type ProviderRW = as DatabaseProviderFactory>::ProviderRW; @@ -178,17 +176,17 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } } -impl StateCommitmentProvider for BlockchainProvider2 { +impl StateCommitmentProvider for BlockchainProvider { type StateCommitment = N::StateCommitment; } -impl StaticFileProviderFactory for BlockchainProvider2 { +impl StaticFileProviderFactory for BlockchainProvider { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } -impl HeaderProvider for BlockchainProvider2 { +impl HeaderProvider for BlockchainProvider { type Header = HeaderTy; fn header(&self, block_hash: &BlockHash) -> ProviderResult> { @@ -237,7 +235,7 @@ impl HeaderProvider for BlockchainProvider2 { } } -impl BlockHashReader for BlockchainProvider2 { +impl BlockHashReader for BlockchainProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.consistent_provider()?.block_hash(number) } @@ -251,7 +249,7 @@ impl BlockHashReader for BlockchainProvider2 { } } -impl BlockNumReader for BlockchainProvider2 { +impl BlockNumReader for BlockchainProvider { fn chain_info(&self) -> ProviderResult { Ok(self.canonical_in_memory_state.chain_info()) } @@ -269,7 +267,7 @@ impl BlockNumReader for BlockchainProvider2 { } } -impl BlockIdReader for BlockchainProvider2 { +impl BlockIdReader for BlockchainProvider { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.canonical_in_memory_state.pending_block_num_hash()) } @@ -283,7 +281,7 @@ impl BlockIdReader for BlockchainProvider2 { } } -impl BlockReader for BlockchainProvider2 { +impl BlockReader for BlockchainProvider { type Block = BlockTy; fn find_block_by_hash( @@ -355,7 +353,7 @@ impl BlockReader for BlockchainProvider2 { } } -impl TransactionsProvider for BlockchainProvider2 { +impl TransactionsProvider for BlockchainProvider { type Transaction = TxTy; fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { @@ -421,7 +419,7 @@ impl TransactionsProvider for BlockchainProvider2 { } } -impl ReceiptProvider for BlockchainProvider2 { +impl ReceiptProvider for BlockchainProvider { type Receipt = ReceiptTy; fn receipt(&self, id: TxNumber) -> ProviderResult> { @@ -447,13 +445,13 @@ impl ReceiptProvider for BlockchainProvider2 { } } -impl ReceiptProviderIdExt for BlockchainProvider2 { +impl ReceiptProviderIdExt for BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block_id(block) } } -impl WithdrawalsProvider for BlockchainProvider2 { +impl WithdrawalsProvider for BlockchainProvider { fn withdrawals_by_block( &self, id: BlockHashOrNumber, @@ -463,13 +461,13 @@ impl WithdrawalsProvider for BlockchainProvider2 { } } -impl OmmersProvider for BlockchainProvider2 { +impl OmmersProvider for BlockchainProvider { fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.consistent_provider()?.ommers(id) } } -impl BlockBodyIndicesProvider for BlockchainProvider2 { +impl BlockBodyIndicesProvider for BlockchainProvider { fn block_body_indices( &self, 
number: BlockNumber, @@ -478,7 +476,7 @@ impl BlockBodyIndicesProvider for BlockchainProvider2 { } } -impl StageCheckpointReader for BlockchainProvider2 { +impl StageCheckpointReader for BlockchainProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.consistent_provider()?.get_stage_checkpoint(id) } @@ -492,7 +490,7 @@ impl StageCheckpointReader for BlockchainProvider2 { } } -impl PruneCheckpointReader for BlockchainProvider2 { +impl PruneCheckpointReader for BlockchainProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -505,7 +503,7 @@ impl PruneCheckpointReader for BlockchainProvider2 { } } -impl ChainSpecProvider for BlockchainProvider2 { +impl ChainSpecProvider for BlockchainProvider { type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { @@ -513,7 +511,7 @@ impl ChainSpecProvider for BlockchainProvider2 { } } -impl StateProviderFactory for BlockchainProvider2 { +impl StateProviderFactory for BlockchainProvider { /// Storage provider for latest block fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); @@ -622,7 +620,7 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl HashedPostStateProvider for BlockchainProvider2 { +impl HashedPostStateProvider for BlockchainProvider { fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { HashedPostState::from_bundle_state::<::KeyHasher>( bundle_state.state(), @@ -630,7 +628,7 @@ impl HashedPostStateProvider for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 { +impl CanonChainTracker for BlockchainProvider { type Header = HeaderTy; fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -663,7 +661,7 @@ impl CanonChainTracker for BlockchainProvider2 { } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider where Self: ReceiptProviderIdExt, { @@ -701,13 +699,13 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider2 { +impl CanonStateSubscriptions for BlockchainProvider { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider2 { +impl ForkChoiceSubscriptions for BlockchainProvider { type Header = HeaderTy; fn subscribe_safe_block(&self) -> ForkChoiceNotifications { @@ -721,7 +719,7 @@ impl ForkChoiceSubscriptions for BlockchainProvider2 { } } -impl StorageChangeSetReader for BlockchainProvider2 { +impl StorageChangeSetReader for BlockchainProvider { fn storage_changeset( &self, block_number: BlockNumber, @@ -730,7 +728,7 @@ impl StorageChangeSetReader for BlockchainProvider2 { } } -impl ChangeSetReader for BlockchainProvider2 { +impl ChangeSetReader for BlockchainProvider { fn account_block_changeset( &self, block_number: BlockNumber, @@ -739,14 +737,14 @@ impl ChangeSetReader for BlockchainProvider2 { } } -impl AccountReader for BlockchainProvider2 { +impl AccountReader for BlockchainProvider { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { self.consistent_provider()?.basic_account(address) } } -impl StateReader for BlockchainProvider2 { +impl StateReader for BlockchainProvider { type Receipt = ReceiptTy; /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. 
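Nearly every reader impl above follows one pattern: first obtain a `ConsistentProvider`, then answer the query from that snapshot, so a single call never mixes two chain views. A toy mirror of the delegation; the types are stand-ins, and reth's version also captures a database transaction alongside the in-memory state.

```rust
// Frozen view of the chain at the moment it was created.
struct ConsistentProvider {
    tip: u64,
}

impl ConsistentProvider {
    fn block_hash(&self, number: u64) -> Option<u64> {
        (number <= self.tip).then_some(number * 31) // fake "hash"
    }
}

struct BlockchainProvider {
    tip: u64,
}

impl BlockchainProvider {
    // In reth this atomically captures in-memory state plus a DB transaction;
    // here it just freezes the current tip.
    fn consistent_provider(&self) -> ConsistentProvider {
        ConsistentProvider { tip: self.tip }
    }

    fn block_hash(&self, number: u64) -> Option<u64> {
        // Delegate per call, mirroring `self.consistent_provider()?.block_hash(number)`.
        self.consistent_provider().block_hash(number)
    }
}

fn main() {
    let provider = BlockchainProvider { tip: 10 };
    assert_eq!(provider.block_hash(5), Some(155));
    assert_eq!(provider.block_hash(11), None); // beyond the frozen tip
}
```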
@@ -769,7 +767,7 @@ impl StateReader for BlockchainProvider2 { #[cfg(test)] mod tests { use crate::{ - providers::BlockchainProvider2, + providers::BlockchainProvider, test_utils::{ create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, @@ -797,7 +795,7 @@ mod tests { use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{BlockExt, EthPrimitives, Receipt, SealedBlock, StaticFileSegment}; - use reth_primitives_traits::{BlockBody as _, SignedTransaction}; + use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, @@ -810,7 +808,7 @@ mod tests { }; use revm::db::BundleState; use std::{ - ops::{Bound, Range, RangeBounds}, + ops::{Bound, Deref, Range, RangeBounds}, sync::Arc, time::Instant, }; @@ -860,7 +858,7 @@ mod tests { in_memory_blocks: usize, block_range_params: BlockRangeParams, ) -> eyre::Result<( - BlockchainProvider2, + BlockchainProvider, Vec, Vec, Vec>, @@ -916,7 +914,7 @@ mod tests { // Commit to both storages: database and static files UnifiedStorageWriter::commit(provider_rw)?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // Insert the rest of the blocks and receipts into the in-memory state let chain = NewCanonicalChain::Commit { @@ -948,9 +946,9 @@ mod tests { let finalized_block = blocks.get(block_count - 3).unwrap(); // Set the canonical head, safe, and finalized blocks - provider.set_canonical_head(canonical_block.header.clone()); - provider.set_safe(safe_block.header.clone()); - provider.set_finalized(finalized_block.header.clone()); + provider.set_canonical_head(canonical_block.clone_sealed_header()); + provider.set_safe(safe_block.clone_sealed_header()); + provider.set_finalized(finalized_block.clone_sealed_header()); Ok((provider, database_blocks.clone(), in_memory_blocks.clone(), receipts)) } @@ -962,7 +960,7 @@ mod tests { in_memory_blocks: usize, block_range_params: BlockRangeParams, ) -> eyre::Result<( - BlockchainProvider2, + BlockchainProvider, Vec, Vec, Vec>, @@ -982,7 +980,7 @@ mod tests { /// This simulates a RPC method having a different view than when its database transaction was /// created. 
fn persist_block_after_db_tx_creation( - provider: BlockchainProvider2, + provider: BlockchainProvider, block_number: BlockNumber, ) { let hook_provider = provider.clone(); @@ -1035,7 +1033,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // Useful blocks let first_db_block = database_blocks.first().unwrap(); @@ -1133,7 +1131,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // First in memory block let first_in_mem_block = in_memory_blocks.first().unwrap(); @@ -1357,7 +1355,7 @@ mod tests { let in_memory_block = in_memory_blocks.last().unwrap().clone(); // make sure that the finalized block is on db let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); - provider.set_finalized(finalized_block.header.clone()); + provider.set_finalized(finalized_block.clone_sealed_header()); let blocks = [database_blocks, in_memory_blocks].concat(); @@ -1376,7 +1374,7 @@ mod tests { blocks .iter() .take_while(|header| header.number <= 8) - .map(|b| b.header.clone()) + .map(|b| b.clone_sealed_header()) .collect::>() ); @@ -1397,7 +1395,7 @@ mod tests { provider_rw.insert_historical_block(block_1)?; provider_rw.commit()?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // Subscribe twice for canonical state updates. let in_memory_state = provider.canonical_in_memory_state(); @@ -1548,38 +1546,38 @@ mod tests { let block_number = database_block.number; assert_eq!( provider.header_by_number_or_tag(block_number.into()).unwrap(), - Some(database_block.header.clone().unseal()) + Some(database_block.header().clone()) ); assert_eq!( - provider.sealed_header_by_number_or_tag(block_number.into()).unwrap(), - Some(database_block.header) + provider.sealed_header_by_number_or_tag(block_number.into())?, + Some(database_block.clone_sealed_header()) ); assert_eq!( provider.header_by_number_or_tag(BlockNumberOrTag::Latest).unwrap(), - Some(canonical_block.header.clone().unseal()) + Some(canonical_block.header().clone()) ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Latest).unwrap(), - Some(canonical_block.header) + Some(canonical_block.clone_sealed_header()) ); assert_eq!( provider.header_by_number_or_tag(BlockNumberOrTag::Safe).unwrap(), - Some(safe_block.header.clone().unseal()) + Some(safe_block.header().clone()) ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Safe).unwrap(), - Some(safe_block.header) + Some(safe_block.clone_sealed_header()) ); assert_eq!( provider.header_by_number_or_tag(BlockNumberOrTag::Finalized).unwrap(), - Some(finalized_block.header.clone().unseal()) + Some(finalized_block.header().clone()) ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Finalized).unwrap(), - Some(finalized_block.header) + Some(finalized_block.clone_sealed_header()) ); Ok(()) @@ -1603,20 +1601,20 @@ mod tests { assert_eq!( provider.header_by_id(block_number.into()).unwrap(), - Some(database_block.header.clone().unseal()) + Some(database_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_number.into()).unwrap(), - Some(database_block.header.clone()) + Some(database_block.clone_sealed_header()) ); assert_eq!( provider.header_by_id(block_hash.into()).unwrap(), - Some(database_block.header.clone().unseal()) + 
Some(database_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_hash.into()).unwrap(), - Some(database_block.header) + Some(database_block.clone_sealed_header()) ); let block_number = in_memory_block.number; @@ -1624,20 +1622,20 @@ mod tests { assert_eq!( provider.header_by_id(block_number.into()).unwrap(), - Some(in_memory_block.header.clone().unseal()) + Some(in_memory_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_number.into()).unwrap(), - Some(in_memory_block.header.clone()) + Some(in_memory_block.clone_sealed_header()) ); assert_eq!( provider.header_by_id(block_hash.into()).unwrap(), - Some(in_memory_block.header.clone().unseal()) + Some(in_memory_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_hash.into()).unwrap(), - Some(in_memory_block.header) + Some(in_memory_block.clone_sealed_header()) ); Ok(()) @@ -1810,7 +1808,7 @@ mod tests { .into_iter() .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) .collect(), - ExecutionOutcome { + &ExecutionOutcome { bundle: BundleState::new( database_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) @@ -1833,7 +1831,7 @@ mod tests { )?; provider_rw.commit()?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); let chain = NewCanonicalChain::Commit { @@ -2023,7 +2021,7 @@ mod tests { ); // test state by block tag for safe block let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - in_memory_provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); + in_memory_provider.canonical_in_memory_state.set_safe(safe_block.clone_sealed_header()); assert_eq!( safe_block.hash(), in_memory_provider @@ -2033,7 +2031,9 @@ mod tests { ); // test state by block tag for finalized block let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - in_memory_provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); + in_memory_provider + .canonical_in_memory_state + .set_finalized(finalized_block.clone_sealed_header()); assert_eq!( finalized_block.hash(), in_memory_provider @@ -2106,11 +2106,11 @@ mod tests { // Set the safe block in memory let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); + provider.canonical_in_memory_state.set_safe(safe_block.clone_sealed_header()); // Set the finalized block in memory let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); + provider.canonical_in_memory_state.set_finalized(finalized_block.clone_sealed_header()); // Verify the pending block number and hash assert_eq!( @@ -2325,7 +2325,7 @@ mod tests { // instead start end test_by_block_range!([ (headers_range, |block: &SealedBlock| block.header().clone()), - (sealed_headers_range, |block: &SealedBlock| block.header.clone()), + (sealed_headers_range, |block: &SealedBlock| block.clone_sealed_header()), (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() @@ -2458,7 +2458,7 @@ mod tests { header_by_number, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( block.number, - Some(block.header.header().clone()) + Some(block.header().clone()) ), u64::MAX ), @@ 
-2467,7 +2467,7 @@ mod tests { sealed_header, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( block.number, - Some(block.header.clone()) + Some(block.clone_sealed_header()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index f2872352b8f3..098c27c3c753 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -631,7 +631,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( (*block_hash).into(), |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().header().clone())), ) } @@ -639,7 +639,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( num.into(), |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().header().clone())), ) } @@ -681,7 +681,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.header().clone()), + |block_state, _| Some(block_state.block_ref().block().header().clone()), |_| true, ) } @@ -693,7 +693,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( number.into(), |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().header.clone())), + |block_state| Ok(Some(block_state.block_ref().block().clone_sealed_header())), ) } @@ -704,7 +704,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.clone()), + |block_state, _| Some(block_state.block_ref().block().clone_sealed_header()), |_| true, ) } @@ -718,7 +718,7 @@ impl HeaderProvider for ConsistentProvider { range, |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), |block_state, predicate| { - let header = &block_state.block_ref().block().header; + let header = block_state.block_ref().block().sealed_header(); predicate(header).then(|| header.clone()) }, predicate, @@ -1444,7 +1444,7 @@ impl StateReader for ConsistentProvider { #[cfg(test)] mod tests { use crate::{ - providers::blockchain_provider::BlockchainProvider2, + providers::blockchain_provider::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter, }; use alloy_eips::BlockHashOrNumber; @@ -1524,7 +1524,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let consistent_provider = provider.consistent_provider()?; // Useful blocks @@ -1635,7 +1635,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let consistent_provider = provider.consistent_provider()?; // First in memory block @@ -1730,7 +1730,7 @@ mod tests { .into_iter() .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) .collect(), - ExecutionOutcome { + &ExecutionOutcome { 
bundle: BundleState::new( database_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) @@ -1753,7 +1753,7 @@ mod tests { )?; provider_rw.commit()?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); let chain = NewCanonicalChain::Commit { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 89b2ae5b6001..8f7dbbc2177b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -426,12 +426,12 @@ impl< &self, block: SealedBlockWithSenders<::Block>, ) -> ProviderResult { - let ttd = if block.number == 0 { - block.difficulty + let ttd = if block.number() == 0 { + block.difficulty() } else { - let parent_block_number = block.number - 1; + let parent_block_number = block.number() - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); - parent_ttd + block.difficulty + parent_ttd + block.difficulty() }; let mut writer = self.static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -439,14 +439,14 @@ impl< // Backfill: some tests start at a forward block number, but static files require no gaps. let segment_header = writer.user_header(); if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 { - for block_number in 0..block.number { - let mut prev = block.header.clone().unseal(); + for block_number in 0..block.number() { + let mut prev = block.clone_sealed_header().unseal(); prev.number = block_number; writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } } - writer.append_header(block.header.as_ref(), ttd, &block.hash())?; + writer.append_header(block.header(), ttd, &block.hash())?; self.insert_block(block, StorageLocation::Database) } @@ -801,15 +801,17 @@ impl DatabaseProvider { /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. - fn take_shard(&self, key: T::Key) -> ProviderResult> + fn take_shard( + &self, + cursor: &mut ::CursorMut, + key: T::Key, + ) -> ProviderResult> where T: Table, { - let mut cursor = self.tx.cursor_read::()?; - let shard = cursor.seek_exact(key)?; - if let Some((shard_key, list)) = shard { + if let Some((_, list)) = cursor.seek_exact(key)? { // delete old shard so new one can be inserted. - self.tx.delete::(shard_key, None)?; + cursor.delete_current()?; let list = list.iter().collect::>(); return Ok(list) } @@ -832,13 +834,13 @@ impl DatabaseProvider { P: Copy, T: Table, { + let mut cursor = self.tx.cursor_write::()?; for (partial_key, indices) in index_updates { let mut last_shard = - self.take_shard::(sharded_key_factory(partial_key, u64::MAX))?; + self.take_shard::(&mut cursor, sharded_key_factory(partial_key, u64::MAX))?; last_shard.extend(indices); // Chunk indices and insert them in shards of N size. - let indices = last_shard; - let mut chunks = indices.chunks(sharded_key::NUM_OF_INDICES_IN_SHARD).peekable(); + let mut chunks = last_shard.chunks(sharded_key::NUM_OF_INDICES_IN_SHARD).peekable(); while let Some(list) = chunks.next() { let highest_block_number = if chunks.peek().is_some() { *list.last().expect("`chunks` does not return empty list") @@ -846,9 +848,9 @@ impl DatabaseProvider { // Insert last list with `u64::MAX`. 
u64::MAX }; - self.tx.put::( + cursor.insert( sharded_key_factory(partial_key, highest_block_number), - BlockNumberList::new_pre_sorted(list.iter().copied()), + &BlockNumberList::new_pre_sorted(list.iter().copied()), )?; } } @@ -1670,7 +1672,7 @@ impl StageCheckpointWriter for DatabaseProvider StateWriter fn write_state( &self, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, is_value_known: OriginalValuesKnown, write_receipts_to: StorageLocation, ) -> ProviderResult<()> { @@ -1785,7 +1787,7 @@ impl StateWriter }) .transpose()?; - for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + for (idx, receipts) in execution_outcome.receipts.iter().enumerate() { let block_number = execution_outcome.first_block + idx as u64; // Increment block number for receipts static file writer @@ -1798,11 +1800,11 @@ impl StateWriter .map(|(_, indices)| indices.first_tx_num()) .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - for (idx, receipt) in receipts.into_iter().enumerate() { + for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; if let Some(receipt) = receipt { if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, &receipt)?; + writer.append_receipt(receipt_idx, receipt)?; } if let Some(cursor) = &mut receipts_cursor { @@ -1897,7 +1899,7 @@ impl StateWriter for (address, account) in changes.accounts { if let Some(account) = account { tracing::trace!(?address, "Updating plain state account"); - accounts_cursor.upsert(address, account.into())?; + accounts_cursor.upsert(address, &account.into())?; } else if accounts_cursor.seek_exact(address)?.is_some() { tracing::trace!(?address, "Deleting plain state account"); accounts_cursor.delete_current()?; @@ -1908,7 +1910,7 @@ impl StateWriter tracing::trace!(len = changes.contracts.len(), "Writing bytecodes"); let mut bytecodes_cursor = self.tx_ref().cursor_write::()?; for (hash, bytecode) in changes.contracts { - bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; + bytecodes_cursor.upsert(hash, &Bytecode(bytecode))?; } // Write new storage state and wipe storage if needed. 
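The reworked index-append helper above reuses one write cursor for every shard and chunks the merged indices, keying each full shard by its highest block number and the open (last) shard by `u64::MAX` so the next write can seek it and extend it. A compact sketch of that chunking rule, with a tiny shard size for illustration (the real constant is `sharded_key::NUM_OF_INDICES_IN_SHARD`):

const NUM_OF_INDICES_IN_SHARD: usize = 3;

fn shard_keys(indices: &[u64]) -> Vec<(u64, Vec<u64>)> {
    let mut out = Vec::new();
    let mut chunks = indices.chunks(NUM_OF_INDICES_IN_SHARD).peekable();
    while let Some(chunk) = chunks.next() {
        let key = if chunks.peek().is_some() {
            // Full shard: keyed by its highest block number.
            *chunk.last().expect("`chunks` does not return empty list")
        } else {
            // Open shard: sentinel key so future appends can find it.
            u64::MAX
        };
        out.push((key, chunk.to_vec()));
    }
    out
}

fn main() {
    let shards = shard_keys(&[1, 2, 3, 4]);
    assert_eq!(shards, vec![(3, vec![1, 2, 3]), (u64::MAX, vec![4])]);
}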
@@ -1936,7 +1938,7 @@ impl StateWriter } if !entry.value.is_zero() { - storages_cursor.upsert(address, entry)?; + storages_cursor.upsert(address, &entry)?; } } } @@ -1949,7 +1951,7 @@ impl StateWriter let mut hashed_accounts_cursor = self.tx_ref().cursor_write::()?; for (hashed_address, account) in hashed_state.accounts().accounts_sorted() { if let Some(account) = account { - hashed_accounts_cursor.upsert(hashed_address, account)?; + hashed_accounts_cursor.upsert(hashed_address, &account)?; } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } @@ -1975,7 +1977,7 @@ impl StateWriter } if !entry.value.is_zero() { - hashed_storage_cursor.upsert(*hashed_address, entry)?; + hashed_storage_cursor.upsert(*hashed_address, &entry)?; } } } @@ -2047,7 +2049,7 @@ impl StateWriter if old_account != new_account { let existing_entry = plain_accounts_cursor.seek_exact(*address)?; if let Some(account) = old_account { - plain_accounts_cursor.upsert(*address, *account)?; + plain_accounts_cursor.upsert(*address, account)?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; } @@ -2068,7 +2070,7 @@ impl StateWriter // insert value if needed if !old_storage_value.is_zero() { - plain_storage_cursor.upsert(*address, storage_entry)?; + plain_storage_cursor.upsert(*address, &storage_entry)?; } } } @@ -2147,7 +2149,7 @@ impl StateWriter if old_account != new_account { let existing_entry = plain_accounts_cursor.seek_exact(*address)?; if let Some(account) = old_account { - plain_accounts_cursor.upsert(*address, *account)?; + plain_accounts_cursor.upsert(*address, account)?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; } @@ -2168,7 +2170,7 @@ impl StateWriter // insert value if needed if !old_storage_value.is_zero() { - plain_storage_cursor.upsert(*address, storage_entry)?; + plain_storage_cursor.upsert(*address, &storage_entry)?; } } } @@ -2255,7 +2257,7 @@ impl TrieWriter for DatabaseProvider Some(node) => { if !nibbles.0.is_empty() { num_entries += 1; - account_trie_cursor.upsert(nibbles, node.clone())?; + account_trie_cursor.upsert(nibbles, node)?; } } None => { @@ -2330,7 +2332,7 @@ impl HashingWriter for DatabaseProvi let mut hashed_accounts_cursor = self.tx.cursor_write::()?; for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { - hashed_accounts_cursor.upsert(*hashed_address, *account)?; + hashed_accounts_cursor.upsert(*hashed_address, account)?; } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } @@ -2360,7 +2362,7 @@ impl HashingWriter for DatabaseProvi changesets.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { - hashed_accounts_cursor.upsert(*hashed_address, *account)?; + hashed_accounts_cursor.upsert(*hashed_address, account)?; } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } @@ -2397,7 +2399,7 @@ impl HashingWriter for DatabaseProvi } if !value.is_zero() { - hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; + hashed_storage.upsert(hashed_address, &StorageEntry { key, value })?; } } Ok(hashed_storage_keys) @@ -2449,7 +2451,7 @@ impl HashingWriter for DatabaseProvi } if !value.is_zero() { - hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value })?; + 
hashed_storage_cursor.upsert(hashed_address, &StorageEntry { key, value })?; } Ok(()) }) @@ -2561,7 +2563,7 @@ impl HistoryWriter for DatabaseProvi if !partial_shard.is_empty() { cursor.insert( ShardedKey::last(address), - BlockNumberList::new_pre_sorted(partial_shard), + &BlockNumberList::new_pre_sorted(partial_shard), )?; } } @@ -2619,7 +2621,7 @@ impl HistoryWriter for DatabaseProvi if !partial_shard.is_empty() { cursor.insert( StorageShardedKey::last(address, storage_key), - BlockNumberList::new_pre_sorted(partial_shard), + &BlockNumberList::new_pre_sorted(partial_shard), )?; } } @@ -2769,8 +2771,7 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); // Put header with canonical hashes. - self.tx - .put::>>(block_number, block.header.as_ref().clone())?; + self.tx.put::>>(block_number, block.header().clone())?; durations_recorder.record_relative(metrics::Action::InsertHeaders); self.tx.put::(block_number, ttd.into())?; @@ -2780,7 +2781,7 @@ impl BlockWrite if write_to.static_files() { let mut writer = self.static_file_provider.get_writer(block_number, StaticFileSegment::Headers)?; - writer.append_header(&block.header, ttd, &block.hash())?; + writer.append_header(block.header(), ttd, &block.hash())?; } self.tx.put::(block.hash(), block_number)?; @@ -2864,7 +2865,7 @@ impl BlockWrite let mut durations_recorder = metrics::DurationsRecorder::default(); // insert block meta - block_indices_cursor.append(*block_number, block_indices)?; + block_indices_cursor.append(*block_number, &block_indices)?; durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); @@ -2872,7 +2873,7 @@ impl BlockWrite // write transaction block index if !body.transactions().is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; + tx_block_cursor.append(block_indices.last_tx_num(), block_number)?; durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); } @@ -2882,7 +2883,7 @@ impl BlockWrite writer.append_transaction(next_tx_num, transaction)?; } if let Some(cursor) = tx_cursor.as_mut() { - cursor.append(next_tx_num, transaction.clone())?; + cursor.append(next_tx_num, transaction)?; } // Increment transaction id for each transaction. @@ -2992,7 +2993,7 @@ impl BlockWrite fn append_blocks_with_state( &self, blocks: Vec>, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()> { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b8c7ce0c8b81..7ac5bde40741 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,51 +1,9 @@ -use core::fmt; +//! Contains the main provider types and traits for interacting with the blockchain's storage. 
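In `mod.rs` the old import block gives way to a module doc comment, and shortly below the `pub use` flips from `BlockchainProvider2` to `BlockchainProvider`, so downstream imports of `providers::BlockchainProvider` keep resolving while the legacy wrapper is deleted. A sketch of why a `pub use` rename is all the public surface needs (module contents are illustrative):

mod blockchain_provider {
    /// Previously exported as `BlockchainProvider2`; the internal name and
    /// the re-export below now agree.
    pub struct BlockchainProvider;
}

pub use blockchain_provider::BlockchainProvider;

fn main() {
    // Downstream code keeps importing the same path: crate::BlockchainProvider.
    let _provider = BlockchainProvider;
}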
-use crate::{ - AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, - DatabaseProviderFactory, FullExecutionDataProvider, HeaderProvider, NodePrimitivesProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, -}; -use alloy_consensus::{transaction::TransactionMeta, Header}; -use alloy_eips::{ - eip4895::Withdrawals, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, -}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; -use alloy_rpc_types_engine::ForkchoiceState; -use reth_blockchain_tree_api::{ - error::{CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, -}; -use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; -use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; use reth_db::table::Value; -use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; -use reth_node_types::{ - BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, - ReceiptTy, TxTy, -}; -use reth_primitives::{ - Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, -}; -use reth_prune_types::{PruneCheckpoint, PruneSegment}; -use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - BlockBodyIndicesProvider, CanonChainTracker, OmmersProvider, StateCommitmentProvider, -}; -use reth_storage_errors::provider::ProviderResult; -use std::{ - collections::BTreeMap, - ops::{RangeBounds, RangeInclusive}, - sync::Arc, - time::Instant, -}; - -use tracing::trace; +use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_primitives::EthPrimitives; mod database; pub use database::*; @@ -69,7 +27,7 @@ mod consistent_view; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; -pub use blockchain_provider::BlockchainProvider2; +pub use blockchain_provider::BlockchainProvider; mod consistent; pub use consistent::ConsistentProvider; @@ -114,849 +72,12 @@ impl NodeTypesForTree for T where { } +/// Helper trait expressing requirements for node types to be used in engine. +pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} + +impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} + /// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain /// tree. pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} - -/// The main type for interacting with the blockchain. -/// -/// This type serves as the main entry point for interacting with the blockchain and provides data -/// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper -/// type that holds an instance of the database and the blockchain tree. -pub struct BlockchainProvider { - /// Provider type used to access the database. 
- database: ProviderFactory, - /// The blockchain tree instance. - tree: Arc>, - /// Tracks the chain info wrt forkchoice updates - chain_info: ChainInfoTracker, -} - -impl Clone for BlockchainProvider { - fn clone(&self) -> Self { - Self { - database: self.database.clone(), - tree: self.tree.clone(), - chain_info: self.chain_info.clone(), - } - } -} - -impl BlockchainProvider { - /// Sets the treeviewer for the provider. - #[doc(hidden)] - pub fn with_tree(mut self, tree: Arc>) -> Self { - self.tree = tree; - self - } -} - -impl BlockchainProvider { - /// Create new provider instance that wraps the database and the blockchain tree, using the - /// provided latest header to initialize the chain info tracker, alongside the finalized header - /// if it exists. - pub fn with_blocks( - database: ProviderFactory, - tree: Arc>, - latest: SealedHeader, - finalized: Option, - safe: Option, - ) -> Self { - Self { database, tree, chain_info: ChainInfoTracker::new(latest, finalized, safe) } - } - - /// Create a new provider using only the database and the tree, fetching the latest header from - /// the database to initialize the provider. - pub fn new( - database: ProviderFactory, - tree: Arc>, - ) -> ProviderResult { - let provider = database.provider()?; - let best = provider.chain_info()?; - let latest_header = provider - .header_by_number(best.best_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; - - let finalized_header = provider - .last_finalized_block_number()? - .map(|num| provider.sealed_header(num)) - .transpose()? - .flatten(); - - let safe_header = provider - .last_safe_block_number()? - .map(|num| provider.sealed_header(num)) - .transpose()? - .flatten(); - - Ok(Self::with_blocks( - database, - tree, - SealedHeader::new(latest_header, best.best_hash), - finalized_header, - safe_header, - )) - } - - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the [`HistoricalStateProvider`] against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. - /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. - #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } - } -} - -impl BlockchainProvider -where - Self: StateProviderFactory, - N: NodeTypesWithDB, -{ - /// Return a [`StateProviderBox`] that contains bundle state data provider. - /// Used to inspect or execute transaction on the pending state. 
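A few hunks up, the new `EngineNodeTypes` helper follows the blanket alias-trait pattern: it declares no methods, only names a bundle of bounds, and a blanket impl makes every qualifying type satisfy it automatically. A self-contained sketch with stand-in traits (the real bounds are `ProviderNodeTypes + NodeTypesWithEngine`):

trait ProviderNodeTypes {}
trait NodeTypesWithEngine {}

/// Alias for "node types usable by the engine": bounds only, no methods.
trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {}
impl<T> EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {}

struct MyNode;
impl ProviderNodeTypes for MyNode {}
impl NodeTypesWithEngine for MyNode {}

fn needs_engine_types<N: EngineNodeTypes>(_node: &N) {}

fn main() {
    // No explicit `EngineNodeTypes` impl is ever written for `MyNode`.
    needs_engine_types(&MyNode);
}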
- fn pending_with_provider( - &self, - bundle_state_data: Box, - ) -> ProviderResult { - let canonical_fork = bundle_state_data.canonical_fork(); - trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); - - let state_provider = self.history_by_block_hash(canonical_fork.hash)?; - let bundle_state_provider = BundleStateProvider::new(state_provider, bundle_state_data); - Ok(Box::new(bundle_state_provider)) - } -} - -impl NodePrimitivesProvider for BlockchainProvider { - type Primitives = N::Primitives; -} - -impl DatabaseProviderFactory for BlockchainProvider { - type DB = N::DB; - type Provider = as DatabaseProviderFactory>::Provider; - type ProviderRW = as DatabaseProviderFactory>::ProviderRW; - - fn database_provider_ro(&self) -> ProviderResult { - self.database.database_provider_ro() - } - - fn database_provider_rw(&self) -> ProviderResult { - self.database.database_provider_rw() - } -} - -impl StateCommitmentProvider for BlockchainProvider { - type StateCommitment = N::StateCommitment; -} - -impl StaticFileProviderFactory for BlockchainProvider { - fn static_file_provider(&self) -> StaticFileProvider { - self.database.static_file_provider() - } -} - -impl HeaderProvider for BlockchainProvider { - type Header = Header; - - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.database.header(block_hash) - } - - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.database.header_by_number(num) - } - - fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - self.database.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.database.header_td_by_number(number) - } - - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.database.headers_range(range) - } - - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.database.sealed_header(number) - } - - fn sealed_headers_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.sealed_headers_range(range) - } - - fn sealed_headers_while( - &self, - range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { - self.database.sealed_headers_while(range, predicate) - } -} - -impl BlockHashReader for BlockchainProvider { - fn block_hash(&self, number: u64) -> ProviderResult> { - self.database.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult> { - self.database.canonical_hashes_range(start, end) - } -} - -impl BlockNumReader for BlockchainProvider { - fn chain_info(&self) -> ProviderResult { - Ok(self.chain_info.chain_info()) - } - - fn best_block_number(&self) -> ProviderResult { - Ok(self.chain_info.get_canonical_block_number()) - } - - fn last_block_number(&self) -> ProviderResult { - self.database.last_block_number() - } - - fn block_number(&self, hash: B256) -> ProviderResult> { - self.database.block_number(hash) - } -} - -impl BlockIdReader for BlockchainProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { - Ok(self.tree.pending_block_num_hash()) - } - - fn safe_block_num_hash(&self) -> ProviderResult> { - Ok(self.chain_info.get_safe_num_hash()) - } - - fn finalized_block_num_hash(&self) -> ProviderResult> { - Ok(self.chain_info.get_finalized_num_hash()) - } -} - -impl BlockReader for BlockchainProvider { - type Block = BlockTy; - - fn find_block_by_hash( - &self, - hash: B256, - source: BlockSource, - ) -> ProviderResult> { - 
let block = match source { - BlockSource::Any => { - // check database first - let mut block = self.database.block_by_hash(hash)?; - if block.is_none() { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - block = self.tree.block_by_hash(hash).map(|block| block.unseal()); - } - block - } - BlockSource::Pending => self.tree.block_by_hash(hash).map(|block| block.unseal()), - BlockSource::Canonical => self.database.block_by_hash(hash)?, - }; - - Ok(block) - } - - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - match id { - BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), - BlockHashOrNumber::Number(num) => self.database.block_by_number(num), - } - } - - fn pending_block(&self) -> ProviderResult>> { - Ok(self.tree.pending_block()) - } - - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { - Ok(self.tree.pending_block_with_senders()) - } - - fn pending_block_and_receipts( - &self, - ) -> ProviderResult, Vec)>> { - Ok(self.tree.pending_block_and_receipts()) - } - - /// Returns the block with senders with matching number or hash from database. - /// - /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid - /// hashes, since they would need to be calculated on the spot, and we want fast querying.** - /// - /// Returns `None` if block is not found. - fn block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>> { - self.database.block_with_senders(id, transaction_kind) - } - - fn sealed_block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>> { - self.database.sealed_block_with_senders(id, transaction_kind) - } - - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.database.block_range(range) - } - - fn block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>> { - self.database.block_with_senders_range(range) - } - - fn sealed_block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>> { - self.database.sealed_block_with_senders_range(range) - } -} - -impl TransactionsProvider for BlockchainProvider { - type Transaction = TxTy; - - fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.database.transaction_id(tx_hash) - } - - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_by_id(id) - } - - fn transaction_by_id_unhashed( - &self, - id: TxNumber, - ) -> ProviderResult> { - self.database.transaction_by_id_unhashed(id) - } - - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.database.transaction_by_hash(hash) - } - - fn transaction_by_hash_with_meta( - &self, - tx_hash: TxHash, - ) -> ProviderResult> { - self.database.transaction_by_hash_with_meta(tx_hash) - } - - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_block(id) - } - - fn transactions_by_block( - &self, - id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.database.transactions_by_block(id) - } - - fn transactions_by_block_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult>> { - self.database.transactions_by_block_range(range) - } - - fn transactions_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.transactions_by_tx_range(range) - } - - fn senders_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - 
self.database.senders_by_tx_range(range) - } - - fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_sender(id) - } -} - -impl ReceiptProvider for BlockchainProvider { - type Receipt = ReceiptTy; - - fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.database.receipt(id) - } - - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.database.receipt_by_hash(hash) - } - - fn receipts_by_block( - &self, - block: BlockHashOrNumber, - ) -> ProviderResult>> { - self.database.receipts_by_block(block) - } - - fn receipts_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.receipts_by_tx_range(range) - } -} - -impl ReceiptProviderIdExt for BlockchainProvider { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - receipts = self.tree.receipts_by_block_hash(rpc_block_hash.block_hash); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self.tree.pending_receipts()), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? { - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } - } -} - -impl WithdrawalsProvider for BlockchainProvider { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.withdrawals_by_block(id, timestamp) - } -} - -impl OmmersProvider for BlockchainProvider { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.database.ommers(id) - } -} - -impl BlockBodyIndicesProvider for BlockchainProvider { - fn block_body_indices( - &self, - number: BlockNumber, - ) -> ProviderResult> { - self.database.block_body_indices(number) - } -} - -impl StageCheckpointReader for BlockchainProvider { - fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) - } - - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) - } - - fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() - } -} - -impl PruneCheckpointReader for BlockchainProvider { - fn get_prune_checkpoint( - &self, - segment: PruneSegment, - ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) - } - - fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() - } -} - -impl ChainSpecProvider for BlockchainProvider { - type ChainSpec = N::ChainSpec; - - fn chain_spec(&self) -> Arc { - self.database.chain_spec() - } -} - -impl StateProviderFactory for BlockchainProvider { - /// Storage provider for latest block - fn latest(&self) -> ProviderResult { - trace!(target: "providers::blockchain", "Getting latest block state provider"); - self.database.latest() - } - - /// Returns a [`StateProviderBox`] indexed by the given block number or tag. - /// - /// Note: if a number is provided this will only look at historical(canonical) state. 
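The deleted `state_by_block_number_or_tag` below resolves symbolic tags to a concrete block before reusing the plain historical lookups, a shape the replacement provider keeps. A simplified sketch of that dispatch plus the `ensure_canonical_block` guard (the enum and types are illustrative, not the reth API):

#[derive(Clone, Copy)]
enum Tag {
    Latest,
    Earliest,
    Number(u64),
}

struct StateFactory {
    best: u64, // highest synced block number
}

impl StateFactory {
    /// Guard against unsynced blocks, mirroring `ensure_canonical_block`.
    fn history_by_number(&self, number: u64) -> Result<u64, String> {
        if number > self.best {
            return Err(format!("header not found: {number}"));
        }
        Ok(number)
    }

    /// Resolve the tag to a concrete number, then reuse the plain lookup.
    fn state_by_tag(&self, tag: Tag) -> Result<u64, String> {
        match tag {
            Tag::Latest => self.history_by_number(self.best),
            Tag::Earliest => self.history_by_number(0),
            Tag::Number(num) => self.history_by_number(num),
        }
    }
}

fn main() {
    let factory = StateFactory { best: 10 };
    assert_eq!(factory.state_by_tag(Tag::Latest), Ok(10));
    assert!(factory.state_by_tag(Tag::Number(11)).is_err());
}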
- fn state_by_block_number_or_tag( - &self, - number_or_tag: BlockNumberOrTag, - ) -> ProviderResult { - match number_or_tag { - BlockNumberOrTag::Latest => self.latest(), - BlockNumberOrTag::Finalized => { - // we can only get the finalized state by hash, not by num - let hash = - self.finalized_block_hash()?.ok_or(ProviderError::FinalizedBlockNotFound)?; - - // only look at historical state - self.history_by_block_hash(hash) - } - BlockNumberOrTag::Safe => { - // we can only get the safe state by hash, not by num - let hash = self.safe_block_hash()?.ok_or(ProviderError::SafeBlockNotFound)?; - - self.history_by_block_hash(hash) - } - BlockNumberOrTag::Earliest => self.history_by_block_number(0), - BlockNumberOrTag::Pending => self.pending(), - BlockNumberOrTag::Number(num) => { - // Note: The `BlockchainProvider` could also lookup the tree for the given block number, if for example the block number is `latest + 1`, however this should only support canonical state: - self.history_by_block_number(num) - } - } - } - - fn history_by_block_number( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - self.database.history_by_block_number(block_number) - } - - fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.database.history_by_block_hash(block_hash) - } - - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); - let mut state = self.history_by_block_hash(block); - - // we failed to get the state by hash, from disk, hash block be the pending block - if state.is_err() { - if let Ok(Some(pending)) = self.pending_state_by_hash(block) { - // we found pending block by hash - state = Ok(pending) - } - } - - state - } - - /// Returns the state provider for pending state. 
- /// - /// If there's no pending block available then the latest state provider is returned: - /// [`Self::latest`] - fn pending(&self) -> ProviderResult { - trace!(target: "providers::blockchain", "Getting provider for pending state"); - - if let Some(block) = self.tree.pending_block_num_hash() { - if let Ok(pending) = self.tree.pending_state_provider(block.hash) { - return self.pending_with_provider(pending) - } - } - - // fallback to latest state if the pending block is not available - self.latest() - } - - fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { - if let Some(state) = self.tree.find_pending_state_provider(block_hash) { - return Ok(Some(self.pending_with_provider(state)?)) - } - Ok(None) - } -} - -impl BlockchainTreeEngine for BlockchainProvider { - fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - self.tree.buffer_block(block) - } - - fn insert_block( - &self, - block: SealedBlockWithSenders, - validation_kind: BlockValidationKind, - ) -> Result { - self.tree.insert_block(block, validation_kind) - } - - fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> { - self.tree.finalize_block(finalized_block) - } - - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError> { - self.tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block) - } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - self.tree.update_block_hashes_and_clear_buffered() - } - - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { - self.tree.connect_buffered_blocks_to_canonical_hashes() - } - - fn make_canonical(&self, block_hash: BlockHash) -> Result { - self.tree.make_canonical(block_hash) - } -} - -impl BlockchainTreeViewer for BlockchainProvider { - fn header_by_hash(&self, hash: BlockHash) -> Option { - self.tree.header_by_hash(hash) - } - - fn block_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.block_by_hash(block_hash) - } - - fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.block_with_senders_by_hash(block_hash) - } - - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.buffered_header_by_hash(block_hash) - } - - fn is_canonical(&self, hash: BlockHash) -> Result { - self.tree.is_canonical(hash) - } - - fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option { - self.tree.lowest_buffered_ancestor(hash) - } - - fn canonical_tip(&self) -> BlockNumHash { - self.tree.canonical_tip() - } - - fn pending_block_num_hash(&self) -> Option { - self.tree.pending_block_num_hash() - } - - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - self.tree.pending_block_and_receipts() - } - - fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - self.tree.receipts_by_block_hash(block_hash) - } -} - -impl CanonChainTracker for BlockchainProvider { - type Header = HeaderTy; - - fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { - // update timestamp - self.chain_info.on_forkchoice_update_received(); - } - - fn last_received_update_timestamp(&self) -> Option { - self.chain_info.last_forkchoice_update_received_at() - } - - fn on_transition_configuration_exchanged(&self) { - self.chain_info.on_transition_configuration_exchanged(); - } - - fn last_exchanged_transition_configuration_timestamp(&self) -> Option { - 
self.chain_info.last_transition_configuration_exchanged_at() - } - - fn set_canonical_head(&self, header: SealedHeader) { - self.chain_info.set_canonical_head(header); - } - - fn set_safe(&self, header: SealedHeader) { - self.chain_info.set_safe(header); - } - - fn set_finalized(&self, header: SealedHeader) { - self.chain_info.set_finalized(header); - } -} - -impl BlockReaderIdExt for BlockchainProvider { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for the RPCs that are listed in EIP-1898? - // so not at the provider level? - // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - BlockReader::block_by_hash(self, hash.block_hash) - } - } - } - } - - fn header_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => Some(self.chain_info.get_canonical_head().unseal()), - BlockNumberOrTag::Finalized => { - self.chain_info.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => self.chain_info.get_safe_header().map(|h| h.unseal()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.tree.pending_header().map(|h| h.unseal()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) - } - - fn sealed_header_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult>> { - match id { - BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), - BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), - BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header()), - BlockNumberOrTag::Earliest => self - .header_by_number(0)? - .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), - BlockNumberOrTag::Pending => Ok(self.tree.pending_header()), - BlockNumberOrTag::Number(num) => self - .header_by_number(num)? 
- .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), - } - } - - fn sealed_header_by_id( - &self, - id: BlockId, - ) -> ProviderResult>> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), - }) - } - - fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) - } - - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } - } -} - -impl BlockchainTreePendingStateProvider for BlockchainProvider { - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option> { - self.tree.find_pending_state_provider(block_hash) - } -} - -impl CanonStateSubscriptions for BlockchainProvider { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.tree.subscribe_to_canonical_state() - } -} - -impl ForkChoiceSubscriptions for BlockchainProvider { - type Header = HeaderTy; - - fn subscribe_safe_block(&self) -> ForkChoiceNotifications { - let receiver = self.chain_info.subscribe_safe_block(); - ForkChoiceNotifications(receiver) - } - - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { - let receiver = self.chain_info.subscribe_finalized_block(); - ForkChoiceNotifications(receiver) - } -} - -impl ChangeSetReader for BlockchainProvider { - fn account_block_changeset( - &self, - block_number: BlockNumber, - ) -> ProviderResult> { - self.database.provider()?.account_block_changeset(block_number) - } -} - -impl AccountReader for BlockchainProvider { - /// Get basic account information. 
- fn basic_account(&self, address: &Address) -> ProviderResult> { - self.database.provider()?.basic_account(address) - } -} - -impl fmt::Debug for BlockchainProvider { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BlockchainProvider").finish_non_exhaustive() - } -} diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 598e726ab08e..d4a7bbf34540 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -18,8 +18,7 @@ use reth_db::{ table::{Decompress, Value}, }; use reth_node_types::NodePrimitives; -use reth_primitives::{transaction::recover_signers, SealedHeader}; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{SealedHeader, SignedTransaction}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -297,7 +296,8 @@ impl> TransactionsPr range: impl RangeBounds, ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - recover_signers(&txs, txs.len()).ok_or(ProviderError::SenderRecoveryError) + reth_primitives_traits::transaction::recover::recover_signers(&txs) + .ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index cb8be0f922fc..e81c42284d41 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -33,7 +33,6 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - transaction::recover_signers, BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionSigned, }; @@ -1554,7 +1553,8 @@ impl> TransactionsPr range: impl RangeBounds, ) -> ProviderResult> { let txes = self.transactions_by_tx_range(range)?; - recover_signers(&txes, txes.len()).ok_or(ProviderError::SenderRecoveryError) + reth_primitives_traits::transaction::recover::recover_signers(&txes) + .ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 5ed8b09ee0b8..9924375ecb99 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -29,7 +29,7 @@ pub fn assert_genesis_block( let tx = provider; // check if all tables are empty - assert_eq!(tx.table::().unwrap(), vec![(g.number, g.header.clone().unseal())]); + assert_eq!(tx.table::().unwrap(), vec![(g.number, g.header().clone())]); assert_eq!(tx.table::().unwrap(), vec![(h, n)]); assert_eq!(tx.table::().unwrap(), vec![(n, h)]); @@ -232,7 +232,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { b256!("5d035ccb3e75a9057452ff060b773b213ec1fc353426174068edfc3971a0b6bd") ); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; @@ -294,7 +294,7 @@ fn block2( b256!("90101a13dd059fa5cca99ed93d1dc23657f63626c5b8f993a2ccbdf7446b64f8") ); - let (header, mut 
body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); @@ -359,7 +359,7 @@ fn block3( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; @@ -448,7 +448,7 @@ fn block4( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; @@ -534,7 +534,7 @@ fn block5( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 9c5821057fc8..a0b9657e4032 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -133,7 +133,7 @@ pub trait BlockWriter: Send + Sync { fn append_blocks_with_state( &self, blocks: Vec>, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 4b3178fc6413..09ba9f109bdf 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -19,6 +19,3 @@ pub use static_file_provider::StaticFileProviderFactory; mod full; pub use full::{FullProvider, FullRpcProvider}; - -mod tree_viewer; -pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 2c4ee2cfa8d3..b49e05db2f77 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -18,7 +18,7 @@ pub trait StateWriter { /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
fn write_state( &self, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, is_value_known: OriginalValuesKnown, write_receipts_to: StorageLocation, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs deleted file mode 100644 index f75dbae24d22..000000000000 --- a/crates/storage/provider/src/traits/tree_viewer.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::BlockchainTreePendingStateProvider; -use reth_blockchain_tree_api::{BlockchainTreeEngine, BlockchainTreeViewer}; -use reth_chain_state::CanonStateSubscriptions; - -/// Helper trait to combine all the traits we need for the `BlockchainProvider` -/// -/// This is a temporary solution -pub trait TreeViewer: - BlockchainTreeViewer - + BlockchainTreePendingStateProvider - + CanonStateSubscriptions - + BlockchainTreeEngine -{ -} - -impl TreeViewer for T where - T: BlockchainTreeViewer - + BlockchainTreePendingStateProvider - + CanonStateSubscriptions - + BlockchainTreeEngine -{ -} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index c1ce33978fd0..022c71f81c44 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -169,7 +169,7 @@ where // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. self.database().write_state( - Arc::unwrap_or_clone(execution_output), + &execution_output, OriginalValuesKnown::No, StorageLocation::StaticFiles, )?; @@ -273,10 +273,13 @@ mod tests { for address in addresses { let hashed_address = keccak256(address); accounts_cursor - .insert(hashed_address, Account { nonce: 1, ..Default::default() }) + .insert(hashed_address, &Account { nonce: 1, ..Default::default() }) .unwrap(); storage_cursor - .insert(hashed_address, StorageEntry { key: hashed_slot, value: U256::from(1) }) + .insert( + hashed_address, + &StorageEntry { key: hashed_slot, value: U256::from(1) }, + ) .unwrap(); } provider_rw.commit().unwrap(); @@ -496,7 +499,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -596,7 +599,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -663,7 +666,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -811,7 +814,7 @@ mod tests { let outcome: ExecutionOutcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut 
diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs
index c1ce33978fd0..022c71f81c44 100644
--- a/crates/storage/provider/src/writer/mod.rs
+++ b/crates/storage/provider/src/writer/mod.rs
@@ -169,7 +169,7 @@ where
         // Write state and changesets to the database.
         // Must be written after blocks because of the receipt lookup.
         self.database().write_state(
-            Arc::unwrap_or_clone(execution_output),
+            &execution_output,
             OriginalValuesKnown::No,
             StorageLocation::StaticFiles,
         )?;
@@ -273,10 +273,13 @@ mod tests {
         for address in addresses {
             let hashed_address = keccak256(address);
             accounts_cursor
-                .insert(hashed_address, Account { nonce: 1, ..Default::default() })
+                .insert(hashed_address, &Account { nonce: 1, ..Default::default() })
                 .unwrap();
             storage_cursor
-                .insert(hashed_address, StorageEntry { key: hashed_slot, value: U256::from(1) })
+                .insert(
+                    hashed_address,
+                    &StorageEntry { key: hashed_slot, value: U256::from(1) },
+                )
                 .unwrap();
         }
         provider_rw.commit().unwrap();
@@ -496,7 +499,7 @@ mod tests {
         let outcome =
             ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new());
         provider
-            .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
+            .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
             .expect("Could not write bundle state to DB");
 
         // Check plain storage state
@@ -596,7 +599,7 @@ mod tests {
         let outcome =
             ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new());
         provider
-            .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
+            .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
             .expect("Could not write bundle state to DB");
 
         assert_eq!(
@@ -663,7 +666,7 @@ mod tests {
         let outcome =
             ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new());
         provider
-            .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
+            .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
             .expect("Could not write bundle state to DB");
 
         let mut state = State::builder().with_bundle_update().build();
@@ -811,7 +814,7 @@ mod tests {
         let outcome: ExecutionOutcome =
             ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new());
         provider
-            .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
+            .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
             .expect("Could not write bundle state to DB");
 
         let mut storage_changeset_cursor = provider
@@ -976,7 +979,7 @@ mod tests {
         let outcome =
             ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new());
         provider
-            .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
+            .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
            .expect("Could not write bundle state to DB");
 
         let mut state = State::builder().with_bundle_update().build();
@@ -1023,7 +1026,7 @@ mod tests {
         let outcome =
             ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new());
         provider
-            .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
+            .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
             .expect("Could not write bundle state to DB");
 
         let mut storage_changeset_cursor = provider
diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs
index 6e26e2666d44..6306f418fee0 100644
--- a/crates/storage/storage-api/src/chain.rs
+++ b/crates/storage/storage-api/src/chain.rs
@@ -108,14 +108,14 @@ where
 
             // Write ommers if any
             if !body.ommers.is_empty() {
-                ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?;
+                ommers_cursor.append(block_number, &StoredBlockOmmers { ommers: body.ommers })?;
             }
 
             // Write withdrawals if any
             if let Some(withdrawals) = body.withdrawals {
                 if !withdrawals.is_empty() {
                     withdrawals_cursor
-                        .append(block_number, StoredBlockWithdrawals { withdrawals })?;
+                        .append(block_number, &StoredBlockWithdrawals { withdrawals })?;
                 }
             }
         }
diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs
index b364e9a86f14..4dddc5c4da61 100644
--- a/crates/trie/db/src/trie_cursor.rs
+++ b/crates/trie/db/src/trie_cursor.rs
@@ -158,7 +158,7 @@ where
                 if let Some(node) = maybe_updated {
                     self.cursor.upsert(
                         self.hashed_address,
-                        StorageTrieEntry { nibbles, node: node.clone() },
+                        &StorageTrieEntry { nibbles, node: node.clone() },
                     )?;
                 }
             }
@@ -229,7 +229,7 @@ mod tests {
         cursor
             .upsert(
                 key.into(),
-                BranchNodeCompact::new(
+                &BranchNodeCompact::new(
                     0b0000_0010_0000_0001,
                     0b0000_0010_0000_0001,
                     0,
@@ -264,7 +264,7 @@ mod tests {
         let value = BranchNodeCompact::new(1, 1, 1, vec![B256::random()], None);
 
         cursor
-            .upsert(hashed_address, StorageTrieEntry { nibbles: key.clone(), node: value.clone() })
+            .upsert(hashed_address, &StorageTrieEntry { nibbles: key.clone(), node: value.clone() })
             .unwrap();
 
         let mut cursor = DatabaseStorageTrieCursor::new(cursor, hashed_address);
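Note: the recurring mechanical change in these hunks is that cursor write methods (`insert`, `upsert`, `append`) now borrow their value argument instead of consuming it, so call sites pass `&value` and keep ownership. A minimal sketch of the signature difference on a toy key/value cursor (reth's real cursors are MDBX-backed and generic over table types):

```rust
// Toy key/value "table" cursor standing in for reth's DB cursors.
struct Cursor {
    rows: Vec<(u64, Vec<u8>)>,
}

impl Cursor {
    // Old shape: takes the value by move, so the caller loses ownership.
    fn upsert_owned(&mut self, key: u64, value: Vec<u8>) {
        self.rows.push((key, value));
    }

    // New shape: borrows the value and encodes/copies it internally,
    // so the caller can keep using it afterwards.
    fn upsert(&mut self, key: u64, value: &Vec<u8>) {
        self.rows.push((key, value.clone()));
    }
}

fn main() {
    let mut cursor = Cursor { rows: Vec::new() };
    let value = vec![1, 2, 3];

    cursor.upsert(1, &value);
    // `value` is still usable here, unlike with the by-move variant.
    assert_eq!(value.len(), 3);

    cursor.upsert_owned(2, value);
    assert_eq!(cursor.rows.len(), 2);
}
```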
diff --git a/crates/trie/db/tests/fuzz_in_memory_nodes.rs b/crates/trie/db/tests/fuzz_in_memory_nodes.rs
index e293b0caaf71..874f71bfc40b 100644
--- a/crates/trie/db/tests/fuzz_in_memory_nodes.rs
+++ b/crates/trie/db/tests/fuzz_in_memory_nodes.rs
@@ -31,7 +31,7 @@ proptest! {
 
         // Insert init state into database
        for (hashed_address, balance) in init_state.clone() {
-            hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap();
+            hashed_account_cursor.upsert(hashed_address, &Account { balance, ..Default::default() }).unwrap();
        }
 
        // Compute initial root and updates
@@ -46,7 +46,7 @@ proptest! {
            for (hashed_address, balance) in state_update {
                if let Some(balance) = balance {
                    let account = Account { balance, ..Default::default() };
-                    hashed_account_cursor.upsert(hashed_address, account).unwrap();
+                    hashed_account_cursor.upsert(hashed_address, &account).unwrap();
                    hashed_state.accounts.insert(hashed_address, Some(account));
                    state.insert(hashed_address, balance);
                } else {
@@ -85,7 +85,7 @@ proptest! {
        // Insert init state into database
        for (hashed_slot, value) in init_storage.clone() {
            hashed_storage_cursor
-                .upsert(hashed_address, StorageEntry { key: hashed_slot, value })
+                .upsert(hashed_address, &StorageEntry { key: hashed_slot, value })
                .unwrap();
        }
 
@@ -102,7 +102,7 @@ proptest! {
            let mut hashed_storage = HashedStorage::new(is_deleted);
            for (hashed_slot, value) in storage_update.clone() {
                hashed_storage_cursor
-                    .upsert(hashed_address, StorageEntry { key: hashed_slot, value })
+                    .upsert(hashed_address, &StorageEntry { key: hashed_slot, value })
                    .unwrap();
                hashed_storage.storage.insert(hashed_slot, value);
            }
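Note: these fuzz tests follow the usual "incremental vs. from-scratch" oracle pattern: apply random updates, maintain a result incrementally, and assert it matches a full recomputation. A self-contained sketch of that pattern with `proptest`, using a trivial aggregate in place of a real trie root:

```rust
use proptest::prelude::*;
use std::collections::BTreeMap;

// Toy "full recomputation": sum all values from scratch,
// standing in for recomputing a state root over the whole trie.
fn full_sum(state: &BTreeMap<u64, u64>) -> u64 {
    state.values().copied().sum()
}

proptest! {
    #[test]
    fn incremental_matches_full(
        updates in proptest::collection::vec((0u64..16, 0u64..100), 1..32)
    ) {
        let mut state = BTreeMap::new();
        let mut incremental = 0u64;
        for (k, v) in updates {
            // Incremental maintenance: retract the old value, add the new one.
            if let Some(old) = state.insert(k, v) {
                incremental -= old;
            }
            incremental += v;
            // Oracle: recompute the aggregate from scratch and compare.
            prop_assert_eq!(incremental, full_sum(&state));
        }
    }
}
```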
diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs
index 45c72ffd51d6..a768bcad4205 100644
--- a/crates/trie/db/tests/trie.rs
+++ b/crates/trie/db/tests/trie.rs
@@ -57,7 +57,7 @@ fn incremental_vs_full_root(inputs: &[&str], modified: &str) {
     let data = inputs.iter().map(|x| B256::from_str(x).unwrap());
     let value = U256::from(0);
     for key in data {
-        hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value }).unwrap();
+        hashed_storage_cursor.upsert(hashed_address, &StorageEntry { key, value }).unwrap();
     }
 
     // Generate the intermediate nodes on the receiving end of the channel
@@ -71,7 +71,7 @@ fn incremental_vs_full_root(inputs: &[&str], modified: &str) {
         hashed_storage_cursor.delete_current().unwrap();
     }
     hashed_storage_cursor
-        .upsert(hashed_address, StorageEntry { key: modified_key, value })
+        .upsert(hashed_address, &StorageEntry { key: modified_key, value })
         .unwrap();
 
     // 2. Calculate full merkle root
@@ -313,7 +313,7 @@ fn storage_root_regression() {
     let mut hashed_storage_cursor = tx.tx_ref().cursor_dup_write::().unwrap();
     for (hashed_slot, value) in storage.clone() {
-        hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap();
+        hashed_storage_cursor.upsert(key3, &StorageEntry { key: hashed_slot, value }).unwrap();
     }
     tx.commit().unwrap();
     let tx = factory.provider_rw().unwrap();
@@ -349,7 +349,7 @@ fn account_and_storage_trie() {
     let key1 =
         B256::from_str("b000000000000000000000000000000000000000000000000000000000000000").unwrap();
     let account1 = Account { nonce: 0, balance: U256::from(3).mul(ether), bytecode_hash: None };
-    hashed_account_cursor.upsert(key1, account1).unwrap();
+    hashed_account_cursor.upsert(key1, &account1).unwrap();
     hash_builder.add_leaf(Nibbles::unpack(key1), &encode_account(account1, None));
 
     // Some address whose hash starts with 0xB040
@@ -358,7 +358,7 @@ fn account_and_storage_trie() {
     assert_eq!(key2[0], 0xB0);
     assert_eq!(key2[1], 0x40);
     let account2 = Account { nonce: 0, balance: ether, ..Default::default() };
-    hashed_account_cursor.upsert(key2, account2).unwrap();
+    hashed_account_cursor.upsert(key2, &account2).unwrap();
     hash_builder.add_leaf(Nibbles::unpack(key2), &encode_account(account2, None));
 
     // Some address whose hash starts with 0xB041
@@ -370,7 +370,7 @@ fn account_and_storage_trie() {
         B256::from_str("5be74cad16203c4905c068b012a2e9fb6d19d036c410f16fd177f337541440dd").unwrap();
     let account3 =
         Account { nonce: 0, balance: U256::from(2).mul(ether), bytecode_hash: Some(code_hash) };
-    hashed_account_cursor.upsert(key3, account3).unwrap();
+    hashed_account_cursor.upsert(key3, &account3).unwrap();
     for (hashed_slot, value) in storage {
         if hashed_storage_cursor
             .seek_by_key_subkey(key3, hashed_slot)
@@ -380,7 +380,7 @@ fn account_and_storage_trie() {
         {
             hashed_storage_cursor.delete_current().unwrap();
         }
-        hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap();
+        hashed_storage_cursor.upsert(key3, &StorageEntry { key: hashed_slot, value }).unwrap();
     }
     let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap();
     hash_builder
@@ -389,19 +389,19 @@ fn account_and_storage_trie() {
     let key4a =
         B256::from_str("B1A0000000000000000000000000000000000000000000000000000000000000").unwrap();
     let account4a = Account { nonce: 0, balance: U256::from(4).mul(ether), ..Default::default() };
-    hashed_account_cursor.upsert(key4a, account4a).unwrap();
+    hashed_account_cursor.upsert(key4a, &account4a).unwrap();
     hash_builder.add_leaf(Nibbles::unpack(key4a), &encode_account(account4a, None));
 
     let key5 =
         B256::from_str("B310000000000000000000000000000000000000000000000000000000000000").unwrap();
     let account5 = Account { nonce: 0, balance: U256::from(8).mul(ether), ..Default::default() };
-    hashed_account_cursor.upsert(key5, account5).unwrap();
+    hashed_account_cursor.upsert(key5, &account5).unwrap();
     hash_builder.add_leaf(Nibbles::unpack(key5), &encode_account(account5, None));
 
     let key6 =
         B256::from_str("B340000000000000000000000000000000000000000000000000000000000000").unwrap();
     let account6 = Account { nonce: 0, balance: U256::from(1).mul(ether), ..Default::default() };
-    hashed_account_cursor.upsert(key6, account6).unwrap();
+    hashed_account_cursor.upsert(key6, &account6).unwrap();
     hash_builder.add_leaf(Nibbles::unpack(key6), &encode_account(account6, None));
 
     // Populate account & storage trie DB tables
@@ -452,7 +452,7 @@ fn account_and_storage_trie() {
     let key4b = keccak256(address4b);
     assert_eq!(key4b.0[0], key4a.0[0]);
     let account4b = Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None };
-    hashed_account_cursor.upsert(key4b, account4b).unwrap();
+    hashed_account_cursor.upsert(key4b, &account4b).unwrap();
 
     let mut prefix_set = PrefixSetMut::default();
     prefix_set.insert(Nibbles::unpack(key4b));
@@ -649,7 +649,7 @@ proptest! {
                 let should_generate_changeset = !state.is_empty();
                 let mut changes = PrefixSetMut::default();
                 for (hashed_address, balance) in accounts.clone() {
-                    hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap();
+                    hashed_account_cursor.upsert(hashed_address, &Account { balance, ..Default::default() }).unwrap();
                     if should_generate_changeset {
                         changes.insert(Nibbles::unpack(hashed_address));
                     }
@@ -703,7 +703,9 @@ fn extension_node_storage_trie(
         hex!("30af8f0000000000000000000000000000000000000000000000000000000000"),
         hex!("3100000000000000000000000000000000000000000000000000000000000000"),
     ] {
-        hashed_storage.upsert(hashed_address, StorageEntry { key: B256::new(key), value }).unwrap();
+        hashed_storage
+            .upsert(hashed_address, &StorageEntry { key: B256::new(key), value })
+            .unwrap();
         hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(&value));
     }
 
@@ -730,7 +732,7 @@ fn extension_node_trie(
         hex!("30af8f0000000000000000000000000000000000000000000000000000000000"),
         hex!("3100000000000000000000000000000000000000000000000000000000000000"),
     ] {
-        hashed_accounts.upsert(B256::new(key), a).unwrap();
+        hashed_accounts.upsert(B256::new(key), &a).unwrap();
         hb.add_leaf(Nibbles::unpack(key), &val);
     }
 
diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs
index 0e0b094920b1..2194a2fadf60 100644
--- a/crates/trie/db/tests/walker.rs
+++ b/crates/trie/db/tests/walker.rs
@@ -38,7 +38,7 @@ fn walk_nodes_with_common_prefix() {
     let mut account_cursor = tx.tx_ref().cursor_write::().unwrap();
     for (k, v) in &inputs {
-        account_cursor.upsert(k.clone().into(), v.clone()).unwrap();
+        account_cursor.upsert(k.clone().into(), &v.clone()).unwrap();
     }
     let account_trie = DatabaseAccountTrieCursor::new(account_cursor);
     test_cursor(account_trie, &expected);
@@ -47,7 +47,10 @@ fn walk_nodes_with_common_prefix() {
     let mut storage_cursor = tx.tx_ref().cursor_dup_write::().unwrap();
     for (k, v) in &inputs {
         storage_cursor
-            .upsert(hashed_address, StorageTrieEntry { nibbles: k.clone().into(), node: v.clone() })
+            .upsert(
+                hashed_address,
+                &StorageTrieEntry { nibbles: k.clone().into(), node: v.clone() },
+            )
             .unwrap();
     }
     let storage_trie = DatabaseStorageTrieCursor::new(storage_cursor, hashed_address);
@@ -106,7 +109,7 @@ fn cursor_rootnode_with_changesets() {
     let hashed_address = B256::random();
     for (k, v) in nodes {
-        cursor.upsert(hashed_address, StorageTrieEntry { nibbles: k.into(), node: v }).unwrap();
+        cursor.upsert(hashed_address, &StorageTrieEntry { nibbles: k.into(), node: v }).unwrap();
     }
 
     let mut trie = DatabaseStorageTrieCursor::new(cursor, hashed_address);
diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs
index 385f6269f394..1b760ba2d912 100644
--- a/crates/trie/db/tests/witness.rs
+++ b/crates/trie/db/tests/witness.rs
@@ -114,10 +114,10 @@ fn correctly_decodes_branch_node_values() {
     let mut hashed_storage_cursor = provider.tx_ref().cursor_dup_write::().unwrap();
     hashed_storage_cursor
-        .upsert(hashed_address, StorageEntry { key: hashed_slot1, value: U256::from(1) })
+        .upsert(hashed_address, &StorageEntry { key: hashed_slot1, value: U256::from(1) })
         .unwrap();
     hashed_storage_cursor
-        .upsert(hashed_address, StorageEntry { key: hashed_slot2, value: U256::from(1) })
+        .upsert(hashed_address, &StorageEntry { key: hashed_slot2, value: U256::from(1) })
         .unwrap();
 
     let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap();
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index 07264cbc728b..fecb3c5fb40e 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -15,10 +15,10 @@ use reth_primitives_traits::Account;
 use reth_tracing::tracing::trace;
 use reth_trie_common::{
     updates::{StorageTrieUpdates, TrieUpdates},
-    MultiProof, MultiProofTargets, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH,
+    MultiProof, MultiProofTargets, Nibbles, RlpNode, TrieAccount, TrieNode, EMPTY_ROOT_HASH,
     TRIE_ACCOUNT_RLP_MAX_SIZE,
 };
-use std::{fmt, iter::Peekable};
+use std::{collections::VecDeque, fmt, iter::Peekable};
 
 /// Sparse state trie representing lazy-loaded Ethereum state trie.
 pub struct SparseStateTrie {
@@ -271,6 +271,104 @@ impl SparseStateTrie {
         Ok(())
     }
 
+    /// Reveal state witness with the given state root.
+    /// The state witness is expected to be a map of `keccak(rlp(node)): rlp(node).`
+    /// NOTE: This method does not extensively validate the witness.
+    pub fn reveal_witness(
+        &mut self,
+        state_root: B256,
+        witness: B256HashMap,
+    ) -> SparseStateTrieResult<()> {
+        // Create a `(hash, path, maybe_account)` queue for traversing witness trie nodes
+        // starting from the root node.
+        let mut queue = VecDeque::from([(state_root, Nibbles::default(), None)]);
+
+        while let Some((hash, path, maybe_account)) = queue.pop_front() {
+            // Retrieve the trie node and decode it.
+            let Some(trie_node_bytes) = witness.get(&hash) else { continue };
+            let trie_node = TrieNode::decode(&mut &trie_node_bytes[..])?;
+
+            // Push children nodes into the queue.
+            match &trie_node {
+                TrieNode::Branch(branch) => {
+                    for (idx, maybe_child) in branch.as_ref().children() {
+                        if let Some(child_hash) = maybe_child.and_then(RlpNode::as_hash) {
+                            let mut child_path = path.clone();
+                            child_path.push_unchecked(idx);
+                            queue.push_back((child_hash, child_path, maybe_account));
+                        }
+                    }
+                }
+                TrieNode::Extension(ext) => {
+                    if let Some(child_hash) = ext.child.as_hash() {
+                        let mut child_path = path.clone();
+                        child_path.extend_from_slice_unchecked(&ext.key);
+                        queue.push_back((child_hash, child_path, maybe_account));
+                    }
+                }
+                TrieNode::Leaf(leaf) => {
+                    let mut full_path = path.clone();
+                    full_path.extend_from_slice_unchecked(&leaf.key);
+                    if let Some(hashed_address) = maybe_account {
+                        // Record storage slot in revealed.
+                        let hashed_slot = B256::from_slice(&full_path.pack());
+                        self.revealed.entry(hashed_address).or_default().insert(hashed_slot);
+                    } else {
+                        let hashed_address = B256::from_slice(&full_path.pack());
+                        let account = TrieAccount::decode(&mut &leaf.value[..])?;
+                        if account.storage_root != EMPTY_ROOT_HASH {
+                            queue.push_back((
+                                account.storage_root,
+                                Nibbles::default(),
+                                Some(hashed_address),
+                            ));
+                        }
+
+                        // Record account in revealed.
+                        self.revealed.entry(hashed_address).or_default();
+                    }
+                }
+                TrieNode::EmptyRoot => {} // nothing to do here
+            };
+
+            // Reveal the node itself.
+            if let Some(account) = maybe_account {
+                let storage_trie_entry = self.storages.entry(account).or_default();
+                if path.is_empty() {
+                    // Handle special storage state root node case.
+                    storage_trie_entry.reveal_root_with_provider(
+                        self.provider_factory.storage_node_provider(account),
+                        trie_node,
+                        None,
+                        self.retain_updates,
+                    )?;
+                } else {
+                    // Reveal non-root storage trie node.
+                    storage_trie_entry
+                        .as_revealed_mut()
+                        .ok_or(SparseTrieErrorKind::Blind)?
+                        .reveal_node(path, trie_node, None)?;
+                }
+            } else if path.is_empty() {
+                // Handle special state root node case.
+                self.state.reveal_root_with_provider(
+                    self.provider_factory.account_node_provider(),
+                    trie_node,
+                    None,
+                    self.retain_updates,
+                )?;
+            } else {
+                // Reveal non-root state trie node.
+                self.state
+                    .as_revealed_mut()
+                    .ok_or(SparseTrieErrorKind::Blind)?
+                    .reveal_node(path, trie_node, None)?;
+            }
+        }
+
+        Ok(())
+    }
+
     /// Validates the root node of the proof and returns it if it exists and is valid.
     fn validate_root_node>(
         &self,
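Note: the new `reveal_witness` above is a breadth-first traversal over the witness map (`keccak(rlp(node)) -> rlp(node)`): starting from the state root hash, it decodes each node found in the witness, enqueues child hashes of branch/extension nodes, and jumps into a storage trie when an account leaf has a non-empty storage root; hashes absent from the witness are simply skipped and remain blinded. A self-contained sketch of that traversal shape, with a toy `Hash`/`Node` standing in for reth's `B256`/`TrieNode` (path tracking and the actual reveal calls are omitted):

```rust
use std::collections::{HashMap, VecDeque};

type Hash = u64;

enum Node {
    Branch(Vec<Hash>),                   // child hashes
    Leaf { storage_root: Option<Hash> }, // account leaf may point into a storage trie
}

/// Breadth-first reveal: decode each node found in the witness and enqueue
/// every reachable child hash; account leaves with a storage root enqueue
/// that root so the storage trie is revealed too. Returns how many nodes
/// were revealed.
fn reveal_witness(root: Hash, witness: &HashMap<Hash, Node>) -> usize {
    let mut revealed = 0;
    let mut queue = VecDeque::from([root]);
    while let Some(hash) = queue.pop_front() {
        // Nodes absent from the witness stay blinded; just skip them.
        let Some(node) = witness.get(&hash) else { continue };
        revealed += 1;
        match node {
            Node::Branch(children) => queue.extend(children.iter().copied()),
            Node::Leaf { storage_root } => {
                if let Some(root) = storage_root {
                    queue.push_back(*root);
                }
            }
        }
    }
    revealed
}

fn main() {
    let witness = HashMap::from([
        (1, Node::Branch(vec![2, 3])),
        (2, Node::Leaf { storage_root: Some(4) }),
        (4, Node::Branch(vec![])),
        // hash 3 is intentionally missing: it remains blinded.
    ]);
    assert_eq!(reveal_witness(1, &witness), 3);
}
```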

diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 8f54d9454022..f472578e3d9d 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -700,9 +700,8 @@ impl RevealedSparseTrie {
                         .resize(buffers.branch_child_buf.len(), Default::default());
 
                     let mut added_children = false;
-                    // TODO(alexey): set the `TrieMask` bits directly
-                    let mut tree_mask_values = Vec::new();
-                    let mut hash_mask_values = Vec::new();
+                    let mut tree_mask = TrieMask::default();
+                    let mut hash_mask = TrieMask::default();
                     let mut hashes = Vec::new();
                     for (i, child_path) in buffers.branch_child_buf.iter().enumerate() {
                         if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) {
@@ -711,18 +710,21 @@ impl RevealedSparseTrie {
 
                             // Update the masks only if we need to retain trie updates
                             if retain_updates {
-                                // Set the trie mask
-                                let tree_mask_value = if node_type.store_in_db_trie() {
+                                // SAFETY: it's a child, so it's never empty
+                                let last_child_nibble = child_path.last().unwrap();
+
+                                // Determine whether we need to set trie mask bit.
+                                let should_set_tree_mask_bit =
                                     // A branch or an extension node explicitly set the
                                     // `store_in_db_trie` flag
-                                    true
-                                } else {
+                                    node_type.store_in_db_trie() ||
                                     // Set the flag according to whether a child node was
                                     // pre-calculated (`calculated = false`), meaning that it wasn't
                                     // in the database
-                                    !calculated
-                                };
-                                tree_mask_values.push(tree_mask_value);
+                                    !calculated;
+                                if should_set_tree_mask_bit {
+                                    tree_mask.set_bit(last_child_nibble);
+                                }
 
                                 // Set the hash mask. If a child node is a revealed branch node OR
                                 // is a blinded node that has its hash mask bit set according to the
@@ -733,12 +735,11 @@ impl RevealedSparseTrie {
                                         self.branch_node_hash_masks
                                             .get(&path)
                                             .is_some_and(|mask| {
-                                                mask.is_bit_set(child_path.last().unwrap())
+                                                mask.is_bit_set(last_child_nibble)
                                             }))
                                 });
-                                let hash_mask_value = hash.is_some();
-                                hash_mask_values.push(hash_mask_value);
                                 if let Some(hash) = hash {
+                                    hash_mask.set_bit(last_child_nibble);
                                     hashes.push(hash);
                                 }
 
@@ -746,16 +747,17 @@ impl RevealedSparseTrie {
                                     target: "trie::sparse",
                                     ?path,
                                     ?child_path,
-                                    ?tree_mask_value,
-                                    ?hash_mask_value,
+                                    tree_mask_bit_set = should_set_tree_mask_bit,
+                                    hash_mask_bit_set = hash.is_some(),
                                     "Updating branch node child masks"
                                 );
                             }
 
                             // Insert children in the resulting buffer in a normal order,
                             // because initially we iterated in reverse.
-                            buffers.branch_value_stack_buf
-                                [buffers.branch_child_buf.len() - i - 1] = child;
+                            // SAFETY: i < len and len is never 0
+                            let original_idx = buffers.branch_child_buf.len() - i - 1;
+                            buffers.branch_value_stack_buf[original_idx] = child;
                             added_children = true;
                         } else {
                             debug_assert!(!added_children);
@@ -778,21 +780,6 @@ impl RevealedSparseTrie {
                     let store_in_db_trie_value = if let Some(updates) =
                         self.updates.as_mut().filter(|_| retain_updates && !path.is_empty())
                     {
-                        let mut tree_mask_values = tree_mask_values.into_iter().rev();
-                        let mut hash_mask_values = hash_mask_values.into_iter().rev();
-                        let mut tree_mask = TrieMask::default();
-                        let mut hash_mask = TrieMask::default();
-                        for (i, child) in branch_node_ref.children() {
-                            if child.is_some() {
-                                if hash_mask_values.next().unwrap() {
-                                    hash_mask.set_bit(i);
-                                }
-                                if tree_mask_values.next().unwrap() {
-                                    tree_mask.set_bit(i);
-                                }
-                            }
-                        }
-
                         // Store in DB trie if there are either any children that are stored in the
                         // DB trie, or any children represent hashed values
                         let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty();
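Note: the change above resolves the `TODO(alexey)` by setting `TrieMask` bits directly at each child's last nibble while walking the children, instead of collecting two `Vec<bool>` side-buffers and folding them into masks afterwards in a reverse-iteration loop. A self-contained sketch of a 16-bit nibble mask mirroring the role of reth's `TrieMask` (not its actual implementation):

```rust
/// Minimal 16-bit nibble mask, one bit per branch child (0x0..=0xF).
#[derive(Default, Debug, PartialEq)]
struct NibbleMask(u16);

impl NibbleMask {
    fn set_bit(&mut self, nibble: u8) {
        debug_assert!(nibble < 16);
        self.0 |= 1 << nibble;
    }
    fn is_bit_set(&self, nibble: u8) -> bool {
        self.0 & (1 << nibble) != 0
    }
    fn is_empty(&self) -> bool {
        self.0 == 0
    }
}

fn main() {
    // Setting bits directly while iterating children replaces collecting
    // `Vec<bool>` values and reconstructing the mask in a second pass.
    let mut tree_mask = NibbleMask::default();
    for child_nibble in [0x0u8, 0x1, 0x9] {
        tree_mask.set_bit(child_nibble);
    }
    assert!(tree_mask.is_bit_set(0x9));
    assert!(!tree_mask.is_bit_set(0x2));
    assert!(!tree_mask.is_empty());
    assert_eq!(tree_mask.0, 0b0000_0010_0000_0011);
}
```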
diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs
index acf9f4dff062..106d96b560be 100644
--- a/examples/bsc-p2p/src/chainspec.rs
+++ b/examples/bsc-p2p/src/chainspec.rs
@@ -1,6 +1,7 @@
 use alloy_primitives::{b256, B256};
 use reth_chainspec::{
-    once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition,
+    once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork,
+    ForkCondition, Hardfork,
 };
 use reth_network_peers::NodeRecord;
diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml
index d6642a8edfe5..536ff1a94472 100644
--- a/examples/custom-engine-types/Cargo.toml
+++ b/examples/custom-engine-types/Cargo.toml
@@ -13,6 +13,7 @@ reth-node-core.workspace = true
 reth-payload-builder.workspace = true
 reth-basic-payload-builder.workspace = true
 reth-ethereum-payload-builder.workspace = true
+reth-engine-local.workspace = true
 reth-node-ethereum = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
 reth-trie-db.workspace = true
diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs
index c64cd0495306..ce25eedaacca 100644
--- a/examples/custom-engine-types/src/main.rs
+++ b/examples/custom-engine-types/src/main.rs
@@ -54,6 +54,7 @@ use reth_basic_payload_builder::{
     PayloadBuilder, PayloadConfig,
 };
 use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider};
+use reth_engine_local::payload::UnsupportedLocalAttributes;
 use reth_ethereum_payload_builder::EthereumBuilderConfig;
 use reth_node_api::{
     payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes},
@@ -88,6 +89,9 @@ pub struct CustomPayloadAttributes {
     pub custom: u64,
 }
 
+// TODO(mattsse): remove this tmp workaround
+impl UnsupportedLocalAttributes for CustomPayloadAttributes {}
+
 /// Custom error type used in payload attributes validation
 #[derive(Debug, Error)]
 pub enum CustomError {
diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs
index d87bbccb2836..586d755861f9 100644
--- a/examples/polygon-p2p/src/chain_cfg.rs
+++ b/examples/polygon-p2p/src/chain_cfg.rs
@@ -1,6 +1,7 @@
 use alloy_primitives::{b256, B256};
 use reth_chainspec::{
-    once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition,
+    once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork,
+    ForkCondition, Hardfork,
 };
 use reth_discv4::NodeRecord;
 use reth_primitives::Head;
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index cde891036e6a..19c108e5e7de 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -33,7 +33,7 @@ use reth::rpc::builder::{
 };
 // Configuring the network parts, ideally also wouldn't need to think about this.
 use myrpc_ext::{MyRpcExt, MyRpcExtApiServer};
-use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor};
+use reth::tasks::TokioTaskExecutor;
 use reth_node_ethereum::{
     node::EthereumEngineValidator, EthEvmConfig, EthExecutorProvider, EthereumNode,
 };
@@ -61,7 +61,7 @@ async fn main() -> eyre::Result<()> {
     // 2. Setup the blockchain provider using only the database provider and a noop for the tree to
     //    satisfy trait bounds. Tree is not used in this example since we are only operating on the
     //    disk and don't handle new blocks/live sync etc, which is done by the blockchain tree.
-    let provider = BlockchainProvider::new(factory, Arc::new(NoopBlockchainTree::default()))?;
+    let provider = BlockchainProvider::new(factory)?;
 
     let rpc_builder = RpcModuleBuilder::default()
         .with_provider(provider.clone())