From 79983a63e95f5a08a09e6783c921021351ed90ae Mon Sep 17 00:00:00 2001 From: Jason Paryani Date: Wed, 27 Oct 2021 16:10:05 -0400 Subject: [PATCH 1/8] Flashbots change up to v0.3 --- .github/workflows/go.yml | 64 ++ README.md | 369 +---------- README.original.md | 363 +++++++++++ cmd/geth/main.go | 1 + cmd/geth/usage.go | 1 + cmd/utils/flags.go | 7 + core/tx_pool.go | 64 +- core/types/transaction.go | 8 + eth/api_backend.go | 4 + infra/Dockerfile.node | 23 + infra/Dockerfile.updater | 23 + infra/mev-geth-nodes-arm64.yaml | 979 +++++++++++++++++++++++++++++ infra/mev-geth-nodes-x86-64.yaml | 972 ++++++++++++++++++++++++++++ infra/mev-geth-updater-arm64.yaml | 749 ++++++++++++++++++++++ infra/mev-geth-updater-x86-64.yaml | 737 ++++++++++++++++++++++ infra/start-mev-geth-node.sh | 96 +++ infra/start-mev-geth-updater.sh | 181 ++++++ internal/ethapi/api.go | 51 ++ internal/ethapi/backend.go | 6 + internal/web3ext/web3ext.go | 5 + les/api_backend.go | 3 + light/txpool.go | 11 + miner/miner.go | 31 +- miner/multi_worker.go | 118 ++++ miner/worker.go | 414 +++++++++++- miner/worker_test.go | 5 +- 26 files changed, 4899 insertions(+), 386 deletions(-) create mode 100644 .github/workflows/go.yml create mode 100644 README.original.md create mode 100644 infra/Dockerfile.node create mode 100644 infra/Dockerfile.updater create mode 100644 infra/mev-geth-nodes-arm64.yaml create mode 100644 infra/mev-geth-nodes-x86-64.yaml create mode 100644 infra/mev-geth-updater-arm64.yaml create mode 100644 infra/mev-geth-updater-x86-64.yaml create mode 100755 infra/start-mev-geth-node.sh create mode 100755 infra/start-mev-geth-updater.sh create mode 100644 miner/multi_worker.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000000..3fc1f2ff8c68 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,64 @@ +name: Go + +on: + push: + pull_request: + branches: [ master ] + +jobs: + + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ^1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Test + run: go test ./core ./miner/... ./internal/ethapi/... ./les/... + + - name: Build + run: make geth + + e2e: + name: End to End + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ^1.13 + id: go + + - name: Use Node.js 12.x + uses: actions/setup-node@v1 + with: + node-version: 12.x + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Build + run: make geth + + - name: Check out the e2e code repo + uses: actions/checkout@v2 + with: + repository: flashbots/mev-geth-demo + path: e2e + + - run: cd e2e && yarn install + - run: | + cd e2e + GETH=`pwd`/../build/bin/geth ./run.sh & + sleep 15 + yarn run demo-simple + yarn run demo-contract diff --git a/README.md b/README.md index 81b7215ba85d..8fe7df9e4255 100644 --- a/README.md +++ b/README.md @@ -1,363 +1,30 @@ -## Go Ethereum +# MEV-geth -Official Golang implementation of the Ethereum protocol. +This is a fork of go-ethereum, [the original README is here](README.original.md). 
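A note on what this patch centers on: per the diffstat above, `core/tx_pool.go` learns to store MEV bundles, `eth/api_backend.go` gains a `SendBundle` backend, and `internal/ethapi/api.go` plus `internal/web3ext/web3ext.go` expose bundle submission over RPC. The sketch below shows how a searcher might call it; the RPC wire format is not part of this excerpt, so the method name `eth_sendBundle` and the positional parameter layout (mirroring the `SendBundle` signature added later in this patch) are assumptions rather than the authoritative API.

```go
// Illustrative sketch only: submit a bundle of raw, signed transactions
// targeting one block. Parameter order mirrors the SendBundle backend added
// by this patch (txs, blockNumber, minTimestamp, maxTimestamp,
// revertingTxHashes); the exact wire format is an assumption.
package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Hex-encoded raw signed transactions (placeholders).
	txs := []string{"0xf86b...", "0xf86c..."}

	var result interface{}
	if err := client.CallContext(context.Background(), &result, "eth_sendBundle",
		txs,        // ordered signed transactions in the bundle
		"0xc5043f", // target block number
		0,          // minTimestamp; 0 means no lower bound
		0,          // maxTimestamp; 0 means no upper bound
		[]string{}, // hashes of transactions allowed to revert
	); err != nil {
		log.Fatal(err)
	}
}
```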
-[![API Reference](
-https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
-)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
-[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
-[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum)
-[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)
+Flashbots is a research and development organization formed to mitigate the negative externalities and existential risks posed by miner-extractable value (MEV) to smart-contract blockchains. We propose a permissionless, transparent, and fair ecosystem for MEV extraction that reinforces the Ethereum ideals.

-Automated builds are available for stable releases and the unstable master branch. Binary
-archives are published at https://geth.ethereum.org/downloads/.
+## Quick start

-## Building the source
-
-For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).
-
-Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install
-them using your favourite package manager. Once the dependencies are installed, run
-
-```shell
-make geth
-```
-
-or, to build the full suite of utilities:
-
-```shell
-make all
-```
-
-## Executables
-
-The go-ethereum project comes with several wrappers/executables found in the `cmd`
-directory.
-
-| Command | Description |
-| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
-| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
-| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
-| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details.
| -| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | -| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | -| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | - -## Running `geth` - -Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)), -but we've enumerated a few common parameter combos to get you up to speed quickly -on how you can run your own `geth` instance. - -### Full node on the main Ethereum network - -By far the most common scenario is people wanting to simply interact with the Ethereum -network: create accounts; transfer funds; deploy and interact with contracts. For this -particular use-case the user doesn't care about years-old historical data, so we can -sync quickly to the current state of the network. To do so: - -```shell -$ geth console -``` - -This command will: - * Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag), - causing it to download more data in exchange for avoiding processing the entire history - of the Ethereum network, which is very CPU intensive. - * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), - (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md) - (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), - as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). - This tool is optional and if you leave it out you can always attach to an already running - `geth` instance with `geth attach`. - -### A Full node on the Görli test network - -Transitioning towards developers, if you'd like to play around with creating Ethereum -contracts, you almost certainly would like to do that without any real money involved until -you get the hang of the entire system. In other words, instead of attaching to the main -network, you want to join the **test** network with your node, which is fully equivalent to -the main network, but with play-Ether only. - -```shell -$ geth --goerli console -``` - -The `console` subcommand has the exact same meaning as above and they are equally -useful on the testnet too. Please, see above for their explanations if you've skipped here. - -Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: - - * Instead of connecting the main Ethereum network, the client will connect to the Görli - test network, which uses different P2P bootnodes, different network IDs and genesis - states. 
- * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` - will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on - Linux). Note, on OSX and Linux this also means that attaching to a running testnet node - requires the use of a custom endpoint since `geth attach` will try to attach to a - production node endpoint by default, e.g., - `geth attach /goerli/geth.ipc`. Windows users are not affected by - this. - -*Note: Although there are some internal protective measures to prevent transactions from -crossing over between the main network and test network, you should make sure to always -use separate accounts for play-money and real-money. Unless you manually move -accounts, `geth` will by default correctly separate the two networks and will not make any -accounts available between them.* - -### Full node on the Rinkeby test network - -Go Ethereum also supports connecting to the older proof-of-authority based test network -called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community. - -```shell -$ geth --rinkeby console -``` - -### Full node on the Ropsten test network - -In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The -Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such, -it has certain extra overhead and is more susceptible to reorganization attacks due to the -network's low difficulty/security. - -```shell -$ geth --ropsten console ``` - -*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.* - -### Configuration - -As an alternative to passing the numerous flags to the `geth` binary, you can also pass a -configuration file via: - -```shell -$ geth --config /path/to/your_config.toml -``` - -To get an idea how the file should look like you can use the `dumpconfig` subcommand to -export your existing configuration: - -```shell -$ geth --your-favourite-flags dumpconfig -``` - -*Note: This works only with `geth` v1.6.0 and above.* - -#### Docker quick start - -One of the quickest ways to get Ethereum up and running on your machine is by using -Docker: - -```shell -docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ - -p 8545:8545 -p 30303:30303 \ - ethereum/client-go -``` - -This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the -above command does. It will also create a persistent volume in your home directory for -saving your blockchain as well as map the default ports. There is also an `alpine` tag -available for a slim version of the image. - -Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers -and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not -accessible from the outside. - -### Programmatically interfacing `geth` nodes - -As a developer, sooner rather than later you'll want to start interacting with `geth` and the -Ethereum network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) -and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). -These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based -platforms, and named pipes on Windows). 
-
-The IPC interface is enabled by default and exposes all the APIs supported by `geth`,
-whereas the HTTP and WS interfaces need to manually be enabled and only expose a
-subset of APIs due to security reasons. These can be turned on/off and configured as
-you'd expect.
-
-HTTP based JSON-RPC API options:
-
- * `--http` Enable the HTTP-RPC server
- * `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
- * `--http.port` HTTP-RPC server listening port (default: `8545`)
- * `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
- * `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
- * `--ws` Enable the WS-RPC server
- * `--ws.addr` WS-RPC server listening interface (default: `localhost`)
- * `--ws.port` WS-RPC server listening port (default: `8546`)
- * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
- * `--ws.origins` Origins from which to accept websockets requests
- * `--ipcdisable` Disable the IPC-RPC server
- * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`)
- * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
-
-You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
-connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll
-need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You
-can reuse the same connection for multiple requests!
-
-**Note: Please understand the security implications of opening up an HTTP/WS based
-transport before doing so! Hackers on the internet are actively trying to subvert
-Ethereum nodes with exposed APIs! Further, all browser tabs can access locally
-running web servers, so malicious web pages could try to subvert locally available
-APIs!**
-
-### Operating a private network
-
-Maintaining your own private network is more involved as a lot of configurations taken for
-granted in the official networks need to be manually set up.
-
-#### Defining the private genesis state
-
-First, you'll need to create the genesis state of your networks, which all nodes need to be
-aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
-
-```json
-{
-  "config": {
-    "chainId": <arbitrary positive integer>,
-    "homesteadBlock": 0,
-    "eip150Block": 0,
-    "eip155Block": 0,
-    "eip158Block": 0,
-    "byzantiumBlock": 0,
-    "constantinopleBlock": 0,
-    "petersburgBlock": 0,
-    "istanbulBlock": 0,
-    "berlinBlock": 0,
-    "londonBlock": 0
-  },
-  "alloc": {},
-  "coinbase": "0x0000000000000000000000000000000000000000",
-  "difficulty": "0x20000",
-  "extraData": "",
-  "gasLimit": "0x2fefd8",
-  "nonce": "0x0000000000000042",
-  "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
-  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
-  "timestamp": "0x00"
-}
-```
-
-The above fields should be fine for most purposes, although we'd recommend changing
-the `nonce` to some random value so you prevent unknown remote nodes from being able
-to connect to you. If you'd like to pre-fund some accounts for easier testing, create
-the accounts and populate the `alloc` field with their addresses.
-
-```json
-"alloc": {
-  "0x0000000000000000000000000000000000000001": {
-    "balance": "111111111"
-  },
-  "0x0000000000000000000000000000000000000002": {
-    "balance": "222222222"
-  }
-}
-```
-
-With the genesis state defined in the above JSON file, you'll need to initialize **every**
-`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
-set:
-
-```shell
-$ geth init path/to/genesis.json
-```
-
-#### Creating the rendezvous point
-
-With all nodes that you want to run initialized to the desired genesis state, you'll need to
-start a bootstrap node that others can use to find each other in your network and/or over
-the internet. The clean way is to configure and run a dedicated bootnode:
-
-```shell
-$ bootnode --genkey=boot.key
-$ bootnode --nodekey=boot.key
-```
-
-With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format)
-that other nodes can use to connect to it and exchange peer information. Make sure to
-replace the displayed IP address information (most probably `[::]`) with your externally
-accessible IP to get the actual `enode` URL.
-
-*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
-recommended way.*
-
-#### Starting up your member nodes
-
-With the bootnode operational and externally reachable (you can try
-`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
-node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
-probably also be desirable to keep the data directory of your private network separated, so
-do also specify a custom `--datadir` flag.
-
-```shell
-$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
-```
-
-*Note: Since your network will be completely cut off from the main and test networks, you'll
-also need to configure a miner to process transactions and create new blocks for you.*
-
-#### Running a private miner
-
-Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
-requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
-setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
-and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
-
-In a private network setting, however a single CPU miner instance is more than enough for
-practical purposes as it can produce a stable stream of blocks at the correct intervals
-without needing heavy resources (consider running on a single thread, no need for multiple
-ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
-by:
-
-```shell
-$ geth --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
+git clone https://github.com/flashbots/mev-geth
+cd mev-geth
+make geth
 ```
-
-Which will start mining blocks and transactions on a single CPU thread, crediting all
-proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
-by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
-transactions are accepted at (`--miner.gasprice`).
-
-## Contribution
-
-Thank you for considering to help out with the source code! We welcome contributions
-from anyone on the internet, and are grateful for even the smallest of fixes!
-
-If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
-for the maintainers to review and merge into the main code base.
If you wish to submit -more complex changes though, please check up with the core devs first on [our Discord Server](https://discord.gg/invite/nthXNEv) -to ensure those changes are in line with the general philosophy of the project and/or get -some early feedback which can make both your efforts much lighter as well as our review -and merge procedures quick and simple. - -Please make sure your contributions adhere to our coding guidelines: +See [here](https://geth.ethereum.org/docs/install-and-build/installing-geth#build-go-ethereum-from-source-code) for further info on building MEV-geth from source. - * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) - guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) - guidelines. - * Pull requests need to be based on and opened against the `master` branch. - * Commit messages should be prefixed with the package(s) they modify. - * E.g. "eth, rpc: make trace configs optional" +## Documentation -Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) -for more details on configuring your environment, managing project dependencies, and -testing procedures. +See [here](https://docs.flashbots.net) for Flashbots documentation. -## License +| Version | Spec | +| ------- | ------------------------------------------------------------------------------------------- | +| v0.4 | [MEV-Geth Spec v0.4](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v04) | +| v0.3 | [MEV-Geth Spec v0.3](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v03) | +| v0.2 | [MEV-Geth Spec v0.2](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v02) | +| v0.1 | [MEV-Geth Spec v0.1](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v01) | -The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the -[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), -also included in our repository in the `COPYING.LESSER` file. +### Feature requests and bug reports -The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the -[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also -included in our repository in the `COPYING` file. +If you are a user of MEV-Geth and have suggestions on how to make integration with your current setup easier, or would like to submit a bug report, we encourage you to open an issue in this repository with the `enhancement` or `bug` labels respectively. If you need help getting started, please ask in the dedicated [#⛏️miners](https://discord.gg/rcgADN9qFX) channel in our Discord. diff --git a/README.original.md b/README.original.md new file mode 100644 index 000000000000..81b7215ba85d --- /dev/null +++ b/README.original.md @@ -0,0 +1,363 @@ +## Go Ethereum + +Official Golang implementation of the Ethereum protocol. 
+ +[![API Reference]( +https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 +)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) +[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) +[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum) +[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) + +Automated builds are available for stable releases and the unstable master branch. Binary +archives are published at https://geth.ethereum.org/downloads/. + +## Building the source + +For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth). + +Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install +them using your favourite package manager. Once the dependencies are installed, run + +```shell +make geth +``` + +or, to build the full suite of utilities: + +```shell +make all +``` + +## Executables + +The go-ethereum project comes with several wrappers/executables found in the `cmd` +directory. + +| Command | Description | +| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | +| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | +| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | +| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | +| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. 
| +| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | +| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | +| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | + +## Running `geth` + +Going through all the possible command line flags is out of scope here (please consult our +[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)), +but we've enumerated a few common parameter combos to get you up to speed quickly +on how you can run your own `geth` instance. + +### Full node on the main Ethereum network + +By far the most common scenario is people wanting to simply interact with the Ethereum +network: create accounts; transfer funds; deploy and interact with contracts. For this +particular use-case the user doesn't care about years-old historical data, so we can +sync quickly to the current state of the network. To do so: + +```shell +$ geth console +``` + +This command will: + * Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag), + causing it to download more data in exchange for avoiding processing the entire history + of the Ethereum network, which is very CPU intensive. + * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), + (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md) + (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), + as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). + This tool is optional and if you leave it out you can always attach to an already running + `geth` instance with `geth attach`. + +### A Full node on the Görli test network + +Transitioning towards developers, if you'd like to play around with creating Ethereum +contracts, you almost certainly would like to do that without any real money involved until +you get the hang of the entire system. In other words, instead of attaching to the main +network, you want to join the **test** network with your node, which is fully equivalent to +the main network, but with play-Ether only. + +```shell +$ geth --goerli console +``` + +The `console` subcommand has the exact same meaning as above and they are equally +useful on the testnet too. Please, see above for their explanations if you've skipped here. + +Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: + + * Instead of connecting the main Ethereum network, the client will connect to the Görli + test network, which uses different P2P bootnodes, different network IDs and genesis + states. + * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` + will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on + Linux). 
Note, on OSX and Linux this also means that attaching to a running testnet node + requires the use of a custom endpoint since `geth attach` will try to attach to a + production node endpoint by default, e.g., + `geth attach /goerli/geth.ipc`. Windows users are not affected by + this. + +*Note: Although there are some internal protective measures to prevent transactions from +crossing over between the main network and test network, you should make sure to always +use separate accounts for play-money and real-money. Unless you manually move +accounts, `geth` will by default correctly separate the two networks and will not make any +accounts available between them.* + +### Full node on the Rinkeby test network + +Go Ethereum also supports connecting to the older proof-of-authority based test network +called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community. + +```shell +$ geth --rinkeby console +``` + +### Full node on the Ropsten test network + +In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The +Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such, +it has certain extra overhead and is more susceptible to reorganization attacks due to the +network's low difficulty/security. + +```shell +$ geth --ropsten console +``` + +*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.* + +### Configuration + +As an alternative to passing the numerous flags to the `geth` binary, you can also pass a +configuration file via: + +```shell +$ geth --config /path/to/your_config.toml +``` + +To get an idea how the file should look like you can use the `dumpconfig` subcommand to +export your existing configuration: + +```shell +$ geth --your-favourite-flags dumpconfig +``` + +*Note: This works only with `geth` v1.6.0 and above.* + +#### Docker quick start + +One of the quickest ways to get Ethereum up and running on your machine is by using +Docker: + +```shell +docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ + -p 8545:8545 -p 30303:30303 \ + ethereum/client-go +``` + +This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the +above command does. It will also create a persistent volume in your home directory for +saving your blockchain as well as map the default ports. There is also an `alpine` tag +available for a slim version of the image. + +Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers +and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not +accessible from the outside. + +### Programmatically interfacing `geth` nodes + +As a developer, sooner rather than later you'll want to start interacting with `geth` and the +Ethereum network via your own programs and not manually through the console. To aid +this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) +and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). +These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based +platforms, and named pipes on Windows). + +The IPC interface is enabled by default and exposes all the APIs supported by `geth`, +whereas the HTTP and WS interfaces need to manually be enabled and only expose a +subset of APIs due to security reasons. These can be turned on/off and configured as +you'd expect. 
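(Editorial sketch, not part of the original README.) Before the flag reference below, here is a minimal Go program for the workflow this section describes: attach to a node over one of these transports with `ethclient` and read the latest header, assuming the node was started with `--http`.

```go
// Minimal sketch: connect to a local geth node over HTTP and print the
// latest block number. Works the same over WS or IPC by changing the URL.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	header, err := client.HeaderByNumber(context.Background(), nil) // nil = latest
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block:", header.Number)
}
```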
+
+HTTP based JSON-RPC API options:
+
+ * `--http` Enable the HTTP-RPC server
+ * `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
+ * `--http.port` HTTP-RPC server listening port (default: `8545`)
+ * `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
+ * `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
+ * `--ws` Enable the WS-RPC server
+ * `--ws.addr` WS-RPC server listening interface (default: `localhost`)
+ * `--ws.port` WS-RPC server listening port (default: `8546`)
+ * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
+ * `--ws.origins` Origins from which to accept websockets requests
+ * `--ipcdisable` Disable the IPC-RPC server
+ * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`)
+ * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
+
+You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
+connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll
+need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You
+can reuse the same connection for multiple requests!
+
+**Note: Please understand the security implications of opening up an HTTP/WS based
+transport before doing so! Hackers on the internet are actively trying to subvert
+Ethereum nodes with exposed APIs! Further, all browser tabs can access locally
+running web servers, so malicious web pages could try to subvert locally available
+APIs!**
+
+### Operating a private network
+
+Maintaining your own private network is more involved as a lot of configurations taken for
+granted in the official networks need to be manually set up.
+
+#### Defining the private genesis state
+
+First, you'll need to create the genesis state of your networks, which all nodes need to be
+aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
+
+```json
+{
+  "config": {
+    "chainId": <arbitrary positive integer>,
+    "homesteadBlock": 0,
+    "eip150Block": 0,
+    "eip155Block": 0,
+    "eip158Block": 0,
+    "byzantiumBlock": 0,
+    "constantinopleBlock": 0,
+    "petersburgBlock": 0,
+    "istanbulBlock": 0,
+    "berlinBlock": 0,
+    "londonBlock": 0
+  },
+  "alloc": {},
+  "coinbase": "0x0000000000000000000000000000000000000000",
+  "difficulty": "0x20000",
+  "extraData": "",
+  "gasLimit": "0x2fefd8",
+  "nonce": "0x0000000000000042",
+  "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "timestamp": "0x00"
+}
+```
+
+The above fields should be fine for most purposes, although we'd recommend changing
+the `nonce` to some random value so you prevent unknown remote nodes from being able
+to connect to you. If you'd like to pre-fund some accounts for easier testing, create
+the accounts and populate the `alloc` field with their addresses.
+
+```json
+"alloc": {
+  "0x0000000000000000000000000000000000000001": {
+    "balance": "111111111"
+  },
+  "0x0000000000000000000000000000000000000002": {
+    "balance": "222222222"
+  }
+}
+```
+
+With the genesis state defined in the above JSON file, you'll need to initialize **every**
+`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
+set:
+
+```shell
+$ geth init path/to/genesis.json
+```
+
+#### Creating the rendezvous point
+
+With all nodes that you want to run initialized to the desired genesis state, you'll need to
+start a bootstrap node that others can use to find each other in your network and/or over
+the internet. The clean way is to configure and run a dedicated bootnode:
+
+```shell
+$ bootnode --genkey=boot.key
+$ bootnode --nodekey=boot.key
+```
+
+With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format)
+that other nodes can use to connect to it and exchange peer information. Make sure to
+replace the displayed IP address information (most probably `[::]`) with your externally
+accessible IP to get the actual `enode` URL.
+
+*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
+recommended way.*
+
+#### Starting up your member nodes
+
+With the bootnode operational and externally reachable (you can try
+`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
+node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
+probably also be desirable to keep the data directory of your private network separated, so
+do also specify a custom `--datadir` flag.
+
+```shell
+$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
+```
+
+*Note: Since your network will be completely cut off from the main and test networks, you'll
+also need to configure a miner to process transactions and create new blocks for you.*
+
+#### Running a private miner
+
+Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
+requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
+setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
+and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
+
+In a private network setting, however a single CPU miner instance is more than enough for
+practical purposes as it can produce a stable stream of blocks at the correct intervals
+without needing heavy resources (consider running on a single thread, no need for multiple
+ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
+by:
+
+```shell
+$ geth --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
+```
+
+Which will start mining blocks and transactions on a single CPU thread, crediting all
+proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
+by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
+transactions are accepted at (`--miner.gasprice`).
+
+## Contribution
+
+Thank you for considering to help out with the source code! We welcome contributions
+from anyone on the internet, and are grateful for even the smallest of fixes!
+
+If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
+for the maintainers to review and merge into the main code base.
If you wish to submit +more complex changes though, please check up with the core devs first on [our Discord Server](https://discord.gg/invite/nthXNEv) +to ensure those changes are in line with the general philosophy of the project and/or get +some early feedback which can make both your efforts much lighter as well as our review +and merge procedures quick and simple. + +Please make sure your contributions adhere to our coding guidelines: + + * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) + guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). + * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) + guidelines. + * Pull requests need to be based on and opened against the `master` branch. + * Commit messages should be prefixed with the package(s) they modify. + * E.g. "eth, rpc: make trace configs optional" + +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) +for more details on configuring your environment, managing project dependencies, and +testing procedures. + +## License + +The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the +[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), +also included in our repository in the `COPYING.LESSER` file. + +The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the +[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also +included in our repository in the `COPYING` file. diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 94a0b16a8dbf..68f8ef8af76b 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -131,6 +131,7 @@ var ( utils.MinerExtraDataFlag, utils.MinerRecommitIntervalFlag, utils.MinerNoVerifyFlag, + utils.MinerMaxMergedBundles, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV5Flag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 417fba68923d..d3d39e2861d2 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -189,6 +189,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MinerExtraDataFlag, utils.MinerRecommitIntervalFlag, utils.MinerNoVerifyFlag, + utils.MinerMaxMergedBundles, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7d11b0631a28..030ba4a4c851 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -475,6 +475,11 @@ var ( Usage: "Time interval to recreate the block being mined", Value: ethconfig.Defaults.Miner.Recommit, } + MinerMaxMergedBundles = cli.IntFlag{ + Name: "miner.maxmergedbundles", + Usage: "flashbots - The maximum amount of bundles to merge. 
The miner will run this many workers in parallel to calculate if the full block is more profitable with these additional bundles.", + Value: 3, + } MinerNoVerifyFlag = cli.BoolFlag{ Name: "miner.noverify", Usage: "Disable remote sealing verification", @@ -1402,6 +1407,8 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) { log.Warn("The generic --miner.gastarget flag is deprecated and will be removed in the future!") } + + cfg.MaxMergedBundles = ctx.GlobalInt(MinerMaxMergedBundles.Name) } func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/core/tx_pool.go b/core/tx_pool.go index 3329d736a37f..51312d3e78b6 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -251,11 +251,12 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + mevBundles []types.MevBundle + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription @@ -557,6 +558,59 @@ func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transacti return pending } +/// AllMevBundles returns all the MEV Bundles currently in the pool +func (pool *TxPool) AllMevBundles() []types.MevBundle { + return pool.mevBundles +} + +// MevBundles returns a list of bundles valid for the given blockNumber/blockTimestamp +// also prunes bundles that are outdated +func (pool *TxPool) MevBundles(blockNumber *big.Int, blockTimestamp uint64) ([]types.MevBundle, error) { + pool.mu.Lock() + defer pool.mu.Unlock() + + // returned values + var ret []types.MevBundle + // rolled over values + var bundles []types.MevBundle + + for _, bundle := range pool.mevBundles { + // Prune outdated bundles + if (bundle.MaxTimestamp != 0 && blockTimestamp > bundle.MaxTimestamp) || blockNumber.Cmp(bundle.BlockNumber) > 0 { + continue + } + + // Roll over future bundles + if (bundle.MinTimestamp != 0 && blockTimestamp < bundle.MinTimestamp) || blockNumber.Cmp(bundle.BlockNumber) < 0 { + bundles = append(bundles, bundle) + continue + } + + // return the ones which are in time + ret = append(ret, bundle) + // keep the bundles around internally until they need to be pruned + bundles = append(bundles, bundle) + } + + pool.mevBundles = bundles + return ret, nil +} + +// AddMevBundle adds a mev bundle to the pool +func (pool *TxPool) AddMevBundle(txs types.Transactions, blockNumber *big.Int, minTimestamp, maxTimestamp uint64, revertingTxHashes []common.Hash) error { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.mevBundles = append(pool.mevBundles, types.MevBundle{ + Txs: txs, + BlockNumber: blockNumber, + MinTimestamp: minTimestamp, + MaxTimestamp: maxTimestamp, + RevertingTxHashes: revertingTxHashes, + }) + return nil +} + // Locals retrieves the accounts currently considered local by the 
pool. func (pool *TxPool) Locals() []common.Address { pool.mu.Lock() diff --git a/core/types/transaction.go b/core/types/transaction.go index 83f1766e67e2..5425249ef0af 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -635,3 +635,11 @@ func copyAddressPtr(a *common.Address) *common.Address { cpy := *a return &cpy } + +type MevBundle struct { + Txs Transactions + BlockNumber *big.Int + MinTimestamp uint64 + MaxTimestamp uint64 + RevertingTxHashes []common.Hash +} diff --git a/eth/api_backend.go b/eth/api_backend.go index 6577ac1e1af4..8454c0afe701 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -239,6 +239,10 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) return b.eth.txPool.AddLocal(signedTx) } +func (b *EthAPIBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error { + return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp, revertingTxHashes) +} + func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { pending := b.eth.txPool.Pending(false) var txs types.Transactions diff --git a/infra/Dockerfile.node b/infra/Dockerfile.node new file mode 100644 index 000000000000..db8e99ac937e --- /dev/null +++ b/infra/Dockerfile.node @@ -0,0 +1,23 @@ +# Build Geth in a stock Go builder container +FROM golang:1.15-alpine as builder + +RUN apk add --no-cache make gcc musl-dev linux-headers git + +ADD . /go-ethereum +RUN cd /go-ethereum && make geth + +# Pull Geth into a second stage deploy alpine container +FROM alpine:latest + +ENV PYTHONUNBUFFERED=1 +RUN apk add --update --no-cache groff less python3 curl jq ca-certificates && ln -sf python3 /usr/bin/python +RUN python3 -m ensurepip +RUN pip3 install --no-cache --upgrade pip setuptools awscli + +COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ + +COPY ./infra/start-mev-geth-node.sh /root/start-mev-geth-node.sh +RUN chmod 755 /root/start-mev-geth-node.sh + +EXPOSE 8545 8546 30303 30303/udp +ENTRYPOINT ["/root/start-mev-geth-node.sh"] diff --git a/infra/Dockerfile.updater b/infra/Dockerfile.updater new file mode 100644 index 000000000000..d3099d19ce1a --- /dev/null +++ b/infra/Dockerfile.updater @@ -0,0 +1,23 @@ +# Build Geth in a stock Go builder container +FROM golang:1.15-alpine as builder + +RUN apk add --no-cache make gcc musl-dev linux-headers git + +ADD . 
/go-ethereum
+RUN cd /go-ethereum && make geth
+
+# Pull Geth into a second stage deploy alpine container
+FROM alpine:latest
+
+ENV PYTHONUNBUFFERED=1
+RUN apk add --update --no-cache groff less python3 curl jq ca-certificates && ln -sf python3 /usr/bin/python
+RUN python3 -m ensurepip
+RUN pip3 install --no-cache --upgrade pip setuptools awscli
+
+COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
+
+COPY ./infra/start-mev-geth-updater.sh /root/start-mev-geth-updater.sh
+RUN chmod 755 /root/start-mev-geth-updater.sh
+
+EXPOSE 8545 8546 30303 30303/udp
+ENTRYPOINT ["/root/start-mev-geth-updater.sh"]
diff --git a/infra/mev-geth-nodes-arm64.yaml b/infra/mev-geth-nodes-arm64.yaml
new file mode 100644
index 000000000000..af76b6aada82
--- /dev/null
+++ b/infra/mev-geth-nodes-arm64.yaml
@@ -0,0 +1,979 @@
+---
+AWSTemplateFormatVersion: 2010-09-09
+
+Description: >
+  This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS)
+  Created by Luke Youngblood, luke@blockscale.net
+
+Parameters:
+
+# GitHub Parameters
+
+  GitHubUser:
+    Type: String
+    Default: lyoungblood
+    Description: Your team or username on GitHub.
+
+  NodeGitHubRepo:
+    Type: String
+    Default: mev-geth
+    Description: The repo name of the node service.
+
+  NodeGitHubBranch:
+    Type: String
+    Default: master
+    Description: The branch of the node repo to continuously deploy.
+
+  GitHubToken:
+    Type: String
+    NoEcho: true
+    Description: >
+      Token for the team or user specified above. (https://github.com/settings/tokens)
+
+# VPC Parameters
+
+  VPC:
+    Type: AWS::EC2::VPC::Id
+
+  Subnets:
+    Type: List<AWS::EC2::Subnet::Id>
+
+  VpcCIDR:
+    Type: String
+    Default: 172.31.0.0/16
+
+# ECS Parameters
+
+  InstanceType:
+    Type: String
+    Default: m6gd.large
+
+  MemoryLimit:
+    Type: Number
+    Default: 6144
+
+  KeyPair:
+    Type: AWS::EC2::KeyPair::KeyName
+
+  SpotPrice:
+    Type: Number
+    Default: 0.0904
+
+  ClusterSize:
+    Type: Number
+    Default: 5
+
+  Bandwidth:
+    Type: Number
+    Default: 2048
+
+  BandwidthCeiling:
+    Type: Number
+    Default: 4096
+
+  NodeDesiredCount:
+    Type: Number
+    Default: 0
+
+  NodeTaskName:
+    Type: String
+    Default: mev-geth-node
+
+  ECSAMI:
+    Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
+    Default: /aws/service/ecs/optimized-ami/amazon-linux-2/arm64/recommended/image_id
+
+# SNS Parameters
+
+  SNSSubscriptionEndpoint:
+    Type: String
+    Default: https://events.pagerduty.com/integration/44cbdb66f22b4f3caf5dd15741c7eb17/enqueue
+
+  SNSSubscriptionProtocol:
+    Type: String
+    Default: HTTPS
+
+# CloudWatch Alarm Parameters
+
+  CPUAlarmThreshold:
+    Type: Number
+    Default: 80
+
+  MemoryAlarmThreshold:
+    Type: Number
+    Default: 80
+
+# Mev-Geth Parameters
+
+  Network:
+    Type: String
+    Default: mainnet
+    AllowedValues:
+      - mainnet
+      - goerli
+
+  SyncMode:
+    Type: String
+    Default: fast
+    AllowedValues:
+      - full
+      - fast
+      - light
+
+  Connections:
+    Type: Number
+    Default: 50
+
+  RpcPort:
+    Type: Number
+    Default: 8545
+
+  WsPort:
+    Type: Number
+    Default: 8546
+
+  NetPort:
+    Type: Number
+    Default: 30303
+
+Metadata:
+
+  AWS::CloudFormation::Interface:
+    ParameterLabels:
+      GitHubUser:
+        default: "User"
+      NodeGitHubRepo:
+        default: "Node Repo"
+      NodeGitHubBranch:
+        default: "Node Branch"
+      GitHubToken:
+        default: "Personal Access Token"
+      VPC:
+        default: "Choose which VPC the autoscaling group should be deployed to"
+      Subnets:
+        default: "Choose which subnets the autoscaling group should be deployed to"
+      VpcCIDR:
+        default: "VPC CIDR Block"
+      InstanceType:
+        default: "Which instance type should we use to build the
ECS cluster?" + MemoryLimit: + default: "How much memory should be reserved for each task. Set to greater than 50% of instance memory capacity." + KeyPair: + default: "Which keypair should be used to allow SSH to the nodes?" + ClusterSize: + default: "How many ECS hosts do you want to initially deploy?" + SpotPrice: + default: "The maximum spot price to pay for instances - this should normally be set to the on demand price." + Bandwidth: + default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers (upload) per EC2 instance" + BandwidthCeiling: + default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers as a ceiling (max. upload)" + NodeDesiredCount: + default: "How many ECS Tasks do you want to initially execute?" + NodeTaskName: + default: "The name of the node ECS Task" + ECSAMI: + default: "The ECS AMI ID populated from SSM." + Network: + default: "The Ethereum network you will be connecting to" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of desired connections on the Mev-Geth node" + RpcPort: + default: "The RPC port used for communication with the local Mev-Geth node" + WsPort: + default: "The Websockets port used for communication with the local Mev-Geth node" + NetPort: + default: "The TCP port used for connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - NodeGitHubRepo + - NodeGitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - MemoryLimit + - KeyPair + - SpotPrice + - ClusterSize + - Bandwidth + - BandwidthCeiling + - NodeDesiredCount + - NodeTaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - RpcPort + - WsPort + - NetPort + - Label: + default: PagerDuty Endpoint Configuration + Parameters: + - SNSSubscriptionEndpoint + - SNSSubscriptionProtocol + - Label: + default: CloudWatch Alarms Configuration + Parameters: + - CPUAlarmThreshold + - MemoryAlarmThreshold + +# Mappings + +Mappings: + + RegionMap: + us-east-2: + mainnet: mev-geth-updater-fast-chainbucket-17p2xhnhcydlz + goerli: mev-geth-updater-fast-goerli-chainbucket-j6dujg8apbna + #us-west-2: + # mainnet: + # goerli: + +Resources: + +# ECS Resources + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref RpcPort + ToPort: !Ref RpcPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref WsPort + ToPort: !Ref WsPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + 
DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 2 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + AssociatePublicIpAddress: True + # Uncomment if you would like to use Spot instances (subject to unexpected termination) + # SpotPrice: !Ref SpotPrice + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? -eq 0 ] + then + mount_point=/var/lib/docker + + # copy existing files from mount point + service docker stop + echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage + mkdir -p /tmp$mount_point + rsync -val $mount_point/ /tmp/$mount_point/ + + # make a new filesystem and mount it + mkfs -t ext4 /dev/nvme1n1 + mkdir -p $mount_point + mount -t ext4 -o noatime /dev/nvme1n1 $mount_point + + # Copy files back to new mount point + rsync -val /tmp/$mount_point/ $mount_point/ + rm -rf /tmp$mount_point + service docker start + + # Make raid appear on reboot + echo >> /etc/fstab + echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab + fi + + # Set Linux traffic control to limit outbound bandwidth usage of peering + #tc qdisc add dev eth0 root handle 1:0 htb default 1 + #tc class add dev eth0 parent 1:0 classid 1:10 htb rate ${Bandwidth}kbit ceil {BandwidthCeiling}kbit prio 0 + #tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dport 30303 0xffff flowid 1:10 + + /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + /opt/aws/bin/cfn-signal -e $? 
--region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup + /usr/bin/enable-ec2-spot-hibernation + + # Attach an EIP from the pool of available EIPs in scope "vpc" + alloc=`aws ec2 describe-addresses --region ${AWS::Region} --output text | grep -v eni | head -1 | cut -f 2` + instanceid=`curl --silent 169.254.169.254/latest/meta-data/instance-id` + aws ec2 associate-address --region ${AWS::Region} --allocation-id $alloc --instance-id $instanceid + echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config + + reboot + + Metadata: + AWS::CloudFormation::Init: + config: + packages: + yum: + awslogs: [] + + commands: + 01_add_instance_to_cluster: + command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config + files: + "/etc/cfn/cfn-hup.conf": + mode: 000400 + owner: root + group: root + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init + action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + + services: + sysvinit: + cfn-hup: + enabled: true + ensureRunning: true + files: + - /etc/cfn/cfn-hup.conf + - /etc/cfn/hooks.d/cfn-auto-reloader.conf + + NodeLoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + Properties: + Name: !Sub ${AWS::StackName}-node-NLB + Type: network + Scheme: internal + Subnets: !Ref Subnets + Tags: + - Key: Name + Value: !Sub ${AWS::StackName}-node-NLB + + NodeTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + DependsOn: NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref RpcPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + TargetGroupArn: !Ref NodeTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref RpcPort + Protocol: TCP + + NodeWsTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + DependsOn: NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref WsPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeWsListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + TargetGroupArn: !Ref NodeWsTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref WsPort + Protocol: TCP + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. 
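+  #
+  # For example, a hypothetical extra statement (the bucket name below is
+  # illustrative only) granting the hosts read access to a configuration
+  # bucket would be appended to the ecs-service policy document:
+  #
+  #   {
+  #     "Effect": "Allow",
+  #     "Action": ["s3:GetObject"],
+  #     "Resource": "arn:aws:s3:::example-config-bucket/*"
+  #   }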
+ + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "firehose:PutRecord", + "firehose:PutRecordBatch", + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:AssociateAddress" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + Action: + - 'sts:AssumeRole' + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + NodeTaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - + Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + Resource: + - Fn::Join: + - "" + - + - "arn:aws:s3:::" + - !FindInMap + - RegionMap + - !Ref 'AWS::Region' + - !Ref Network + + NodeLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName}-node + RetentionInDays: 14 + + NodeECSService: + Type: AWS::ECS::Service + DependsOn: NodeListener + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref NodeDesiredCount + HealthCheckGracePeriodSeconds: 3600 + TaskDefinition: !Ref NodeTaskDefinition + 
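+      # The one-hour HealthCheckGracePeriodSeconds above keeps the NLB health
+      # checks from flagging a freshly placed task while it is still importing
+      # chain data and catching up to the network.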
LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 150 + MinimumHealthyPercent: 50 + LoadBalancers: + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref RpcPort + TargetGroupArn: !Ref NodeTargetGroup + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref WsPort + TargetGroupArn: !Ref NodeWsTargetGroup + + NodeTaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Ref NodeTaskName + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref NodeTaskExecutionRole + ContainerDefinitions: + - Name: !Ref NodeTaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Essential: true + MemoryReservation: !Ref MemoryLimit + Environment: + - Name: "region" + Value: !Ref AWS::Region + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: "connections" + Value: !Ref Connections + - Name: "rpcport" + Value: !Ref RpcPort + - Name: "wsport" + Value: !Ref WsPort + - Name: "netport" + Value: !Ref NetPort + - Name: "chainbucket" + Value: !FindInMap + - RegionMap + - !Ref 'AWS::Region' + - !Ref Network + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref RpcPort + - ContainerPort: !Ref WsPort + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref NodeLogGroup + awslogs-stream-prefix: !Ref AWS::StackName + #HealthCheck: + # Command: + # - CMD-SHELL + # - '[ `echo "eth.syncing.highestBlock - eth.syncing.currentBlock"|geth attach|head -10|tail -1` -lt 200 ] || exit 1' + # Interval: 300 + # Timeout: 60 + # Retries: 10 + # StartPeriod: 300 + +# CodePipeline Resources + + NodeRepository: + Type: AWS::ECR::Repository + + NodeCodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${NodeRepository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + NodeCodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + NodeArtifactBucket: + Type: AWS::S3::Bucket + + NodeCodeBuildProject: + Type: AWS::CodeBuild::Project + Properties: + 
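+      # The inline buildspec below logs in to ECR, tags the image with the
+      # first 8 characters of the commit SHA, pushes that tag plus :latest,
+      # and emits images.json for the pipeline's ECS deploy stage.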
Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.node ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . + post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-node","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_LARGE + Image: aws/codebuild/amazonlinux2-aarch64-standard:1.0 + Type: ARM_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Cache: + Type: S3 + Location: !Sub ${NodeArtifactBucket}/buildcache + Name: !Sub ${AWS::StackName}-node + ServiceRole: !Ref NodeCodeBuildServiceRole + + NodePipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt NodeCodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref NodeArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref NodeGitHubRepo + Branch: !Ref NodeGitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: !Ref NodeCodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + - Name: Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref NodeECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + +# SNS Resources + + SNSTopic: + Type: AWS::SNS::Topic + Properties: + DisplayName: String + Subscription: + - + Endpoint: !Ref SNSSubscriptionEndpoint + Protocol: !Ref SNSSubscriptionProtocol + TopicName: !Ref AWS::StackName + +# CloudWatch Resources + + CPUAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average CPU utilization greater than threshold. + AlarmDescription: Alarm if CPU utilization is greater than threshold. + Namespace: AWS/ECS + MetricName: CPUUtilization + Dimensions: + - Name: ClusterName + Value: !Ref Cluster + Statistic: Average + Period: '60' + EvaluationPeriods: '3' + Threshold: !Ref CPUAlarmThreshold + ComparisonOperator: GreaterThanThreshold + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + + MemoryAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average memory utilization greater than threshold. + AlarmDescription: Alarm if memory utilization is greater than threshold. 
+ Namespace: AWS/ECS + MetricName: MemoryUtilization + Dimensions: + - Name: ClusterName + Value: !Ref Cluster + Statistic: Average + Period: '60' + EvaluationPeriods: '3' + Threshold: !Ref MemoryAlarmThreshold + ComparisonOperator: GreaterThanThreshold + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + + HealthyHostAlarm: + Type: 'AWS::CloudWatch::Alarm' + Properties: + AlarmName: !Sub ${AWS::StackName} alarm no healthy hosts connected to ELB. + AlarmDescription: Alarm if no healthy hosts connected to ELB. + MetricName: HealthyHostCount + Namespace: AWS/NetworkELB + Statistic: Average + Period: '60' + EvaluationPeriods: '3' + Threshold: '1' + ComparisonOperator: LessThanThreshold + Dimensions: + - Name: TargetGroup + Value: !GetAtt NodeTargetGroup.TargetGroupFullName + - Name: LoadBalancer + Value: !GetAtt NodeLoadBalancer.LoadBalancerFullName + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + +Outputs: + ClusterName: + Value: !Ref Cluster + NodeService: + Value: !Ref NodeECSService + NodePipelineUrl: + Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${NodePipeline} + NodeTargetGroup: + Value: !Ref NodeTargetGroup + NodeServiceUrl: + Description: URL of the load balancer for the node service. + Value: !Sub http://${NodeLoadBalancer.DNSName} diff --git a/infra/mev-geth-nodes-x86-64.yaml b/infra/mev-geth-nodes-x86-64.yaml new file mode 100644 index 000000000000..bf7a196caa52 --- /dev/null +++ b/infra/mev-geth-nodes-x86-64.yaml @@ -0,0 +1,972 @@ +--- +AWSTemplateFormatVersion: 2010-09-09 + +Description: > + This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS) + Created by Luke Youngblood, luke@blockscale.net + +Parameters: + # GitHub Parameters + + GitHubUser: + Type: String + Default: lyoungblood + Description: Your team or username on GitHub. + + NodeGitHubRepo: + Type: String + Default: mev-geth + Description: The repo name of the node service. + + NodeGitHubBranch: + Type: String + Default: master + Description: The branch of the node repo to continuously deploy. + + GitHubToken: + Type: String + NoEcho: true + Description: > + Token for the team or user specified above. 
(https://github.com/settings/tokens)
+
+  # VPC Parameters
+
+  VPC:
+    Type: AWS::EC2::VPC::Id
+
+  Subnets:
+    Type: List<AWS::EC2::Subnet::Id>
+
+  VpcCIDR:
+    Type: String
+    Default: 172.31.0.0/16
+
+  # ECS Parameters
+
+  InstanceType:
+    Type: String
+    Default: i3en.large
+
+  MemoryLimit:
+    Type: Number
+    Default: 6144
+
+  KeyPair:
+    Type: AWS::EC2::KeyPair::KeyName
+
+  SpotPrice:
+    Type: Number
+    Default: 0.0904
+
+  ClusterSize:
+    Type: Number
+    Default: 5
+
+  Bandwidth:
+    Type: Number
+    Default: 2048
+
+  BandwidthCeiling:
+    Type: Number
+    Default: 4096
+
+  NodeDesiredCount:
+    Type: Number
+    Default: 0
+
+  NodeTaskName:
+    Type: String
+    Default: mev-geth-node
+
+  ECSAMI:
+    Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
+    Default: /aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id
+
+  # SNS Parameters
+
+  SNSSubscriptionEndpoint:
+    Type: String
+    Default: https://events.pagerduty.com/integration/44cbdb66f22b4f3caf5dd15741c7eb17/enqueue
+
+  SNSSubscriptionProtocol:
+    Type: String
+    Default: HTTPS
+
+  # CloudWatch Alarm Parameters
+
+  CPUAlarmThreshold:
+    Type: Number
+    Default: 80
+
+  MemoryAlarmThreshold:
+    Type: Number
+    Default: 80
+
+  # Mev-Geth Parameters
+
+  Network:
+    Type: String
+    Default: mainnet
+    AllowedValues:
+      - mainnet
+      - goerli
+
+  SyncMode:
+    Type: String
+    Default: fast
+    AllowedValues:
+      - full
+      - fast
+      - light
+
+  Connections:
+    Type: Number
+    Default: 50
+
+  RpcPort:
+    Type: Number
+    Default: 8545
+
+  WsPort:
+    Type: Number
+    Default: 8546
+
+  NetPort:
+    Type: Number
+    Default: 30303
+
+Metadata:
+  AWS::CloudFormation::Interface:
+    ParameterLabels:
+      GitHubUser:
+        default: "User"
+      NodeGitHubRepo:
+        default: "Node Repo"
+      NodeGitHubBranch:
+        default: "Node Branch"
+      GitHubToken:
+        default: "Personal Access Token"
+      VPC:
+        default: "Choose which VPC the autoscaling group should be deployed to"
+      Subnets:
+        default: "Choose which subnets the autoscaling group should be deployed to"
+      VpcCIDR:
+        default: "VPC CIDR Block"
+      InstanceType:
+        default: "Which instance type should we use to build the ECS cluster?"
+      MemoryLimit:
+        default: "How much memory should be reserved for each task. Set to greater than 50% of instance memory capacity."
+      KeyPair:
+        default: "Which keypair should be used to allow SSH to the nodes?"
+      ClusterSize:
+        default: "How many ECS hosts do you want to initially deploy?"
+      SpotPrice:
+        default: "The maximum spot price to pay for instances - this should normally be set to the on demand price."
+      Bandwidth:
+        default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers (upload) per EC2 instance"
+      BandwidthCeiling:
+        default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers as a ceiling (max. upload)"
+      NodeDesiredCount:
+        default: "How many ECS Tasks do you want to initially execute?"
+      NodeTaskName:
+        default: "The name of the node ECS Task"
+      ECSAMI:
+        default: "The ECS AMI ID populated from SSM."
+ Network: + default: "The Ethereum network you will be connecting to" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of desired connections on the Mev-Geth node" + RpcPort: + default: "The RPC port used for communication with the local Mev-Geth node" + WsPort: + default: "The Websockets port used for communication with the local Mev-Geth node" + NetPort: + default: "The TCP port used for connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - NodeGitHubRepo + - NodeGitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - MemoryLimit + - KeyPair + - SpotPrice + - ClusterSize + - Bandwidth + - BandwidthCeiling + - NodeDesiredCount + - NodeTaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - RpcPort + - WsPort + - NetPort + - Label: + default: PagerDuty Endpoint Configuration + Parameters: + - SNSSubscriptionEndpoint + - SNSSubscriptionProtocol + - Label: + default: CloudWatch Alarms Configuration + Parameters: + - CPUAlarmThreshold + - MemoryAlarmThreshold + +# Mappings + +Mappings: + RegionMap: + us-east-2: + mainnet: mev-geth-updater-fast-chainbucket-17p2xhnhcydlz + goerli: mev-geth-updater-fast-goerli-chainbucket-j6dujg8apbna + #us-west-2: + # mainnet: + # goerli: + +Resources: + # ECS Resources + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref RpcPort + ToPort: !Ref RpcPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref WsPort + ToPort: !Ref WsPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 2 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + AssociatePublicIpAddress: True + # Uncomment if you would like to use Spot instances (subject to unexpected termination) + # SpotPrice: !Ref SpotPrice + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum 
install -y aws-cfn-bootstrap hibagent rsync awscli
+          yum update -y
+          service amazon-ssm-agent restart
+
+          # determine if we have an NVMe SSD attached
+          find /dev/nvme1
+          if [ $? -eq 0 ]
+          then
+            mount_point=/var/lib/docker
+
+            # copy existing files from mount point
+            service docker stop
+            echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage
+            mkdir -p /tmp$mount_point
+            rsync -val $mount_point/ /tmp/$mount_point/
+
+            # make a new filesystem and mount it
+            mkfs -t ext4 /dev/nvme1n1
+            mkdir -p $mount_point
+            mount -t ext4 -o noatime /dev/nvme1n1 $mount_point
+
+            # Copy files back to new mount point
+            rsync -val /tmp/$mount_point/ $mount_point/
+            rm -rf /tmp$mount_point
+            service docker start
+
+            # Persist the NVMe filesystem mount across reboots
+            echo >> /etc/fstab
+            echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab
+          fi
+
+          # Set Linux traffic control to limit outbound bandwidth usage of peering
+          #tc qdisc add dev eth0 root handle 1:0 htb default 1
+          #tc class add dev eth0 parent 1:0 classid 1:10 htb rate ${Bandwidth}kbit ceil ${BandwidthCeiling}kbit prio 0
+          #tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dport 30303 0xffff flowid 1:10
+
+          /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration
+          /opt/aws/bin/cfn-signal -e $? --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup
+          /usr/bin/enable-ec2-spot-hibernation
+
+          # Attach an EIP from the pool of available EIPs in scope "vpc"
+          alloc=`aws ec2 describe-addresses --region ${AWS::Region} --output text | grep -v eni | head -1 | cut -f 2`
+          instanceid=`curl --silent 169.254.169.254/latest/meta-data/instance-id`
+          aws ec2 associate-address --region ${AWS::Region} --allocation-id $alloc --instance-id $instanceid
+          echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config
+
+          reboot
+
+    Metadata:
+      AWS::CloudFormation::Init:
+        config:
+          packages:
+            yum:
+              awslogs: []
+
+          commands:
+            01_add_instance_to_cluster:
+              command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config
+          files:
+            "/etc/cfn/cfn-hup.conf":
+              mode: 000400
+              owner: root
+              group: root
+              content: !Sub |
+                [main]
+                stack=${AWS::StackId}
+                region=${AWS::Region}
+
+            "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
+              content: !Sub |
+                [cfn-auto-reloader-hook]
+                triggers=post.update
+                path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init
+                action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration
+
+          services:
+            sysvinit:
+              cfn-hup:
+                enabled: true
+                ensureRunning: true
+                files:
+                  - /etc/cfn/cfn-hup.conf
+                  - /etc/cfn/hooks.d/cfn-auto-reloader.conf
+
+  NodeLoadBalancer:
+    Type: AWS::ElasticLoadBalancingV2::LoadBalancer
+    Properties:
+      Name: !Sub ${AWS::StackName}-node-NLB
+      Type: network
+      Scheme: internal
+      Subnets: !Ref Subnets
+      Tags:
+        - Key: Name
+          Value: !Sub ${AWS::StackName}-node-NLB
+
+  NodeTargetGroup:
+    Type: AWS::ElasticLoadBalancingV2::TargetGroup
+    DependsOn: NodeLoadBalancer
+    Properties:
+      VpcId: !Ref VPC
+      Port: !Ref RpcPort
+      Protocol: TCP
+      TargetGroupAttributes:
+        - Key: deregistration_delay.timeout_seconds
+          Value: 120
+
+  NodeListener:
+    Type: AWS::ElasticLoadBalancingV2::Listener
+    Properties:
+      DefaultActions:
+        - Type: forward
+          TargetGroupArn: !Ref NodeTargetGroup
+      LoadBalancerArn: !Ref NodeLoadBalancer
+      Port: !Ref RpcPort
+      Protocol: TCP
+
+  NodeWsTargetGroup:
+    Type: AWS::ElasticLoadBalancingV2::TargetGroup
+    DependsOn:
NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref WsPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeWsListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + TargetGroupArn: !Ref NodeWsTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref WsPort + Protocol: TCP + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. + + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "firehose:PutRecord", + "firehose:PutRecordBatch", + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:AssociateAddress" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + Action: + - "sts:AssumeRole" + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + NodeTaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - 
arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + Resource: + - Fn::Join: + - "" + - - "arn:aws:s3:::" + - !FindInMap + - RegionMap + - !Ref "AWS::Region" + - !Ref Network + + NodeLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName}-node + RetentionInDays: 14 + + NodeECSService: + Type: AWS::ECS::Service + DependsOn: NodeListener + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref NodeDesiredCount + HealthCheckGracePeriodSeconds: 3600 + TaskDefinition: !Ref NodeTaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 150 + MinimumHealthyPercent: 50 + LoadBalancers: + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref RpcPort + TargetGroupArn: !Ref NodeTargetGroup + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref WsPort + TargetGroupArn: !Ref NodeWsTargetGroup + + NodeTaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Ref NodeTaskName + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref NodeTaskExecutionRole + ContainerDefinitions: + - Name: !Ref NodeTaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Essential: true + MemoryReservation: !Ref MemoryLimit + Environment: + - Name: "region" + Value: !Ref AWS::Region + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: "connections" + Value: !Ref Connections + - Name: "rpcport" + Value: !Ref RpcPort + - Name: "wsport" + Value: !Ref WsPort + - Name: "netport" + Value: !Ref NetPort + - Name: "chainbucket" + Value: !FindInMap + - RegionMap + - !Ref "AWS::Region" + - !Ref Network + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref RpcPort + - ContainerPort: !Ref WsPort + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref NodeLogGroup + awslogs-stream-prefix: !Ref AWS::StackName + #HealthCheck: + # Command: + # - CMD-SHELL + # - '[ `echo "eth.syncing.highestBlock - eth.syncing.currentBlock"|geth attach|head -10|tail -1` -lt 200 ] || exit 1' + # Interval: 300 + # Timeout: 60 + # Retries: 10 + # StartPeriod: 300 + + # CodePipeline Resources + + NodeRepository: + Type: AWS::ECR::Repository + + NodeCodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${NodeRepository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + NodeCodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: 
Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + NodeArtifactBucket: + Type: AWS::S3::Bucket + + NodeCodeBuildProject: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.node ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . + post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-node","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_SMALL + Image: aws/codebuild/docker:17.09.0 + Type: LINUX_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Cache: + Type: S3 + Location: !Sub ${NodeArtifactBucket}/buildcache + Name: !Sub ${AWS::StackName}-node + ServiceRole: !Ref NodeCodeBuildServiceRole + + NodePipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt NodeCodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref NodeArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref NodeGitHubRepo + Branch: !Ref NodeGitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: !Ref NodeCodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + - Name: Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref NodeECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + + # SNS Resources + + SNSTopic: + Type: AWS::SNS::Topic + Properties: + DisplayName: String + Subscription: + - Endpoint: !Ref SNSSubscriptionEndpoint + Protocol: !Ref SNSSubscriptionProtocol + TopicName: !Ref AWS::StackName + + # CloudWatch Resources + + CPUAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average CPU utilization greater than threshold. + AlarmDescription: Alarm if CPU utilization is greater than threshold. 
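+      # Evaluates three consecutive 60-second periods, so the metric sits
+      # roughly three minutes above or below the threshold before the SNS
+      # topic is notified of the ALARM or OK transition.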
+      Namespace: AWS/ECS
+      MetricName: CPUUtilization
+      Dimensions:
+        - Name: ClusterName
+          Value: !Ref Cluster
+      Statistic: Average
+      Period: "60"
+      EvaluationPeriods: "3"
+      Threshold: !Ref CPUAlarmThreshold
+      ComparisonOperator: GreaterThanThreshold
+      AlarmActions:
+        - Ref: SNSTopic
+      OKActions:
+        - Ref: SNSTopic
+
+  MemoryAlarm:
+    Type: AWS::CloudWatch::Alarm
+    Properties:
+      AlarmName: !Sub ${AWS::StackName} average memory utilization greater than threshold.
+      AlarmDescription: Alarm if memory utilization is greater than threshold.
+      Namespace: AWS/ECS
+      MetricName: MemoryUtilization
+      Dimensions:
+        - Name: ClusterName
+          Value: !Ref Cluster
+      Statistic: Average
+      Period: "60"
+      EvaluationPeriods: "3"
+      Threshold: !Ref MemoryAlarmThreshold
+      ComparisonOperator: GreaterThanThreshold
+      AlarmActions:
+        - Ref: SNSTopic
+      OKActions:
+        - Ref: SNSTopic
+
+  HealthyHostAlarm:
+    Type: "AWS::CloudWatch::Alarm"
+    Properties:
+      AlarmName: !Sub ${AWS::StackName} alarm no healthy hosts connected to ELB.
+      AlarmDescription: Alarm if no healthy hosts connected to ELB.
+      MetricName: HealthyHostCount
+      Namespace: AWS/NetworkELB
+      Statistic: Average
+      Period: "60"
+      EvaluationPeriods: "3"
+      Threshold: "1"
+      ComparisonOperator: LessThanThreshold
+      Dimensions:
+        - Name: TargetGroup
+          Value: !GetAtt NodeTargetGroup.TargetGroupFullName
+        - Name: LoadBalancer
+          Value: !GetAtt NodeLoadBalancer.LoadBalancerFullName
+      AlarmActions:
+        - Ref: SNSTopic
+      OKActions:
+        - Ref: SNSTopic
+
+Outputs:
+  ClusterName:
+    Value: !Ref Cluster
+  NodeService:
+    Value: !Ref NodeECSService
+  NodePipelineUrl:
+    Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${NodePipeline}
+  NodeTargetGroup:
+    Value: !Ref NodeTargetGroup
+  NodeServiceUrl:
+    Description: URL of the load balancer for the node service.
+    Value: !Sub http://${NodeLoadBalancer.DNSName}
diff --git a/infra/mev-geth-updater-arm64.yaml b/infra/mev-geth-updater-arm64.yaml
new file mode 100644
index 000000000000..ad81ece1b034
--- /dev/null
+++ b/infra/mev-geth-updater-arm64.yaml
@@ -0,0 +1,749 @@
+---
+AWSTemplateFormatVersion: 2010-09-09
+
+Description: >
+  This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS)
+  Created by Luke Youngblood, luke@blockscale.net
+
+Parameters:
+
+# GitHub Parameters
+
+  GitHubUser:
+    Type: String
+    Default: lyoungblood
+    Description: Your team or username on GitHub.
+
+  GitHubRepo:
+    Type: String
+    Default: mev-geth
+    Description: The repo name of the updater service.
+
+  GitHubBranch:
+    Type: String
+    Default: master
+    Description: The branch of the repo to continuously deploy.
+
+  GitHubToken:
+    Type: String
+    NoEcho: true
+    Description: >
+      Token for the team or user specified above.
(https://github.com/settings/tokens)
+
+# VPC Parameters
+
+  VPC:
+    Type: AWS::EC2::VPC::Id
+
+  Subnets:
+    Type: List<AWS::EC2::Subnet::Id>
+
+  VpcCIDR:
+    Type: String
+    Default: 172.31.0.0/16
+
+# ECS Parameters
+
+  InstanceType:
+    Type: String
+    Default: m6gd.large
+
+  KeyPair:
+    Type: AWS::EC2::KeyPair::KeyName
+
+  ClusterSize:
+    Type: Number
+    Default: 1
+
+  DesiredCount:
+    Type: Number
+    Default: 0
+
+  TaskName:
+    Type: String
+    Default: mev-geth-updater
+
+  ECSAMI:
+    Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
+    Default: /aws/service/ecs/optimized-ami/amazon-linux-2/arm64/recommended/image_id
+
+# Mev-Geth Parameters
+
+  Network:
+    Type: String
+    Default: mainnet
+    AllowedValues:
+      - mainnet
+      - goerli
+
+  SyncMode:
+    Type: String
+    Default: fast
+    AllowedValues:
+      - full
+      - fast
+      - light
+
+  Connections:
+    Type: Number
+    Default: 50
+
+  NetPort:
+    Type: Number
+    Default: 30303
+
+Metadata:
+
+  AWS::CloudFormation::Interface:
+    ParameterLabels:
+      GitHubUser:
+        default: "User"
+      GitHubRepo:
+        default: "Mev-Geth GitHub Repository"
+      GitHubBranch:
+        default: "Branch in GitHub repository"
+      GitHubToken:
+        default: "Personal Access Token"
+      VPC:
+        default: "Choose which VPC the autoscaling group should be deployed to"
+      Subnets:
+        default: "Choose which subnets the autoscaling group should be deployed to"
+      VpcCIDR:
+        default: "VPC CIDR Block"
+      InstanceType:
+        default: "Which instance type should we use to build the ECS cluster?"
+      KeyPair:
+        default: "Which keypair should be used for access to the ECS cluster?"
+      ClusterSize:
+        default: "How many ECS hosts do you want to initially deploy?"
+      DesiredCount:
+        default: "How many Updater tasks do you want to initially execute?"
+      TaskName:
+        default: "The name of the Updater ECS Task"
+      ECSAMI:
+        default: "The ECS AMI ID populated from SSM."
+ Network: + default: "The network the Mev-Geth node should join" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of connections the Mev-Geth node should be configured with" + NetPort: + default: "The TCP/UDP port used for Mev-Geth connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - GitHubRepo + - GitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - KeyPair + - ClusterSize + - DesiredCount + - TaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - NetPort + +Resources: + +# ECS Resources + + ChainBucket: + Type: AWS::S3::Bucket + + ChainBucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: !Ref ChainBucket + PolicyDocument: + Statement: + - + Action: + - s3:GetObject + - s3:ListBucket + Effect: Allow + Resource: + - Fn::Join: + - "" + - + - "arn:aws:s3:::" + - + Ref: "ChainBucket" + - "/*" + - Fn::Join: + - "" + - + - "arn:aws:s3:::" + - + Ref: "ChainBucket" + Principal: + AWS: "*" + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + Tags: + - + Key: Name + Value: !Sub ${AWS::StackName}-sg + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 0 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? 
-eq 0 ]
+          then
+            mount_point=/var/lib/docker
+
+            # copy existing files from mount point
+            service docker stop
+            echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage
+            mkdir -p /tmp$mount_point
+            rsync -val $mount_point/ /tmp/$mount_point/
+
+            # make a new filesystem and mount it
+            mkfs -t ext4 /dev/nvme1n1
+            mkdir -p $mount_point
+            mount -t ext4 -o noatime /dev/nvme1n1 $mount_point
+
+            # Copy files back to new mount point
+            rsync -val /tmp/$mount_point/ $mount_point/
+            rm -rf /tmp$mount_point
+            service docker start
+
+            # Persist the NVMe filesystem mount across reboots
+            echo >> /etc/fstab
+            echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab
+          fi
+
+          /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration
+          /opt/aws/bin/cfn-signal -e $? --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup
+          /usr/bin/enable-ec2-spot-hibernation
+          echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config
+
+          reboot
+
+    Metadata:
+      AWS::CloudFormation::Init:
+        config:
+          packages:
+            yum:
+              awslogs: []
+
+          commands:
+            01_add_instance_to_cluster:
+              command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config
+          files:
+            "/etc/cfn/cfn-hup.conf":
+              mode: 000400
+              owner: root
+              group: root
+              content: !Sub |
+                [main]
+                stack=${AWS::StackId}
+                region=${AWS::Region}
+
+            "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
+              content: !Sub |
+                [cfn-auto-reloader-hook]
+                triggers=post.update
+                path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init
+                action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration
+
+            "/etc/awslogs/awscli.conf":
+              content: !Sub |
+                [plugins]
+                cwlogs = cwlogs
+                [default]
+                region = ${AWS::Region}
+
+          services:
+            sysvinit:
+              cfn-hup:
+                enabled: true
+                ensureRunning: true
+                files:
+                  - /etc/cfn/cfn-hup.conf
+                  - /etc/cfn/hooks.d/cfn-auto-reloader.conf
+
+  # This IAM Role is attached to all of the ECS hosts. It is based on the default role
+  # published here:
+  # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html
+  #
+  # You can add other IAM policy statements here to allow access from your ECS hosts
+  # to other AWS services.
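+  #
+  # Because the updater task definition sets no separate task role, the
+  # container falls back to this instance role's credentials, and the broad
+  # "s3:*" grant below is what lets it publish chain snapshots. Conceptually
+  # (an illustrative sketch only; the real logic lives in
+  # infra/start-mev-geth-updater.sh):
+  #
+  #   aws s3 sync /root/.ethereum/geth/chaindata "s3://$chainbucket/$s3key" --delete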
+ + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:*" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + Action: + - 'sts:AssumeRole' + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + TaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - + Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + - "s3:Put*" + Resource: + - !GetAtt ChainBucket.Arn + - PolicyName: !Sub ecs-task-SSM-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - + Effect: Allow + Action: + - "ssm:DescribeParameters" + - "ssm:PutParameter" + - "ssm:GetParameters" + Resource: + - !Sub "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/${AWS::StackName}/*" + + LogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName} + RetentionInDays: 14 + + ECSService: + Type: AWS::ECS::Service + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref DesiredCount + TaskDefinition: !Ref TaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 100 + MinimumHealthyPercent: 0 + + TaskDefinition: + Type: AWS::ECS::TaskDefinition + 
Properties: + Family: !Sub ${AWS::StackName}-${TaskName} + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref TaskExecutionRole + ContainerDefinitions: + - Name: !Ref TaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Essential: true + MemoryReservation: 6144 + Environment: + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: "connections" + Value: !Ref Connections + - Name: "netport" + Value: !Ref NetPort + - Name: "region" + Value: !Ref AWS::Region + - Name: "chainbucket" + Value: !Ref ChainBucket + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref LogGroup + awslogs-stream-prefix: !Ref AWS::StackName + +# CodePipeline Resources + + Repository: + Type: AWS::ECR::Repository + + CodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${Repository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + CodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + ArtifactBucket: + Type: AWS::S3::Bucket + + CodeBuildProject: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.updater ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . 
+          post_build:
+            commands:
+              - docker push "$IMAGE_URI"
+              - docker push "${REPOSITORY_URI}:latest"
+              - printf '[{"name":"mev-geth-updater","imageUri":"%s"}]' "$IMAGE_URI" > images.json
+        artifacts:
+          files: images.json
+      Environment:
+        ComputeType: BUILD_GENERAL1_LARGE
+        Image: aws/codebuild/amazonlinux2-aarch64-standard:1.0
+        Type: ARM_CONTAINER
+        PrivilegedMode: true
+        EnvironmentVariables:
+          - Name: AWS_DEFAULT_REGION
+            Value: !Ref AWS::Region
+          - Name: REPOSITORY_URI
+            Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository}
+      Name: !Ref AWS::StackName
+      ServiceRole: !Ref CodeBuildServiceRole
+
+  Pipeline:
+    Type: AWS::CodePipeline::Pipeline
+    Properties:
+      RoleArn: !GetAtt CodePipelineServiceRole.Arn
+      ArtifactStore:
+        Type: S3
+        Location: !Ref ArtifactBucket
+      Stages:
+        - Name: Source
+          Actions:
+            - Name: App
+              ActionTypeId:
+                Category: Source
+                Owner: ThirdParty
+                Version: 1
+                Provider: GitHub
+              Configuration:
+                Owner: !Ref GitHubUser
+                Repo: !Ref GitHubRepo
+                Branch: !Ref GitHubBranch
+                OAuthToken: !Ref GitHubToken
+              OutputArtifacts:
+                - Name: App
+              RunOrder: 1
+        - Name: Build
+          Actions:
+            - Name: Build
+              ActionTypeId:
+                Category: Build
+                Owner: AWS
+                Version: 1
+                Provider: CodeBuild
+              Configuration:
+                ProjectName: !Ref CodeBuildProject
+              InputArtifacts:
+                - Name: App
+              OutputArtifacts:
+                - Name: BuildOutput
+              RunOrder: 1
+        - Name: Deploy
+          Actions:
+            - Name: Deploy
+              ActionTypeId:
+                Category: Deploy
+                Owner: AWS
+                Version: 1
+                Provider: ECS
+              Configuration:
+                ClusterName: !Ref Cluster
+                ServiceName: !Ref ECSService
+                FileName: images.json
+              InputArtifacts:
+                - Name: BuildOutput
+              RunOrder: 1
+
+Outputs:
+
+  ClusterName:
+    Value: !Ref Cluster
+  Service:
+    Value: !Ref ECSService
+  PipelineUrl:
+    Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${Pipeline}
\ No newline at end of file
diff --git a/infra/mev-geth-updater-x86-64.yaml b/infra/mev-geth-updater-x86-64.yaml
new file mode 100644
index 000000000000..a69d1bb10d18
--- /dev/null
+++ b/infra/mev-geth-updater-x86-64.yaml
@@ -0,0 +1,737 @@
+---
+AWSTemplateFormatVersion: 2010-09-09
+
+Description: >
+  This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS)
+  Created by Luke Youngblood, luke@blockscale.net
+
+Parameters:
+  # GitHub Parameters
+
+  GitHubUser:
+    Type: String
+    Default: lyoungblood
+    Description: Your team or username on GitHub.
+
+  GitHubRepo:
+    Type: String
+    Default: mev-geth
+    Description: The repo name of the updater service.
+
+  GitHubBranch:
+    Type: String
+    Default: master
+    Description: The branch of the repo to continuously deploy.
+
+  GitHubToken:
+    Type: String
+    NoEcho: true
+    Description: >
+      Token for the team or user specified above.
(https://github.com/settings/tokens)
+
+  # VPC Parameters
+
+  VPC:
+    Type: AWS::EC2::VPC::Id
+
+  Subnets:
+    Type: List<AWS::EC2::Subnet::Id>
+
+  VpcCIDR:
+    Type: String
+    Default: 172.31.0.0/16
+
+  # ECS Parameters
+
+  InstanceType:
+    Type: String
+    Default: i3en.large
+
+  KeyPair:
+    Type: AWS::EC2::KeyPair::KeyName
+
+  ClusterSize:
+    Type: Number
+    Default: 1
+
+  DesiredCount:
+    Type: Number
+    Default: 0
+
+  TaskName:
+    Type: String
+    Default: mev-geth-updater
+
+  ECSAMI:
+    Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
+    Default: /aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id
+
+  # Mev-Geth Parameters
+
+  Network:
+    Type: String
+    Default: mainnet
+    AllowedValues:
+      - mainnet
+      - goerli
+
+  SyncMode:
+    Type: String
+    Default: fast
+    AllowedValues:
+      - full
+      - fast
+      - light
+
+  Connections:
+    Type: Number
+    Default: 50
+
+  NetPort:
+    Type: Number
+    Default: 30303
+
+Metadata:
+  AWS::CloudFormation::Interface:
+    ParameterLabels:
+      GitHubUser:
+        default: "User"
+      GitHubRepo:
+        default: "Mev-Geth GitHub Repository"
+      GitHubBranch:
+        default: "Branch in GitHub repository"
+      GitHubToken:
+        default: "Personal Access Token"
+      VPC:
+        default: "Choose which VPC the autoscaling group should be deployed to"
+      Subnets:
+        default: "Choose which subnets the autoscaling group should be deployed to"
+      VpcCIDR:
+        default: "VPC CIDR Block"
+      InstanceType:
+        default: "Which instance type should we use to build the ECS cluster?"
+      KeyPair:
+        default: "Which keypair should be used for access to the ECS cluster?"
+      ClusterSize:
+        default: "How many ECS hosts do you want to initially deploy?"
+      DesiredCount:
+        default: "How many Updater tasks do you want to initially execute?"
+      TaskName:
+        default: "The name of the Updater ECS Task"
+      ECSAMI:
+        default: "The ECS AMI ID populated from SSM."
+ Network: + default: "The network the Mev-Geth node should join" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of connections the Mev-Geth node should be configured with" + NetPort: + default: "The TCP/UDP port used for Mev-Geth connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - GitHubRepo + - GitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - KeyPair + - ClusterSize + - DesiredCount + - TaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - NetPort + +Resources: + # ECS Resources + + ChainBucket: + Type: AWS::S3::Bucket + + ChainBucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: !Ref ChainBucket + PolicyDocument: + Statement: + - Action: + - s3:GetObject + - s3:ListBucket + Effect: Allow + Resource: + - Fn::Join: + - "" + - - "arn:aws:s3:::" + - Ref: "ChainBucket" + - "/*" + - Fn::Join: + - "" + - - "arn:aws:s3:::" + - Ref: "ChainBucket" + Principal: + AWS: "*" + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + Tags: + - Key: Name + Value: !Sub ${AWS::StackName}-sg + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 0 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? 
-eq 0 ] + then + mount_point=/var/lib/docker + + # copy existing files from mount point + service docker stop + echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage + mkdir -p /tmp$mount_point + rsync -val $mount_point/ /tmp/$mount_point/ + + # make a new filesystem and mount it + mkfs -t ext4 /dev/nvme1n1 + mkdir -p $mount_point + mount -t ext4 -o noatime /dev/nvme1n1 $mount_point + + # Copy files back to new mount point + rsync -val /tmp/$mount_point/ $mount_point/ + rm -rf /tmp$mount_point + service docker start + + # Make raid appear on reboot + echo >> /etc/fstab + echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab + fi + + /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + /opt/aws/bin/cfn-signal -e $? --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup + /usr/bin/enable-ec2-spot-hibernation + echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config + + reboot + + Metadata: + AWS::CloudFormation::Init: + config: + packages: + yum: + awslogs: [] + + commands: + 01_add_instance_to_cluster: + command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config + files: + "/etc/cfn/cfn-hup.conf": + mode: 000400 + owner: root + group: root + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init + action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + + "/etc/awslogs/awscli.conf": + content: !Sub | + [plugins] + cwlogs = cwlogs + [default] + region = ${AWS::Region} + + services: + sysvinit: + cfn-hup: + enabled: true + ensureRunning: true + files: + - /etc/cfn/cfn-hup.conf + - /etc/cfn/hooks.d/cfn-auto-reloader.conf + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. 
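+  # For example, a statement like the following (a sketch only; the parameter
+  # path is hypothetical) would let the hosts read their own SSM parameters:
+  #   { "Effect": "Allow",
+  #     "Action": ["ssm:GetParameter"],
+  #     "Resource": "arn:aws:ssm:*:*:parameter/mev-geth/*" }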
+ + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:*" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + Action: + - "sts:AssumeRole" + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + TaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + - "s3:Put*" + Resource: + - !GetAtt ChainBucket.Arn + - PolicyName: !Sub ecs-task-SSM-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - "ssm:DescribeParameters" + - "ssm:PutParameter" + - "ssm:GetParameters" + Resource: + - !Sub "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/${AWS::StackName}/*" + + LogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName} + RetentionInDays: 14 + + ECSService: + Type: AWS::ECS::Service + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref DesiredCount + TaskDefinition: !Ref TaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 100 + MinimumHealthyPercent: 0 + + TaskDefinition: + Type: AWS::ECS::TaskDefinition + 
Properties: + Family: !Sub ${AWS::StackName}-${TaskName} + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref TaskExecutionRole + ContainerDefinitions: + - Name: !Ref TaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Essential: true + MemoryReservation: 6144 + Environment: + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: "connections" + Value: !Ref Connections + - Name: "netport" + Value: !Ref NetPort + - Name: "region" + Value: !Ref AWS::Region + - Name: "chainbucket" + Value: !Ref ChainBucket + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref LogGroup + awslogs-stream-prefix: !Ref AWS::StackName + + # CodePipeline Resources + + Repository: + Type: AWS::ECR::Repository + + CodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${Repository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + CodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + ArtifactBucket: + Type: AWS::S3::Bucket + + CodeBuildProject: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.updater ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . 
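+                # Both builds run against the same Docker daemon, so the second is
+                # served from the layer cache of the first and tagging "latest"
+                # adds no meaningful build time.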
+ post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-updater","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_SMALL + Image: aws/codebuild/docker:17.09.0 + Type: LINUX_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Name: !Ref AWS::StackName + ServiceRole: !Ref CodeBuildServiceRole + + Pipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt CodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref ArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref GitHubRepo + Branch: !Ref GitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: !Ref CodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + - Name: Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref ECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + +Outputs: + ClusterName: + Value: !Ref Cluster + Service: + Value: !Ref ECSService + PipelineUrl: + Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${Pipeline} diff --git a/infra/start-mev-geth-node.sh b/infra/start-mev-geth-node.sh new file mode 100755 index 000000000000..05ad50c61003 --- /dev/null +++ b/infra/start-mev-geth-node.sh @@ -0,0 +1,96 @@ +#!/bin/sh -x +# Starts the Mev-Geth node client +# Written by Luke Youngblood, luke@blockscale.net + +# network=mainnet # normally set by environment +# syncmode=fast # normally set by environment +# rpcport=8545 # normally set by environment +# wsport=8546 # normally set by environment +# netport=30303 # normally set by environment + +init_node() { + # You can put any commands you would like to run to initialize the node here. + echo Initializing node... +} + +start_node() { + if [ $network = "goerli" ] + then + geth \ + --port $netport \ + --http \ + --http.addr 0.0.0.0 \ + --http.port $rpcport \ + --http.api eth,net,web3 \ + --http.vhosts '*' \ + --http.corsdomain '*' \ + --graphql \ + --graphql.corsdomain '*' \ + --graphql.vhosts '*' \ + --ws \ + --ws.addr 0.0.0.0 \ + --ws.port $wsport \ + --ws.api eth,net,web3 \ + --ws.origins '*' \ + --syncmode $syncmode \ + --cache 4096 \ + --maxpeers $connections \ + --goerli + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." + exit 1 + fi + else + geth \ + --port $netport \ + --http \ + --http.addr 0.0.0.0 \ + --http.port $rpcport \ + --http.api eth,net,web3 \ + --http.vhosts '*' \ + --http.corsdomain '*' \ + --graphql \ + --graphql.corsdomain '*' \ + --graphql.vhosts '*' \ + --ws \ + --ws.addr 0.0.0.0 \ + --ws.port $wsport \ + --ws.api eth,net,web3 \ + --ws.origins '*' \ + --syncmode $syncmode \ + --cache 4096 \ + --maxpeers $connections + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." 
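+        # Note: geth runs in the foreground here, so this branch is reached
+        # only after the geth process itself has exited with an error.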
+ exit 1 + fi + fi +} + +s3_sync() { + # Determine data directory + if [ $network = "goerli" ] + then + datadir=/root/.ethereum/goerli/geth/chaindata + else + datadir=/root/.ethereum/geth/chaindata + fi + # If the current1 key exists, node1 is the most current set of blockchain data + echo "A 404 error below is expected and nothing to be concerned with." + aws s3api head-object --request-payer requester --bucket $chainbucket --key current1 + if [ $? -eq 0 ] + then + s3key=node1 + else + s3key=node2 + fi + aws s3 sync --only-show-errors --request-payer requester --region $region s3://$chainbucket/$s3key $datadir +} + +# main + +init_node +s3_sync +start_node diff --git a/infra/start-mev-geth-updater.sh b/infra/start-mev-geth-updater.sh new file mode 100755 index 000000000000..11a6a533aa14 --- /dev/null +++ b/infra/start-mev-geth-updater.sh @@ -0,0 +1,181 @@ +#!/bin/sh -x +# Starts the Mev-Geth updater client +# Written by Luke Youngblood, luke@blockscale.net + +# netport=30303 # normally set by environment + +init_node() { + # Initialization steps can go here + echo Initializing node... + aws configure set default.s3.max_concurrent_requests 64 + aws configure set default.s3.max_queue_size 20000 +} + +start_node() { + if [ $network = "goerli" ] + then + geth \ + --port $netport \ + --syncmode $syncmode \ + --cache 4096 \ + --maxpeers $connections \ + --goerli & + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." + exit 1 + fi + else + geth \ + --port $netport \ + --syncmode $syncmode \ + --cache 4096 \ + --maxpeers $connections & + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." + exit 1 + fi + fi +} + +s3_sync_down() { + # Determine data directory + if [ $network = "goerli" ] + then + datadir=/root/.ethereum/goerli/geth/chaindata + else + datadir=/root/.ethereum/geth/chaindata + fi + + # If the current1 object exists, node1 is the key we should download + echo "A 404 error below is expected and nothing to be concerned with." + aws s3api head-object --bucket $chainbucket --key current1 + if [ $? -eq 0 ] + then + echo "current1 key exists; downloading node1" + s3key=node1 + else + echo "current1 key doesn't exist; downloading node2" + s3key=node2 + fi + + aws s3 sync --region $region --only-show-errors s3://$chainbucket/$s3key $datadir + if [ $? -ne 0 ] + then + echo "aws s3 sync command failed; exiting." + exit 2 + fi +} + +kill_node() { + tries=0 + while [ ! -z `ps -ef |grep geth|grep -v geth-updater|grep -v grep|awk '{print $1}'` ] + do + ps -ef |grep geth|grep -v geth-updater|grep -v grep + pid=`ps -ef |grep geth|grep -v geth-updater|grep -v grep|awk '{print $1}'` + kill $pid + sleep 30 + echo "Waiting for the node to shutdown cleanly... try number $tries" + let "tries+=1" + if [ $tries -gt 29 ] + then + echo "Node has not stopped cleanly after $tries, forcibly killing." + ps -ef |grep geth|grep -v geth-updater|grep -v grep + pid=`ps -ef |grep geth|grep -v geth-updater|grep -v grep|awk '{print $1}'` + kill -9 $pid + fi + if [ $tries -gt 30 ] + then + echo "Node has not stopped cleanly after $tries, exiting..." + exit 3 + fi + done +} + +s3_sync_up() { + # Determine data directory + if [ $network = "goerli" ] + then + datadir=/root/.ethereum/goerli/geth/chaindata + else + datadir=/root/.ethereum/geth/chaindata + fi + + # If the current1 object exists, node1 is the folder that clients will download, so we should update node2 + aws s3api head-object --bucket $chainbucket --key current1 + if [ $? 
-eq 0 ] + then + echo "current1 key exists; updating node2" + s3key=node2 + else + echo "current1 key doesn't exist; updating node1" + s3key=node1 + fi + + aws s3 sync --delete --region $region --only-show-errors --acl public-read $datadir s3://$chainbucket/$s3key + if [ $? -ne 0 ] + then + echo "aws s3 sync upload command failed; exiting." + exit 4 + fi + + if [ "$s3key" = "node2" ] + then + echo "Removing current1 key, as the node2 key was just updated." + aws s3 rm --region $region s3://$chainbucket/current1 + if [ $? -ne 0 ] + then + echo "aws s3 rm command failed; retrying." + sleep 5 + aws s3 rm --region $region s3://$chainbucket/current1 + if [ $? -ne 0 ] + then + echo "aws s3 rm command failed; exiting." + exit 5 + fi + fi + else + echo "Touching current1 key, as the node1 key was just updated." + touch ~/current1 + aws s3 cp --region $region --acl public-read ~/current1 s3://$chainbucket/ + if [ $? -ne 0 ] + then + echo "aws s3 cp command failed; retrying." + sleep 5 + aws s3 cp --region $region --acl public-read ~/current1 s3://$chainbucket/ + if [ $? -ne 0 ] + then + echo "aws s3 cp command failed; exiting." + exit 6 + fi + fi + fi +} + +continuous() { + # This function continuously stops the node every hour + # and syncs the chain data with S3, then restarts the node. + while true + do + echo "Sleeping for 60 minutes at `date`..." + sleep 3600 + echo "Cleanly shutting down the node so we can update S3 with the latest chaindata at `date`..." + kill_node + echo "Syncing chain data to S3 at `date`..." + s3_sync_up + echo "Restarting the node after syncing to S3 at `date`..." + start_node + done +} + +# main + +echo "Initializing the node at `date`..." +init_node +echo "Syncing initial chain data with stored chain data in S3 at `date`..." +s3_sync_down +echo "Starting the node at `date`..." +start_node +echo "Starting the continuous loop at `date`..." +continuous diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 366d5714175d..5599714f7ab3 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2082,3 +2082,54 @@ func toHexSlice(b [][]byte) []string { } return r } + +// ---------------------------------------------------------------- FlashBots ---------------------------------------------------------------- + +// PrivateTxBundleAPI offers an API for accepting bundled transactions +type PrivateTxBundleAPI struct { + b Backend +} + +// NewPrivateTxBundleAPI creates a new Tx Bundle API instance. +func NewPrivateTxBundleAPI(b Backend) *PrivateTxBundleAPI { + return &PrivateTxBundleAPI{b} +} + +// SendBundleArgs represents the arguments for a call. +type SendBundleArgs struct { + Txs []hexutil.Bytes `json:"txs"` + BlockNumber rpc.BlockNumber `json:"blockNumber"` + MinTimestamp *uint64 `json:"minTimestamp"` + MaxTimestamp *uint64 `json:"maxTimestamp"` + RevertingTxHashes []common.Hash `json:"revertingTxHashes"` +} + +// SendBundle will add the signed transaction to the transaction pool. 
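+// A client submits it over JSON-RPC as eth_sendBundle with a single args
+// object, e.g. (a sketch; the values are placeholders):
+//   {"jsonrpc":"2.0","id":1,"method":"eth_sendBundle",
+//    "params":[{"txs":["0x02f8..."],"blockNumber":"0xcd23a0"}]}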
+// The sender is responsible for signing the transaction and using the correct nonce and ensuring validity +func (s *PrivateTxBundleAPI) SendBundle(ctx context.Context, args SendBundleArgs) error { + var txs types.Transactions + if len(args.Txs) == 0 { + return errors.New("bundle missing txs") + } + if args.BlockNumber == 0 { + return errors.New("bundle missing blockNumber") + } + + for _, encodedTx := range args.Txs { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(encodedTx); err != nil { + return err + } + txs = append(txs, tx) + } + + var minTimestamp, maxTimestamp uint64 + if args.MinTimestamp != nil { + minTimestamp = *args.MinTimestamp + } + if args.MaxTimestamp != nil { + maxTimestamp = *args.MaxTimestamp + } + + return s.b.SendBundle(ctx, txs, args.BlockNumber, minTimestamp, maxTimestamp, args.RevertingTxHashes) +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index bc60fb2a64f6..bcdccf2bd9d6 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -74,6 +74,7 @@ type Backend interface { // Transaction pool API SendTx(ctx context.Context, signedTx *types.Transaction) error + SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) GetPoolTransactions() (types.Transactions, error) GetPoolTransaction(txHash common.Hash) *types.Transaction @@ -137,6 +138,11 @@ func GetAPIs(apiBackend Backend) []rpc.API { Version: "1.0", Service: NewPrivateAccountAPI(apiBackend, nonceLock), Public: false, + }, { + Namespace: "eth", + Version: "1.0", + Service: NewPrivateTxBundleAPI(apiBackend), + Public: true, }, } } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 87bf464157ba..7fb98255d0ff 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -581,6 +581,11 @@ web3._extend({ call: 'eth_getLogs', params: 1, }), + new web3._extend.Method({ + name: 'sendBundle', + call: 'eth_sendBundle', + params: 1, + }), ], properties: [ new web3._extend.Property({ diff --git a/les/api_backend.go b/les/api_backend.go index 11a9ca128aab..9bb08c79f6a7 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -198,6 +198,9 @@ func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) func (b *LesApiBackend) RemoveTx(txHash common.Hash) { b.eth.txPool.RemoveTx(txHash) } +func (b *LesApiBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error { + return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp, revertingTxHashes) +} func (b *LesApiBackend) GetPoolTransactions() (types.Transactions, error) { return b.eth.txPool.GetTransactions() diff --git a/light/txpool.go b/light/txpool.go index a7df4aeec388..f8563f91d3d6 100644 --- a/light/txpool.go +++ b/light/txpool.go @@ -550,3 +550,14 @@ func (pool *TxPool) RemoveTx(hash common.Hash) { pool.chainDb.Delete(hash[:]) pool.relay.Discard([]common.Hash{hash}) } + +// MevBundles returns a list of bundles valid for the given blockNumber/blockTimestamp +// also prunes bundles that are outdated +func (pool *TxPool) MevBundles(blockNumber *big.Int, blockTimestamp uint64) ([]types.Transactions, error) { + return nil, nil +} + +// AddMevBundle adds a mev bundle to the pool +func 
(pool *TxPool) AddMevBundle(txs types.Transactions, blockNumber *big.Int, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error { + return nil +} diff --git a/miner/miner.go b/miner/miner.go index 20e12c240e12..923adef9f5d1 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -45,21 +45,22 @@ type Backend interface { // Config is the configuration parameters of mining. type Config struct { - Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) - Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). - NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages - ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner - GasFloor uint64 // Target gas floor for mined blocks. - GasCeil uint64 // Target gas ceiling for mined blocks. - GasPrice *big.Int // Minimum gas price for mining a transaction - Recommit time.Duration // The time interval for miner to re-create mining work. - Noverify bool // Disable remote mining solution verification(only useful in ethash). + Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) + Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash). + NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages + ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner + GasFloor uint64 // Target gas floor for mined blocks. + GasCeil uint64 // Target gas ceiling for mined blocks. + GasPrice *big.Int // Minimum gas price for mining a transaction + Recommit time.Duration // The time interval for miner to re-create mining work. + Noverify bool // Disable remote mining solution verification(only useful in ethash). + MaxMergedBundles int } // Miner creates blocks and searches for proof-of-work values. type Miner struct { mux *event.TypeMux - worker *worker + worker *multiWorker coinbase common.Address eth Backend engine consensus.Engine @@ -78,7 +79,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even exitCh: make(chan struct{}), startCh: make(chan common.Address), stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), + worker: newMultiWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), } miner.wg.Add(1) go miner.update() @@ -190,7 +191,7 @@ func (miner *Miner) SetRecommitInterval(interval time.Duration) { // Pending returns the currently pending block and associated state. func (miner *Miner) Pending() (*types.Block, *state.StateDB) { - return miner.worker.pending() + return miner.worker.regularWorker.pending() } // PendingBlock returns the currently pending block. @@ -199,7 +200,7 @@ func (miner *Miner) Pending() (*types.Block, *state.StateDB) { // simultaneously, please use Pending(), as the pending state can // change between multiple method calls func (miner *Miner) PendingBlock() *types.Block { - return miner.worker.pendingBlock() + return miner.worker.regularWorker.pendingBlock() } // PendingBlockAndReceipts returns the currently pending block and corresponding receipts. @@ -238,11 +239,11 @@ func (miner *Miner) DisablePreseal() { // GetSealingBlock retrieves a sealing block based on the given parameters. // The returned block is not sealed but all other fields should be filled. 
func (miner *Miner) GetSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) { - return miner.worker.getSealingBlock(parent, timestamp, coinbase, random) + return miner.worker.regularWorker.getSealingBlock(parent, timestamp, coinbase, random) } // SubscribePendingLogs starts delivering logs from pending transactions // to the given channel. func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { - return miner.worker.pendingLogsFeed.Subscribe(ch) + return miner.worker.regularWorker.pendingLogsFeed.Subscribe(ch) } diff --git a/miner/multi_worker.go b/miner/multi_worker.go new file mode 100644 index 000000000000..9a39983c5a43 --- /dev/null +++ b/miner/multi_worker.go @@ -0,0 +1,118 @@ +package miner + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +type multiWorker struct { + workers []*worker + regularWorker *worker +} + +func (w *multiWorker) stop() { + for _, worker := range w.workers { + worker.stop() + } +} + +func (w *multiWorker) start() { + for _, worker := range w.workers { + worker.start() + } +} + +func (w *multiWorker) close() { + for _, worker := range w.workers { + worker.close() + } +} + +func (w *multiWorker) isRunning() bool { + for _, worker := range w.workers { + if worker.isRunning() { + return true + } + } + return false +} + +// pendingBlockAndReceipts returns pending block and corresponding receipts from the `regularWorker` +func (w *multiWorker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { + // return a snapshot to avoid contention on currentMu mutex + return w.regularWorker.pendingBlockAndReceipts() +} + +func (w *multiWorker) setGasCeil(ceil uint64) { + for _, worker := range w.workers { + worker.setGasCeil(ceil) + } +} + +func (w *multiWorker) setExtra(extra []byte) { + for _, worker := range w.workers { + worker.setExtra(extra) + } +} + +func (w *multiWorker) setRecommitInterval(interval time.Duration) { + for _, worker := range w.workers { + worker.setRecommitInterval(interval) + } +} + +func (w *multiWorker) setEtherbase(addr common.Address) { + for _, worker := range w.workers { + worker.setEtherbase(addr) + } +} + +func (w *multiWorker) enablePreseal() { + for _, worker := range w.workers { + worker.enablePreseal() + } +} + +func (w *multiWorker) disablePreseal() { + for _, worker := range w.workers { + worker.disablePreseal() + } +} + +func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *multiWorker { + queue := make(chan *task) + + regularWorker := newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ + isFlashbots: false, + queue: queue, + }) + + workers := []*worker{regularWorker} + + for i := 1; i <= config.MaxMergedBundles; i++ { + workers = append(workers, + newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ + isFlashbots: true, + queue: queue, + maxMergedBundles: i, + })) + } + + log.Info("creating multi worker", "config.MaxMergedBundles", config.MaxMergedBundles, "worker", len(workers)) + return &multiWorker{ + regularWorker: regularWorker, + workers: workers, + } +} + +type flashbotsData struct { + isFlashbots bool 
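+	// queue is shared by all workers: flashbots workers publish their sealing
+	// tasks into it, and the regular worker drains it into its own task loop,
+	// so a single result/task loop picks the block that actually gets sealed.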
+ queue chan *task + maxMergedBundles int +} diff --git a/miner/worker.go b/miner/worker.go index c6927a1ca1e8..1052b5291621 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "math/big" + "sort" "sync" "sync/atomic" "time" @@ -39,7 +40,7 @@ import ( const ( // resultQueueSize is the size of channel listening to sealing result. - resultQueueSize = 10 + resultQueueSize = 20 // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. @@ -88,6 +89,7 @@ type environment struct { tcount int // tx count in cycle gasPool *core.GasPool // available gas used to pack transactions coinbase common.Address + profit *big.Int header *types.Header txs []*types.Transaction @@ -104,6 +106,7 @@ func (env *environment) copy() *environment { family: env.family.Clone(), tcount: env.tcount, coinbase: env.coinbase, + profit: new(big.Int).Set(env.profit), header: types.CopyHeader(env.header), receipts: copyReceipts(env.receipts), } @@ -147,6 +150,10 @@ type task struct { state *state.StateDB block *types.Block createdAt time.Time + + profit *big.Int + isFlashbots bool + worker int } const ( @@ -239,6 +246,8 @@ type worker struct { // External functions isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner. + flashbots *flashbotsData + // Test hooks newTaskHook func(*task) // Method to call upon receiving a new sealing task. skipSealHook func(*task) bool // Method to decide whether skipping the sealing. @@ -246,7 +255,30 @@ type worker struct { resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. } -func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, flashbots *flashbotsData) *worker { + exitCh := make(chan struct{}) + taskCh := make(chan *task) + if flashbots.isFlashbots { + // publish to the flashbots queue + taskCh = flashbots.queue + } else { + // read from the flashbots queue + go func() { + for { + select { + case flashbotsTask := <-flashbots.queue: + select { + case taskCh <- flashbotsTask: + case <-exitCh: + return + } + case <-exitCh: + return + } + } + }() + } + worker := &worker{ config: config, chainConfig: chainConfig, @@ -264,12 +296,13 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), newWorkCh: make(chan *newWorkReq), getWorkCh: make(chan *getWorkReq), - taskCh: make(chan *task), + taskCh: taskCh, resultCh: make(chan *types.Block, resultQueueSize), - exitCh: make(chan struct{}), + exitCh: exitCh, startCh: make(chan struct{}, 1), resubmitIntervalCh: make(chan time.Duration), resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), + flashbots: flashbots, } // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) @@ -284,11 +317,15 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus recommit = minRecommitInterval } - worker.wg.Add(4) + worker.wg.Add(2) go worker.mainLoop() go worker.newWorkLoop(recommit) - go worker.resultLoop() - go worker.taskLoop() + if !flashbots.isFlashbots { + // 
only mine if not flashbots + worker.wg.Add(2) + go worker.resultLoop() + go worker.taskLoop() + } // Submit first work to initialize pending state. if init { @@ -629,6 +666,9 @@ func (w *worker) taskLoop() { var ( stopCh chan struct{} prev common.Hash + + prevParentHash common.Hash + prevProfit *big.Int ) // interrupt aborts the in-flight sealing task. @@ -649,10 +689,20 @@ func (w *worker) taskLoop() { if sealHash == prev { continue } + + taskParentHash := task.block.Header().ParentHash + // reject new tasks which don't profit + if taskParentHash == prevParentHash && + prevProfit != nil && task.profit.Cmp(prevProfit) < 0 { + continue + } + prevParentHash = taskParentHash + prevProfit = task.profit + // Interrupt previous sealing operation interrupt() stopCh, prev = make(chan struct{}), sealHash - + log.Info("Proposed miner block", "blockNumber", task.block.Number(), "profit", ethIntToFloat(prevProfit), "isFlashbots", task.isFlashbots, "sealhash", sealHash, "parentHash", prevParentHash, "worker", task.worker) if w.skipSealHook != nil && w.skipSealHook(task) { continue } @@ -775,6 +825,7 @@ func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase com family: mapset.NewSet(), header: header, uncles: make(map[common.Hash]*types.Header), + profit: new(big.Int), } // when 08 is processed ancestors contain 07 (quick block) for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { @@ -786,6 +837,7 @@ func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase com } // Keep track of transactions which return errors so they can be removed env.tcount = 0 + env.gasPool = new(core.GasPool).AddGas(header.GasLimit) return env, nil } @@ -830,6 +882,11 @@ func (w *worker) updateSnapshot(env *environment) { func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) { snap := env.state.Snapshot() + gasPrice, err := tx.EffectiveGasTip(env.header.BaseFee) + if err != nil { + return nil, err + } + receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { env.state.RevertToSnapshot(snap) @@ -838,9 +895,121 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* env.txs = append(env.txs, tx) env.receipts = append(env.receipts, receipt) + gasUsed := new(big.Int).SetUint64(receipt.GasUsed) + env.profit.Add(env.profit, gasUsed.Mul(gasUsed, gasPrice)) + return receipt.Logs, nil } +func (w *worker) commitBundle(env *environment, txs types.Transactions, interrupt *int32) bool { + gasLimit := env.header.GasLimit + if env.gasPool == nil { + env.gasPool = new(core.GasPool).AddGas(gasLimit) + } + + var coalescedLogs []*types.Log + + for _, tx := range txs { + // In the following three cases, we will interrupt the execution of the transaction. + // (1) new head block event arrival, the interrupt signal is 1 + // (2) worker start or restart, the interrupt signal is 1 + // (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2. + // For the first two cases, the semi-finished work will be discarded. + // For the third case, the semi-finished work will be submitted to the consensus engine. + if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { + // Notify resubmit loop to increase resubmitting interval due to too frequent commits. 
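+			// The ratio below is the fraction of the block gas limit already
+			// consumed, floored at 0.1: e.g. with a 30M gas limit and 27M gas
+			// still unused, the 0.1 floor applies.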
+			if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
+				ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
+				if ratio < 0.1 {
+					ratio = 0.1
+				}
+				w.resubmitAdjustCh <- &intervalAdjust{
+					ratio: ratio,
+					inc:   true,
+				}
+			}
+			return atomic.LoadInt32(interrupt) == commitInterruptNewHead
+		}
+		// If we don't have enough gas for any further transactions then we're done
+		if env.gasPool.Gas() < params.TxGas {
+			log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
+			break
+		}
+
+		// Error may be ignored here. The error has already been checked
+		// during transaction acceptance in the transaction pool.
+		//
+		// We use the eip155 signer regardless of the current hf.
+		from, _ := types.Sender(env.signer, tx)
+		// Check whether the tx is replay protected. If we're not in the EIP155 hf
+		// phase, start ignoring the sender until we do.
+		if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
+			log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
+
+			return true
+		}
+		// Start executing the transaction
+		env.state.Prepare(tx.Hash(), env.tcount)
+
+		logs, err := w.commitTransaction(env, tx)
+		switch {
+		case errors.Is(err, core.ErrGasLimitReached):
+			// Pop the current out-of-gas transaction without shifting in the next from the account
+			log.Trace("Gas limit exceeded for current block", "sender", from)
+			return true
+
+		case errors.Is(err, core.ErrNonceTooLow):
+			// New head notification data race between the transaction pool and miner, shift
+			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+			return true
+
+		case errors.Is(err, core.ErrNonceTooHigh):
+			// Reorg notification data race between the transaction pool and miner, skip account
+			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
+			return true
+
+		case errors.Is(err, nil):
+			// Everything ok, collect the logs and shift in the next transaction from the same account
+			coalescedLogs = append(coalescedLogs, logs...)
+			env.tcount++
+			continue
+
+		case errors.Is(err, core.ErrTxTypeNotSupported):
+			// Pop the unsupported transaction without shifting in the next from the account
+			log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
+			return true
+
+		default:
+			// Strange error, discard the transaction and get the next in line (note, the
+			// nonce-too-high clause will prevent us from executing in vain).
+			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+			return true
+		}
+	}
+
+	if !w.isRunning() && len(coalescedLogs) > 0 {
+		// We don't push the pendingLogsEvent while we are sealing. The reason is that
+		// when we are sealing, the worker will regenerate a sealing block every 3 seconds.
+		// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.
+
+		// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
+		// logs by filling in the block hash when the block was mined by the local miner. This can
+		// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
+		cpy := make([]*types.Log, len(coalescedLogs))
+		for i, l := range coalescedLogs {
+			cpy[i] = new(types.Log)
+			*cpy[i] = *l
+		}
+		w.pendingLogsFeed.Send(cpy)
+	}
+	// Notify resubmit loop to decrease resubmitting interval if current interval is larger
+	// than the user-specified one.
+ if interrupt != nil { + w.resubmitAdjustCh <- &intervalAdjust{inc: false} + } + return false +} + func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) bool { gasLimit := env.header.GasLimit if env.gasPool == nil { @@ -1061,6 +1230,27 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment) { localTxs[account] = txs } } + if w.flashbots.isFlashbots { + bundles, err := w.eth.TxPool().MevBundles(env.header.Number, env.header.Time) + if err != nil { + log.Error("Failed to fetch pending transactions", "err", err) + return + } + + bundleTxs, bundle, numBundles, err := w.generateFlashbotsBundle(env, bundles, pending) + if err != nil { + log.Error("Failed to generate flashbots bundle", "err", err) + return + } + log.Info("Flashbots bundle", "ethToCoinbase", ethIntToFloat(bundle.totalEth), "gasUsed", bundle.totalGasUsed, "bundleScore", bundle.mevGasPrice, "bundleLength", len(bundleTxs), "numBundles", numBundles, "worker", w.flashbots.maxMergedBundles) + if len(bundleTxs) == 0 { + return + } + if w.commitBundle(env, bundleTxs, interrupt) { + return + } + env.profit.Add(env.profit, bundle.totalEth) + } if len(localTxs) > 0 { txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) if w.commitTransactions(env, txs, interrupt) { @@ -1144,13 +1334,12 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti // If we're post merge, just ignore if !w.isTTDReached(block.Header()) { select { - case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}: + case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now(), profit: env.profit, isFlashbots: w.flashbots.isFlashbots, worker: w.flashbots.maxMergedBundles}: w.unconfirmed.Shift(block.NumberU64() - 1) log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), - "uncles", len(env.uncles), "txs", env.tcount, - "gas", block.GasUsed(), "fees", totalFees(block, env.receipts), - "elapsed", common.PrettyDuration(time.Since(start))) - + "uncles", len(env.uncles), "txs", env.tcount, "gas", block.GasUsed(), "fees", totalFees(block, env.receipts), + "profit", ethIntToFloat(env.profit), "elapsed", common.PrettyDuration(time.Since(start)), + "isFlashbots", w.flashbots.isFlashbots, "worker", w.flashbots.maxMergedBundles) case <-w.exitCh: log.Info("Worker has exited") } @@ -1195,6 +1384,195 @@ func (w *worker) isTTDReached(header *types.Header) bool { return td != nil && ttd != nil && td.Cmp(ttd) >= 0 } +type simulatedBundle struct { + mevGasPrice *big.Int + totalEth *big.Int + ethSentToCoinbase *big.Int + totalGasUsed uint64 + originalBundle types.MevBundle +} + +func (w *worker) generateFlashbotsBundle(env *environment, bundles []types.MevBundle, pendingTxs map[common.Address]types.Transactions) (types.Transactions, simulatedBundle, int, error) { + simulatedBundles, err := w.simulateBundles(env, bundles, pendingTxs) + if err != nil { + return nil, simulatedBundle{}, 0, err + } + + sort.SliceStable(simulatedBundles, func(i, j int) bool { + return simulatedBundles[j].mevGasPrice.Cmp(simulatedBundles[i].mevGasPrice) < 0 + }) + + return w.mergeBundles(env, simulatedBundles, pendingTxs) +} + +func (w *worker) mergeBundles(env *environment, bundles []simulatedBundle, pendingTxs map[common.Address]types.Transactions) (types.Transactions, simulatedBundle, int, error) { + finalBundle := types.Transactions{} + + 
currentState := env.state.Copy() + gasPool := new(core.GasPool).AddGas(env.header.GasLimit) + + var prevState *state.StateDB + var prevGasPool *core.GasPool + + mergedBundle := simulatedBundle{ + totalEth: new(big.Int), + ethSentToCoinbase: new(big.Int), + } + + count := 0 + for _, bundle := range bundles { + prevState = currentState.Copy() + prevGasPool = new(core.GasPool).AddGas(gasPool.Gas()) + + // the floor gas price is 99/100 what was simulated at the top of the block + floorGasPrice := new(big.Int).Mul(bundle.mevGasPrice, big.NewInt(99)) + floorGasPrice = floorGasPrice.Div(floorGasPrice, big.NewInt(100)) + + simmed, err := w.computeBundleGas(env, bundle.originalBundle, currentState, gasPool, pendingTxs, len(finalBundle)) + if err != nil || simmed.mevGasPrice.Cmp(floorGasPrice) <= 0 { + currentState = prevState + gasPool = prevGasPool + continue + } + + log.Info("Included bundle", "ethToCoinbase", ethIntToFloat(simmed.totalEth), "gasUsed", simmed.totalGasUsed, "bundleScore", simmed.mevGasPrice, "bundleLength", len(simmed.originalBundle.Txs), "worker", w.flashbots.maxMergedBundles) + + finalBundle = append(finalBundle, bundle.originalBundle.Txs...) + mergedBundle.totalEth.Add(mergedBundle.totalEth, simmed.totalEth) + mergedBundle.ethSentToCoinbase.Add(mergedBundle.ethSentToCoinbase, simmed.ethSentToCoinbase) + mergedBundle.totalGasUsed += simmed.totalGasUsed + count++ + + if count >= w.flashbots.maxMergedBundles { + break + } + } + + if len(finalBundle) == 0 || count != w.flashbots.maxMergedBundles { + return nil, simulatedBundle{}, count, nil + } + + return finalBundle, simulatedBundle{ + mevGasPrice: new(big.Int).Div(mergedBundle.totalEth, new(big.Int).SetUint64(mergedBundle.totalGasUsed)), + totalEth: mergedBundle.totalEth, + ethSentToCoinbase: mergedBundle.ethSentToCoinbase, + totalGasUsed: mergedBundle.totalGasUsed, + }, count, nil +} + +func (w *worker) simulateBundles(env *environment, bundles []types.MevBundle, pendingTxs map[common.Address]types.Transactions) ([]simulatedBundle, error) { + simulatedBundles := []simulatedBundle{} + + for _, bundle := range bundles { + state := env.state.Copy() + gasPool := new(core.GasPool).AddGas(env.header.GasLimit) + if len(bundle.Txs) == 0 { + continue + } + simmed, err := w.computeBundleGas(env, bundle, state, gasPool, pendingTxs, 0) + + if err != nil { + log.Debug("Error computing gas for a bundle", "error", err) + continue + } + simulatedBundles = append(simulatedBundles, simmed) + } + + return simulatedBundles, nil +} + +func containsHash(arr []common.Hash, match common.Hash) bool { + for _, elem := range arr { + if elem == match { + return true + } + } + return false +} + +// Compute the adjusted gas price for a whole bundle +// Done by calculating all gas spent, adding transfers to the coinbase, and then dividing by gas used +func (w *worker) computeBundleGas(env *environment, bundle types.MevBundle, state *state.StateDB, gasPool *core.GasPool, pendingTxs map[common.Address]types.Transactions, currentTxCount int) (simulatedBundle, error) { + var totalGasUsed uint64 = 0 + var tempGasUsed uint64 + gasFees := new(big.Int) + + ethSentToCoinbase := new(big.Int) + + for i, tx := range bundle.Txs { + if env.header.BaseFee != nil && tx.Type() == 2 { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return simulatedBundle{}, core.ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return simulatedBundle{}, core.ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. 
+ if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return simulatedBundle{}, core.ErrTipAboveFeeCap + } + } + + state.Prepare(tx.Hash(), i+currentTxCount) + coinbaseBalanceBefore := state.GetBalance(env.coinbase) + + receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, gasPool, state, env.header, tx, &tempGasUsed, *w.chain.GetVMConfig()) + if err != nil { + return simulatedBundle{}, err + } + if receipt.Status == types.ReceiptStatusFailed && !containsHash(bundle.RevertingTxHashes, receipt.TxHash) { + return simulatedBundle{}, errors.New("failed tx") + } + + totalGasUsed += receipt.GasUsed + + from, err := types.Sender(env.signer, tx) + if err != nil { + return simulatedBundle{}, err + } + + txInPendingPool := false + if accountTxs, ok := pendingTxs[from]; ok { + // check if tx is in pending pool + txNonce := tx.Nonce() + + for _, accountTx := range accountTxs { + if accountTx.Nonce() == txNonce { + txInPendingPool = true + break + } + } + } + + gasUsed := new(big.Int).SetUint64(receipt.GasUsed) + gasPrice, err := tx.EffectiveGasTip(env.header.BaseFee) + if err != nil { + return simulatedBundle{}, err + } + gasFeesTx := gasUsed.Mul(gasUsed, gasPrice) + coinbaseBalanceAfter := state.GetBalance(env.coinbase) + coinbaseDelta := big.NewInt(0).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + coinbaseDelta.Sub(coinbaseDelta, gasFeesTx) + ethSentToCoinbase.Add(ethSentToCoinbase, coinbaseDelta) + + if !txInPendingPool { + // If tx is not in pending pool, count the gas fees + gasFees.Add(gasFees, gasFeesTx) + } + } + + totalEth := new(big.Int).Add(ethSentToCoinbase, gasFees) + + return simulatedBundle{ + mevGasPrice: new(big.Int).Div(totalEth, new(big.Int).SetUint64(totalGasUsed)), + totalEth: totalEth, + ethSentToCoinbase: ethSentToCoinbase, + totalGasUsed: totalGasUsed, + originalBundle: bundle, + }, nil +} + // copyReceipts makes a deep copy of the given receipts. func copyReceipts(receipts []*types.Receipt) []*types.Receipt { result := make([]*types.Receipt, len(receipts)) @@ -1213,6 +1591,14 @@ func (w *worker) postSideBlock(event core.ChainSideEvent) { } } +// ethIntToFloat is for formatting a big.Int in wei to eth +func ethIntToFloat(eth *big.Int) *big.Float { + if eth == nil { + return big.NewFloat(0) + } + return new(big.Float).Quo(new(big.Float).SetInt(eth), new(big.Float).SetInt(big.NewInt(params.Ether))) +} + // totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order. 
func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { feesWei := new(big.Int) @@ -1220,5 +1606,5 @@ func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { minerFee, _ := tx.EffectiveGasTip(block.BaseFee()) feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee)) } - return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) + return ethIntToFloat(feesWei) } diff --git a/miner/worker_test.go b/miner/worker_test.go index dd029433b8bf..2e06ace9fc92 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -202,7 +202,10 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) backend.txPool.AddLocals(pendingTxs) - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) + w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, &flashbotsData{ + isFlashbots: false, + queue: nil, + }) w.setEtherbase(testBankAddress) return w, backend } From 465e998574f7bbf74463443e31ffa52dc87d7abe Mon Sep 17 00:00:00 2001 From: Ivan Bogatyy Date: Wed, 27 Oct 2021 16:08:07 -0400 Subject: [PATCH 2/8] Flashbots changes v0.3 to v0.4 --- cmd/geth/main.go | 3 +- cmd/geth/usage.go | 3 +- cmd/utils/flags.go | 28 +++++++++++++- core/tx_pool.go | 62 +++++++++++++++++++++++++++--- eth/api_backend.go | 4 ++ internal/ethapi/api.go | 76 ++++++++++++++++++++++++++++++++++++- internal/ethapi/backend.go | 1 + internal/web3ext/web3ext.go | 5 +++ les/api_backend.go | 5 +++ miner/miner.go | 1 + miner/multi_worker.go | 27 +++++++++---- miner/worker.go | 51 +++++++++++++++++++++---- 12 files changed, 240 insertions(+), 26 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 68f8ef8af76b..9fc03f3fda20 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -131,7 +131,8 @@ var ( utils.MinerExtraDataFlag, utils.MinerRecommitIntervalFlag, utils.MinerNoVerifyFlag, - utils.MinerMaxMergedBundles, + utils.MinerMaxMergedBundlesFlag, + utils.MinerTrustedRelaysFlag, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV5Flag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index d3d39e2861d2..027711b6c357 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -189,7 +189,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MinerExtraDataFlag, utils.MinerRecommitIntervalFlag, utils.MinerNoVerifyFlag, - utils.MinerMaxMergedBundles, + utils.MinerMaxMergedBundlesFlag, + utils.MinerTrustedRelaysFlag, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 030ba4a4c851..c199eac1f955 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -475,11 +475,16 @@ var ( Usage: "Time interval to recreate the block being mined", Value: ethconfig.Defaults.Miner.Recommit, } - MinerMaxMergedBundles = cli.IntFlag{ + MinerMaxMergedBundlesFlag = cli.IntFlag{ Name: "miner.maxmergedbundles", Usage: "flashbots - The maximum amount of bundles to merge. The miner will run this many workers in parallel to calculate if the full block is more profitable with these additional bundles.", Value: 3, } + MinerTrustedRelaysFlag = cli.StringFlag{ + Name: "miner.trustedrelays", + Usage: "flashbots - The Ethereum addresses of trusted relays for signature verification. 
The miner will accept signed bundles and other tasks from the relay, being reasonably certain about DDoS safety.", + Value: "0x870e2734DdBe2Fba9864f33f3420d59Bc641f2be", + } MinerNoVerifyFlag = cli.BoolFlag{ Name: "miner.noverify", Usage: "Disable remote sealing verification", @@ -1355,6 +1360,15 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) { cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name) } + + addresses := strings.Split(ctx.GlobalString(MinerTrustedRelaysFlag.Name), ",") + for _, address := range addresses { + if trimmed := strings.TrimSpace(address); !common.IsHexAddress(trimmed) { + Fatalf("Invalid account in --miner.trustedrelays: %s", trimmed) + } else { + cfg.TrustedRelays = append(cfg.TrustedRelays, common.HexToAddress(trimmed)) + } + } } func setEthash(ctx *cli.Context, cfg *ethconfig.Config) { @@ -1408,7 +1422,17 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { log.Warn("The generic --miner.gastarget flag is deprecated and will be removed in the future!") } - cfg.MaxMergedBundles = ctx.GlobalInt(MinerMaxMergedBundles.Name) + cfg.MaxMergedBundles = ctx.GlobalInt(MinerMaxMergedBundlesFlag.Name) + + addresses := strings.Split(ctx.GlobalString(MinerTrustedRelaysFlag.Name), ",") + for _, address := range addresses { + if trimmed := strings.TrimSpace(address); !common.IsHexAddress(trimmed) { + Fatalf("Invalid account in --miner.trustedrelays: %s", trimmed) + } else { + cfg.TrustedRelays = append(cfg.TrustedRelays, common.HexToAddress(trimmed)) + } + } + log.Info("Trusted relays set as", "addresses", cfg.TrustedRelays) } func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/core/tx_pool.go b/core/tx_pool.go index 51312d3e78b6..75b5ac101949 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -165,6 +165,8 @@ type TxPoolConfig struct { GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts Lifetime time.Duration // Maximum amount of time non-executable transaction are queued + + TrustedRelays []common.Address // Trusted relay addresses. Duplicated from the miner config. 
 }
 
 // DefaultTxPoolConfig contains the default configurations for the transaction
@@ -251,12 +253,13 @@ type TxPool struct {
 	locals  *accountSet // Set of local transaction to exempt from eviction rules
 	journal *txJournal  // Journal of local transaction to back up to disk
 
-	pending map[common.Address]*txList   // All currently processable transactions
-	queue   map[common.Address]*txList   // Queued but non-processable transactions
-	beats   map[common.Address]time.Time // Last heartbeat from each known account
-	mevBundles []types.MevBundle
-	all     *txLookup     // All transactions to allow lookups
-	priced  *txPricedList // All transactions sorted by price
+	pending     map[common.Address]*txList         // All currently processable transactions
+	queue       map[common.Address]*txList         // Queued but non-processable transactions
+	beats       map[common.Address]time.Time       // Last heartbeat from each known account
+	mevBundles  []types.MevBundle
+	megabundles map[common.Address]types.MevBundle // One megabundle per each trusted relay
+	all         *txLookup                          // All transactions to allow lookups
+	priced      *txPricedList                      // All transactions sorted by price
 
 	chainHeadCh     chan ChainHeadEvent
 	chainHeadSub    event.Subscription
@@ -290,6 +293,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
 		pending:         make(map[common.Address]*txList),
 		queue:           make(map[common.Address]*txList),
 		beats:           make(map[common.Address]time.Time),
+		megabundles:     make(map[common.Address]types.MevBundle),
 		all:             newTxLookup(),
 		chainHeadCh:     make(chan ChainHeadEvent, chainHeadChanSize),
 		reqResetCh:      make(chan *txpoolResetRequest),
@@ -611,6 +615,52 @@ func (pool *TxPool) AddMevBundle(txs types.Transactions, blockNumber *big.Int, m
 	return nil
 }
 
+// AddMegabundle adds a megabundle to the pool. Assumes the relay signature has been verified already.
+func (pool *TxPool) AddMegabundle(relayAddr common.Address, txs types.Transactions, blockNumber *big.Int, minTimestamp, maxTimestamp uint64, revertingTxHashes []common.Hash) error {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	fromTrustedRelay := false
+	for _, trustedAddr := range pool.config.TrustedRelays {
+		if relayAddr == trustedAddr {
+			fromTrustedRelay = true
+		}
+	}
+	if !fromTrustedRelay {
+		return errors.New("megabundle from non-trusted address")
+	}
+
+	pool.megabundles[relayAddr] = types.MevBundle{
+		Txs:               txs,
+		BlockNumber:       blockNumber,
+		MinTimestamp:      minTimestamp,
+		MaxTimestamp:      maxTimestamp,
+		RevertingTxHashes: revertingTxHashes,
+	}
+	return nil
+}
+
+// GetMegabundle returns the latest megabundle submitted by a given relay.
+func (pool *TxPool) GetMegabundle(relayAddr common.Address, blockNumber *big.Int, blockTimestamp uint64) (types.MevBundle, error) {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	megabundle, ok := pool.megabundles[relayAddr]
+	if !ok {
+		return types.MevBundle{}, errors.New("No megabundle found")
+	}
+	if megabundle.BlockNumber.Cmp(blockNumber) != 0 {
+		return types.MevBundle{}, errors.New("Megabundle does not fit blockNumber constraints")
+	}
+	if megabundle.MinTimestamp != 0 && megabundle.MinTimestamp > blockTimestamp {
+		return types.MevBundle{}, errors.New("Megabundle does not fit minTimestamp constraints")
+	}
+	if megabundle.MaxTimestamp != 0 && megabundle.MaxTimestamp < blockTimestamp {
+		return types.MevBundle{}, errors.New("Megabundle does not fit maxTimestamp constraints")
+	}
+	return megabundle, nil
+}
+
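The timestamp window enforced by GetMegabundle above can be read in isolation. A minimal, runnable restatement of the same rule follows; the standalone packaging and function name are illustrative, not part of this patch:

```go
package main

import (
	"errors"
	"fmt"
)

// megabundleValid mirrors the constraint checks in TxPool.GetMegabundle:
// a zero bound means "unconstrained", otherwise the block timestamp must
// fall within [MinTimestamp, MaxTimestamp].
func megabundleValid(minTimestamp, maxTimestamp, blockTimestamp uint64) error {
	if minTimestamp != 0 && minTimestamp > blockTimestamp {
		return errors.New("megabundle does not fit minTimestamp constraints")
	}
	if maxTimestamp != 0 && maxTimestamp < blockTimestamp {
		return errors.New("megabundle does not fit maxTimestamp constraints")
	}
	return nil
}

func main() {
	fmt.Println(megabundleValid(0, 0, 1635000000))          // <nil>: no window set
	fmt.Println(megabundleValid(1635000100, 0, 1635000000)) // block is earlier than MinTimestamp
}
```

 // Locals retrieves the accounts currently considered local by the pool.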
 func (pool *TxPool) Locals() []common.Address {
 	pool.mu.Lock()
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 8454c0afe701..ea3b4a2e0461 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -243,6 +243,10 @@ func (b *EthAPIBackend) SendBundle(ctx context.Context, txs types.Transactions,
 	return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp, revertingTxHashes)
 }
 
+func (b *EthAPIBackend) SendMegabundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash, relayAddr common.Address) error {
+	return b.eth.txPool.AddMegabundle(relayAddr, txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp, revertingTxHashes)
+}
+
 func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
 	pending := b.eth.txPool.Pending(false)
 	var txs types.Transactions
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 5599714f7ab3..63b046c90a2f 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -2095,7 +2095,7 @@ func NewPrivateTxBundleAPI(b Backend) *PrivateTxBundleAPI {
 	return &PrivateTxBundleAPI{b}
 }
 
-// SendBundleArgs represents the arguments for a call.
+// SendBundleArgs represents the arguments for a SendBundle call.
 type SendBundleArgs struct {
 	Txs         []hexutil.Bytes `json:"txs"`
 	BlockNumber rpc.BlockNumber `json:"blockNumber"`
@@ -2104,6 +2104,25 @@ type SendBundleArgs struct {
 	RevertingTxHashes []common.Hash `json:"revertingTxHashes"`
 }
 
+// SendMegabundleArgs represents the arguments for a SendMegabundle call.
+type SendMegabundleArgs struct {
+	Txs               []hexutil.Bytes `json:"txs"`
+	BlockNumber       uint64          `json:"blockNumber"`
+	MinTimestamp      *uint64         `json:"minTimestamp"`
+	MaxTimestamp      *uint64         `json:"maxTimestamp"`
+	RevertingTxHashes []common.Hash   `json:"revertingTxHashes"`
+	RelaySignature    hexutil.Bytes   `json:"relaySignature"`
+}
+
+// UnsignedMegabundle is used for serialization and subsequent digital signing.
+type UnsignedMegabundle struct {
+	Txs               []hexutil.Bytes
+	BlockNumber       uint64
+	MinTimestamp      uint64
+	MaxTimestamp      uint64
+	RevertingTxHashes []common.Hash
+}
+
 // SendBundle will add the signed transaction to the transaction pool.
 // The sender is responsible for signing the transaction and using the correct nonce and ensuring validity
 func (s *PrivateTxBundleAPI) SendBundle(ctx context.Context, args SendBundleArgs) error {
@@ -2133,3 +2152,58 @@ func (s *PrivateTxBundleAPI) SendBundle(ctx context.Context, args SendBundleArgs
 	return s.b.SendBundle(ctx, txs, args.BlockNumber, minTimestamp, maxTimestamp, args.RevertingTxHashes)
 }
+
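For orientation, the relay-side counterpart of the recovery routine below might look like the following sketch. It assumes go-ethereum's accounts.TextHash and crypto.Sign together with the UnsignedMegabundle layout above; the helper itself is illustrative and not part of this patch:

```go
import (
	"crypto/ecdsa"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// signMegabundle (hypothetical) RLP-encodes the unsigned megabundle, hashes
// it with the EIP-191 text prefix, signs it, and shifts V by 27 so that the
// recovery code below can undo the shift.
func signMegabundle(key *ecdsa.PrivateKey, m UnsignedMegabundle) (hexutil.Bytes, error) {
	payload, err := rlp.EncodeToBytes(m)
	if err != nil {
		return nil, err
	}
	sig, err := crypto.Sign(accounts.TextHash(payload), key)
	if err != nil {
		return nil, err
	}
	sig[64] += 27 // Ethereum-style V; RecoverRelayAddress subtracts it again
	return sig, nil
}
```

+// RecoverRelayAddress recovers the Ethereum address of the trusted relay that signed the megabundle.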
+func RecoverRelayAddress(args SendMegabundleArgs) (common.Address, error) { + megabundle := UnsignedMegabundle{Txs: args.Txs, BlockNumber: args.BlockNumber, RevertingTxHashes: args.RevertingTxHashes} + if args.MinTimestamp != nil { + megabundle.MinTimestamp = *args.MinTimestamp + } else { + megabundle.MinTimestamp = 0 + } + if args.MaxTimestamp != nil { + megabundle.MaxTimestamp = *args.MaxTimestamp + } else { + megabundle.MaxTimestamp = 0 + } + rlpEncoding, _ := rlp.EncodeToBytes(megabundle) + signature := args.RelaySignature + signature[64] -= 27 // account for Ethereum V + recoveredPubkey, err := crypto.SigToPub(accounts.TextHash(rlpEncoding), args.RelaySignature) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*recoveredPubkey), nil +} + +// SendMegabundle will add the signed megabundle to one of the workers for evaluation. +func (s *PrivateTxBundleAPI) SendMegabundle(ctx context.Context, args SendMegabundleArgs) error { + log.Info("Received a Megabundle request", "signature", args.RelaySignature) + var txs types.Transactions + if len(args.Txs) == 0 { + return errors.New("megabundle missing txs") + } + if args.BlockNumber == 0 { + return errors.New("megabundle missing blockNumber") + } + for _, encodedTx := range args.Txs { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(encodedTx); err != nil { + return err + } + txs = append(txs, tx) + } + var minTimestamp, maxTimestamp uint64 + if args.MinTimestamp != nil { + minTimestamp = *args.MinTimestamp + } + if args.MaxTimestamp != nil { + maxTimestamp = *args.MaxTimestamp + } + relayAddr, err := RecoverRelayAddress(args) + log.Info("Megabundle", "relayAddr", relayAddr, "err", err) + if err != nil { + return err + } + return s.b.SendMegabundle(ctx, txs, rpc.BlockNumber(args.BlockNumber), minTimestamp, maxTimestamp, args.RevertingTxHashes, relayAddr) +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index bcdccf2bd9d6..58c8f0bf04e1 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -75,6 +75,7 @@ type Backend interface { // Transaction pool API SendTx(ctx context.Context, signedTx *types.Transaction) error SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error + SendMegabundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash, relayAddr common.Address) error GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) GetPoolTransactions() (types.Transactions, error) GetPoolTransaction(txHash common.Hash) *types.Transaction diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 7fb98255d0ff..0e38eecc0d38 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -586,6 +586,11 @@ web3._extend({ call: 'eth_sendBundle', params: 1, }), + new web3._extend.Method({ + name: 'sendMegabundle', + call: 'eth_sendMegabundle', + params: 1 + }), ], properties: [ new web3._extend.Property({ diff --git a/les/api_backend.go b/les/api_backend.go index 9bb08c79f6a7..b910bd3e1f48 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -198,10 +198,15 @@ func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) func (b *LesApiBackend) RemoveTx(txHash common.Hash) { b.eth.txPool.RemoveTx(txHash) } + func (b *LesApiBackend) SendBundle(ctx 
context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error { return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp, revertingTxHashes) } +func (b *LesApiBackend) SendMegabundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash, relayAddr common.Address) error { + return nil +} + func (b *LesApiBackend) GetPoolTransactions() (types.Transactions, error) { return b.eth.txPool.GetTransactions() } diff --git a/miner/miner.go b/miner/miner.go index 923adef9f5d1..f4b8e740fd60 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -55,6 +55,7 @@ type Config struct { Recommit time.Duration // The time interval for miner to re-create mining work. Noverify bool // Disable remote mining solution verification(only useful in ethash). MaxMergedBundles int + TrustedRelays []common.Address `toml:",omitempty"` // Trusted relay addresses to receive tasks from. } // Miner creates blocks and searches for proof-of-work values. diff --git a/miner/multi_worker.go b/miner/multi_worker.go index 9a39983c5a43..050ea38af4e5 100644 --- a/miner/multi_worker.go +++ b/miner/multi_worker.go @@ -98,13 +98,24 @@ func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine cons for i := 1; i <= config.MaxMergedBundles; i++ { workers = append(workers, newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ - isFlashbots: true, - queue: queue, - maxMergedBundles: i, + isFlashbots: true, + isMegabundleWorker: false, + queue: queue, + maxMergedBundles: i, })) } - log.Info("creating multi worker", "config.MaxMergedBundles", config.MaxMergedBundles, "worker", len(workers)) + for i := 0; i < len(config.TrustedRelays); i++ { + workers = append(workers, + newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ + isFlashbots: true, + isMegabundleWorker: true, + queue: queue, + relayAddr: config.TrustedRelays[i], + })) + } + + log.Info("creating multi worker", "config.MaxMergedBundles", config.MaxMergedBundles, "config.TrustedRelays", config.TrustedRelays, "worker", len(workers)) return &multiWorker{ regularWorker: regularWorker, workers: workers, @@ -112,7 +123,9 @@ func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine cons } type flashbotsData struct { - isFlashbots bool - queue chan *task - maxMergedBundles int + isFlashbots bool + isMegabundleWorker bool + queue chan *task + maxMergedBundles int + relayAddr common.Address } diff --git a/miner/worker.go b/miner/worker.go index 1052b5291621..2205f4fa71da 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -151,9 +151,10 @@ type task struct { block *types.Block createdAt time.Time - profit *big.Int - isFlashbots bool - worker int + profit *big.Int + isFlashbots bool + worker int + isMegabundle bool } const ( @@ -702,7 +703,7 @@ func (w *worker) taskLoop() { // Interrupt previous sealing operation interrupt() stopCh, prev = make(chan struct{}), sealHash - log.Info("Proposed miner block", "blockNumber", task.block.Number(), "profit", ethIntToFloat(prevProfit), "isFlashbots", task.isFlashbots, "sealhash", sealHash, "parentHash", prevParentHash, "worker", task.worker) + log.Info("Proposed miner block", "blockNumber", task.block.Number(), "profit", ethIntToFloat(prevProfit), "isFlashbots", task.isFlashbots, "sealhash", sealHash, "parentHash", prevParentHash, "worker", 
task.worker, "isMegabundle", task.isMegabundle) if w.skipSealHook != nil && w.skipSealHook(task) { continue } @@ -1230,7 +1231,7 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment) { localTxs[account] = txs } } - if w.flashbots.isFlashbots { + if w.flashbots.isFlashbots && !w.flashbots.isMegabundleWorker { bundles, err := w.eth.TxPool().MevBundles(env.header.Number, env.header.Time) if err != nil { log.Error("Failed to fetch pending transactions", "err", err) @@ -1249,8 +1250,42 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment) { if w.commitBundle(env, bundleTxs, interrupt) { return } - env.profit.Add(env.profit, bundle.totalEth) + env.profit.Add(env.profit, bundle.ethSentToCoinbase) } + if w.flashbots.isMegabundleWorker { + megabundle, err := w.eth.TxPool().GetMegabundle(w.flashbots.relayAddr, env.header.Number, env.header.Time) + log.Info("Starting to process a Megabundle", "relay", w.flashbots.relayAddr, "megabundle", megabundle, "error", err) + if err != nil { + return // no valid megabundle for this relay, nothing to do + } + // Flashbots bundle merging duplicates work by simulating TXes and then committing them once more. + // Megabundles API focuses on speed and runs everything in one cycle. + coinbaseBalanceBefore := env.state.GetBalance(env.coinbase) + if w.commitBundle(env, megabundle.Txs, interrupt) { + log.Info("Could not commit a Megabundle", "relay", w.flashbots.relayAddr, "megabundle", megabundle) + return + } + var txStatuses = map[common.Hash]bool{} + for _, receipt := range env.receipts { + txStatuses[receipt.TxHash] = receipt.Status == types.ReceiptStatusSuccessful + } + for _, tx := range megabundle.Txs { + status, ok := txStatuses[tx.Hash()] + if !ok { + log.Error("No TX receipt after megabundle simulation", "TxHash", tx.Hash()) + return + } + if !status && !containsHash(megabundle.RevertingTxHashes, tx.Hash()) { + log.Info("Ignoring megabundle because of failing TX", "relay", w.flashbots.relayAddr, "TxHash", tx.Hash()) + return + } + } + coinbaseBalanceAfter := env.state.GetBalance(env.coinbase) + coinbaseDelta := big.NewInt(0).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + env.profit = coinbaseDelta + log.Info("Megabundle processed", "relay", w.flashbots.relayAddr, "totalProfit", ethIntToFloat(env.profit)) + } + if len(localTxs) > 0 { txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) if w.commitTransactions(env, txs, interrupt) { @@ -1334,12 +1369,12 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti // If we're post merge, just ignore if !w.isTTDReached(block.Header()) { select { - case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now(), profit: env.profit, isFlashbots: w.flashbots.isFlashbots, worker: w.flashbots.maxMergedBundles}: + case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now(), profit: env.profit, isFlashbots: w.flashbots.isFlashbots, worker: w.flashbots.maxMergedBundles, isMegabundle: w.flashbots.isMegabundleWorker}: w.unconfirmed.Shift(block.NumberU64() - 1) log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), "uncles", len(env.uncles), "txs", env.tcount, "gas", block.GasUsed(), "fees", totalFees(block, env.receipts), "profit", ethIntToFloat(env.profit), "elapsed", common.PrettyDuration(time.Since(start)), - "isFlashbots", w.flashbots.isFlashbots, "worker", w.flashbots.maxMergedBundles) + 
"isFlashbots", w.flashbots.isFlashbots, "worker", w.flashbots.maxMergedBundles, "isMegabundle", w.flashbots.isMegabundleWorker) case <-w.exitCh: log.Info("Worker has exited") } From bfaccc8f6711eac66a3bb418870c2fc0db97d389 Mon Sep 17 00:00:00 2001 From: Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com> Date: Mon, 21 Feb 2022 11:06:38 +0100 Subject: [PATCH 3/8] Flashbots changes v0.4 to v0.5 * fix issue with geth not shutting down (#97) * Add eth_callBundle rpc method (#14) * flashbots: add eth_estimateGasBundle (#102) * feat(ethash): flashbots_getWork RPC with profit (#106) * Calculate megabundle as soon as it's received (#112) * Add v0.5 specification link (#118) --- README.md | 1 + cmd/evm/internal/t8ntool/block.go | 2 +- cmd/geth/consolecmd_test.go | 2 +- consensus/beacon/consensus.go | 4 +- consensus/clique/clique.go | 2 +- consensus/consensus.go | 2 +- consensus/ethash/api.go | 7 +- consensus/ethash/ethash.go | 6 + consensus/ethash/ethash_test.go | 6 +- consensus/ethash/flashbots_api.go | 38 ++++ consensus/ethash/sealer.go | 26 ++- consensus/ethash/sealer_test.go | 10 +- core/state_processor.go | 56 ++++++ core/tx_pool.go | 24 ++- eth/backend.go | 2 +- infra/Dockerfile.node | 2 +- infra/Dockerfile.updater | 2 +- infra/mev-geth-nodes-x86-64.yaml | 73 +++++-- infra/start-mev-geth-node.sh | 5 +- infra/start-mev-geth-updater.sh | 2 + internal/ethapi/api.go | 310 ++++++++++++++++++++++++++++++ internal/ethapi/backend.go | 7 +- les/client.go | 2 +- miner/multi_worker.go | 29 ++- miner/worker.go | 50 +++-- 25 files changed, 599 insertions(+), 71 deletions(-) create mode 100644 consensus/ethash/flashbots_api.go diff --git a/README.md b/README.md index 8fe7df9e4255..4f87877d7494 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ See [here](https://docs.flashbots.net) for Flashbots documentation. | Version | Spec | | ------- | ------------------------------------------------------------------------------------------- | +| v0.5 | [MEV-Geth Spec v0.5](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v05) | | v0.4 | [MEV-Geth Spec v0.4](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v04) | | v0.3 | [MEV-Geth Spec v0.3](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v03) | | v0.2 | [MEV-Geth Spec v0.2](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v02) | diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index d4edd33bdeb7..ceb2388cd468 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -188,7 +188,7 @@ func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) { // If the testmode is used, the sealer will return quickly, and complain // "Sealing result is not read by miner" if it cannot write the result. 
results := make(chan *types.Block, 1) - if err := engine.Seal(nil, block, results, nil); err != nil { + if err := engine.Seal(nil, block, nil, results, nil); err != nil { panic(fmt.Sprintf("failed to seal block: %v", err)) } found := <-results diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 845ede2f9cbd..e4e4cd8ca1a2 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -31,7 +31,7 @@ import ( ) const ( - ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 txpool:1.0 web3:1.0" + ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 ethash:1.0 flashbots:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 txpool:1.0 web3:1.0" httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" ) diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 1fd7deb872fb..a3c71c84b3ff 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -294,9 +294,9 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea // // Note, the method returns immediately and will send the result async. More // than one result may also be returned depending on the consensus algorithm. -func (beacon *Beacon) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { +func (beacon *Beacon) Seal(chain consensus.ChainHeaderReader, block *types.Block, profit *big.Int, results chan<- *types.Block, stop <-chan struct{}) error { if !beacon.IsPoSHeader(block.Header()) { - return beacon.ethone.Seal(chain, block, results, stop) + return beacon.ethone.Seal(chain, block, profit, results, stop) } // The seal verification is done by the external consensus engine, // return directly without pushing any block back. In another word diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 685186817d2d..576d14f3c3a1 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -589,7 +589,7 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) { // Seal implements consensus.Engine, attempting to create a sealed block using // the local signing credentials. -func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { +func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, profit *big.Int, results chan<- *types.Block, stop <-chan struct{}) error { header := block.Header() // Sealing the genesis block is not supported diff --git a/consensus/consensus.go b/consensus/consensus.go index af8ce98ff3be..540c78209ff0 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -105,7 +105,7 @@ type Engine interface { // // Note, the method returns immediately and will send the result async. More // than one result may also be returned depending on the consensus algorithm. - Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error + Seal(chain ChainHeaderReader, block *types.Block, profit *big.Int, results chan<- *types.Block, stop <-chan struct{}) error // SealHash returns the hash of a block prior to it being sealed. 
SealHash(header *types.Header) common.Hash diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go index f4d3802e0b37..8aece9c7bb89 100644 --- a/consensus/ethash/api.go +++ b/consensus/ethash/api.go @@ -44,7 +44,7 @@ func (api *API) GetWork() ([4]string, error) { } var ( - workCh = make(chan [4]string, 1) + workCh = make(chan [5]string, 1) errc = make(chan error, 1) ) select { @@ -53,7 +53,10 @@ func (api *API) GetWork() ([4]string, error) { return [4]string{}, errEthashStopped } select { - case work := <-workCh: + case fullWork := <-workCh: + var work [4]string + copy(work[:], fullWork[:4]) + return work, nil case err := <-errc: return [4]string{}, err diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index 4e33d99c8dde..ad8dd1c34760 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -683,6 +683,12 @@ func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { Service: &API{ethash}, Public: true, }, + { + Namespace: "flashbots", + Version: "1.0", + Service: &FlashbotsAPI{ethash}, + Public: true, + }, } } diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index 382eefeecf12..0b1c40572611 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -38,7 +38,7 @@ func TestTestMode(t *testing.T) { defer ethash.Close() results := make(chan *types.Block) - err := ethash.Seal(nil, types.NewBlockWithHeader(header), results, nil) + err := ethash.Seal(nil, types.NewBlockWithHeader(header), nil, results, nil) if err != nil { t.Fatalf("failed to seal block: %v", err) } @@ -111,7 +111,7 @@ func TestRemoteSealer(t *testing.T) { // Push new work. results := make(chan *types.Block) - ethash.Seal(nil, block, results, nil) + ethash.Seal(nil, block, nil, results, nil) var ( work [4]string @@ -128,7 +128,7 @@ func TestRemoteSealer(t *testing.T) { header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)} block = types.NewBlockWithHeader(header) sealhash = ethash.SealHash(header) - ethash.Seal(nil, block, results, nil) + ethash.Seal(nil, block, nil, results, nil) if work, err = api.GetWork(); err != nil || work[0] != sealhash.Hex() { t.Error("expect to return the latest pushed work") diff --git a/consensus/ethash/flashbots_api.go b/consensus/ethash/flashbots_api.go new file mode 100644 index 000000000000..527d2a44352e --- /dev/null +++ b/consensus/ethash/flashbots_api.go @@ -0,0 +1,38 @@ +package ethash + +import "errors" + +// FlashbotsAPI exposes Flashbots related methods for the RPC interface. +type FlashbotsAPI struct { + ethash *Ethash +} + +// GetWork returns a work package for external miner. 
+// +// The work package consists of 5 strings: +// result[0] - 32 bytes hex encoded current block header pow-hash +// result[1] - 32 bytes hex encoded seed hash used for DAG +// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3] - hex encoded block number +// result[4] - hex encoded profit generated from this block +func (api *FlashbotsAPI) GetWork() ([5]string, error) { + if api.ethash.remote == nil { + return [5]string{}, errors.New("not supported") + } + + var ( + workCh = make(chan [5]string, 1) + errc = make(chan error, 1) + ) + select { + case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: + case <-api.ethash.remote.exitCh: + return [5]string{}, errEthashStopped + } + select { + case work := <-workCh: + return work, nil + case err := <-errc: + return [5]string{}, err + } +} diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index 6fa60ef6a8bb..d2b9253e5c34 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -48,7 +48,7 @@ var ( // Seal implements consensus.Engine, attempting to find a nonce that satisfies // the block's difficulty requirements. -func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { +func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, profit *big.Int, results chan<- *types.Block, stop <-chan struct{}) error { // If we're running a fake PoW, simply return a 0 nonce immediately if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { header := block.Header() @@ -62,7 +62,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block } // If we're running a shared PoW, delegate sealing to it if ethash.shared != nil { - return ethash.shared.Seal(chain, block, results, stop) + return ethash.shared.Seal(chain, block, profit, results, stop) } // Create a runner and the multiple search threads it directs abort := make(chan struct{}) @@ -86,7 +86,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block } // Push new work to remote sealer if ethash.remote != nil { - ethash.remote.workCh <- &sealTask{block: block, results: results} + ethash.remote.workCh <- &sealTask{block: block, profit: profit, results: results} } var ( pend sync.WaitGroup @@ -117,7 +117,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block case <-ethash.update: // Thread count was changed on user request, restart close(abort) - if err := ethash.Seal(chain, block, results, stop); err != nil { + if err := ethash.Seal(chain, block, profit, results, stop); err != nil { ethash.config.Log.Error("Failed to restart sealing after update", "err", err) } } @@ -194,7 +194,7 @@ type remoteSealer struct { works map[common.Hash]*types.Block rates map[common.Hash]hashrate currentBlock *types.Block - currentWork [4]string + currentWork [5]string notifyCtx context.Context cancelNotify context.CancelFunc // cancels all notification requests reqWG sync.WaitGroup // tracks notification request goroutines @@ -215,6 +215,7 @@ type remoteSealer struct { // sealTask wraps a seal block with relative result channel for remote sealer thread. type sealTask struct { block *types.Block + profit *big.Int results chan<- *types.Block } @@ -239,7 +240,7 @@ type hashrate struct { // sealWork wraps a seal work package for remote sealer. 
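On the consuming side, an external miner or pool might decode the widened work package along these lines. This is a sketch using go-ethereum's rpc client; the function name and error handling are illustrative:

```go
// fetchFlashbotsWork pulls the 5-element work package served by the
// flashbots_getWork endpoint added above. Element 4 carries the hex
// encoded block profit and is empty when no profit value was supplied.
func fetchFlashbotsWork(client *rpc.Client) (common.Hash, *big.Int, error) {
	var work [5]string
	if err := client.Call(&work, "flashbots_getWork"); err != nil {
		return common.Hash{}, nil, err
	}
	sealHash := common.HexToHash(work[0])
	var profit *big.Int
	if work[4] != "" {
		p, err := hexutil.DecodeBig(work[4])
		if err != nil {
			return common.Hash{}, nil, err
		}
		profit = p
	}
	return sealHash, profit, nil
}
```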
type sealWork struct { errc chan error - res chan [4]string + res chan [5]string } func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer { @@ -281,7 +282,7 @@ func (s *remoteSealer) loop() { // Update current work with new received block. // Note same work can be past twice, happens when changing CPU threads. s.results = work.results - s.makeWork(work.block) + s.makeWork(work.block, work.profit) s.notifyWork() case work := <-s.fetchWorkCh: @@ -338,18 +339,23 @@ func (s *remoteSealer) loop() { // makeWork creates a work package for external miner. // -// The work package consists of 3 strings: +// The work package consists of 5 strings: // result[0], 32 bytes hex encoded current block header pow-hash // result[1], 32 bytes hex encoded seed hash used for DAG // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty // result[3], hex encoded block number -func (s *remoteSealer) makeWork(block *types.Block) { +// result[4], hex encoded profit generated from this block, if present +func (s *remoteSealer) makeWork(block *types.Block, profit *big.Int) { hash := s.ethash.SealHash(block.Header()) s.currentWork[0] = hash.Hex() s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() s.currentWork[3] = hexutil.EncodeBig(block.Number()) + if profit != nil { + s.currentWork[4] = hexutil.EncodeBig(profit) + } + // Trace the seal work fetched by remote sealer. s.currentBlock = block s.works[hash] = block @@ -375,7 +381,7 @@ func (s *remoteSealer) notifyWork() { } } -func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) { +func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [5]string) { defer s.reqWG.Done() req, err := http.NewRequest("POST", url, bytes.NewReader(json)) diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go index c34e76aec243..bcab88f4d74f 100644 --- a/consensus/ethash/sealer_test.go +++ b/consensus/ethash/sealer_test.go @@ -57,7 +57,7 @@ func TestRemoteNotify(t *testing.T) { header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} block := types.NewBlockWithHeader(header) - ethash.Seal(nil, block, nil, nil) + ethash.Seal(nil, block, nil, nil, nil) select { case work := <-sink: if want := ethash.SealHash(header).Hex(); work[0] != want { @@ -105,7 +105,7 @@ func TestRemoteNotifyFull(t *testing.T) { header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} block := types.NewBlockWithHeader(header) - ethash.Seal(nil, block, nil, nil) + ethash.Seal(nil, block, nil, nil, nil) select { case work := <-sink: if want := "0x" + strconv.FormatUint(header.Number.Uint64(), 16); work["number"] != want { @@ -151,7 +151,7 @@ func TestRemoteMultiNotify(t *testing.T) { for i := 0; i < cap(sink); i++ { header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} block := types.NewBlockWithHeader(header) - ethash.Seal(nil, block, results, nil) + ethash.Seal(nil, block, nil, results, nil) } for i := 0; i < cap(sink); i++ { @@ -200,7 +200,7 @@ func TestRemoteMultiNotifyFull(t *testing.T) { for i := 0; i < cap(sink); i++ { header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} block := types.NewBlockWithHeader(header) - ethash.Seal(nil, block, results, nil) + ethash.Seal(nil, block, nil, results, nil) } for i := 0; i < cap(sink); i++ { @@ -266,7 +266,7 
@@ func TestStaleSubmission(t *testing.T) { for id, c := range testcases { for _, h := range c.headers { - ethash.Seal(nil, types.NewBlockWithHeader(h), results, nil) + ethash.Seal(nil, types.NewBlockWithHeader(h), nil, results, nil) } if res := api.SubmitWork(fakeNonce, ethash.SealHash(c.headers[c.submitIndex]), fakeDigest); res != c.submitRes { t.Errorf("case %d submit result mismatch, want %t, get %t", id+1, c.submitRes, res) diff --git a/core/state_processor.go b/core/state_processor.go index d4c77ae41042..05064a27cb21 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -137,6 +137,51 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon return receipt, err } +func applyTransactionWithResult(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, *ExecutionResult, error) { + // Create a new context to be used in the EVM environment. + txContext := NewEVMTxContext(msg) + evm.Reset(txContext, statedb) + + // Apply the transaction to the current state (included in the env). + result, err := ApplyMessage(evm, msg, gp) + if err != nil { + return nil, nil, err + } + + // Update the state with pending changes. + var root []byte + if config.IsByzantium(header.Number) { + statedb.Finalise(true) + } else { + root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes() + } + *usedGas += result.UsedGas + + // Create a new receipt for the transaction, storing the intermediate root and gas used + // by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas} + if result.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + receipt.TxHash = tx.Hash() + receipt.GasUsed = result.UsedGas + + // If the transaction created a contract, store the creation address in the receipt. + if msg.To() == nil { + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) + } + + // Set the receipt logs and create the bloom filter. + receipt.Logs = statedb.GetLogs(tx.Hash(), header.Hash()) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + receipt.BlockHash = header.Hash() + receipt.BlockNumber = header.Number + receipt.TransactionIndex = uint(statedb.TxIndex()) + return receipt, result, err +} + // ApplyTransaction attempts to apply a transaction to the given state database // and uses the input parameters for its environment. 
It returns the receipt // for the transaction, gas used and an error if the transaction failed, @@ -151,3 +196,14 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } + +func ApplyTransactionWithResult(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, *ExecutionResult, error) { + msg, err := tx.AsMessage(types.MakeSigner(config, header.Number), header.BaseFee) + if err != nil { + return nil, nil, err + } + // Create a new context to be used in the EVM environment + blockContext := NewEVMBlockContext(header, bc, author) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) + return applyTransactionWithResult(msg, config, bc, author, gp, statedb, header, tx, usedGas, vmenv) +} diff --git a/core/tx_pool.go b/core/tx_pool.go index 75b5ac101949..ee507a5b7055 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -253,13 +253,14 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - mevBundles []types.MevBundle - megabundles map[common.Address]types.MevBundle // One megabundle per each trusted relay - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + mevBundles []types.MevBundle + megabundles map[common.Address]types.MevBundle // One megabundle per each trusted relay + NewMegabundleHooks []func(common.Address, *types.MevBundle) + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription @@ -630,13 +631,20 @@ func (pool *TxPool) AddMegabundle(relayAddr common.Address, txs types.Transactio return errors.New("megabundle from non-trusted address") } - pool.megabundles[relayAddr] = types.MevBundle{ + megabundle := types.MevBundle{ Txs: txs, BlockNumber: blockNumber, MinTimestamp: minTimestamp, MaxTimestamp: maxTimestamp, RevertingTxHashes: revertingTxHashes, } + + pool.megabundles[relayAddr] = megabundle + + for _, hook := range pool.NewMegabundleHooks { + go hook(relayAddr, &megabundle) + } + return nil } diff --git a/eth/backend.go b/eth/backend.go index 22535e0e2289..1dbeebdd9718 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -292,7 +292,7 @@ func makeExtraData(extra []byte) []byte { // APIs return the collection of RPC services the ethereum package offers. // NOTE, some of these services probably need to be moved to somewhere else. 
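The NewMegabundleHooks slice added to the pool above is what lets the miner react the moment a megabundle arrives (the wiring lands in miner/multi_worker.go later in this patch). Registration reduces to a sketch like this, with the log line purely illustrative:

```go
// Observe megabundle arrivals; AddMegabundle invokes each hook in its
// own goroutine, so the callback must be safe to run concurrently.
pool.NewMegabundleHooks = append(pool.NewMegabundleHooks,
	func(relay common.Address, bundle *types.MevBundle) {
		log.Info("Megabundle received", "relay", relay, "txs", len(bundle.Txs))
	})
```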
func (s *Ethereum) APIs() []rpc.API { - apis := ethapi.GetAPIs(s.APIBackend) + apis := ethapi.GetAPIs(s.APIBackend, s.BlockChain()) // Append any APIs exposed explicitly by the consensus engine apis = append(apis, s.engine.APIs(s.BlockChain())...) diff --git a/infra/Dockerfile.node b/infra/Dockerfile.node index db8e99ac937e..7868453eba7f 100644 --- a/infra/Dockerfile.node +++ b/infra/Dockerfile.node @@ -4,7 +4,7 @@ FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make geth +RUN cd /go-ethereum && GO111MODULE=on go run build/ci.go install ./cmd/geth # Pull Geth into a second stage deploy alpine container FROM alpine:latest diff --git a/infra/Dockerfile.updater b/infra/Dockerfile.updater index d3099d19ce1a..808f55aa2b9c 100644 --- a/infra/Dockerfile.updater +++ b/infra/Dockerfile.updater @@ -4,7 +4,7 @@ FROM golang:1.15-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git ADD . /go-ethereum -RUN cd /go-ethereum && make geth +RUN cd /go-ethereum && GO111MODULE=on go run build/ci.go install ./cmd/geth # Pull Geth into a second stage deploy alpine container FROM alpine:latest diff --git a/infra/mev-geth-nodes-x86-64.yaml b/infra/mev-geth-nodes-x86-64.yaml index bf7a196caa52..f93c19153394 100644 --- a/infra/mev-geth-nodes-x86-64.yaml +++ b/infra/mev-geth-nodes-x86-64.yaml @@ -45,11 +45,11 @@ Parameters: InstanceType: Type: String - Default: i3en.large + Default: i3en.xlarge MemoryLimit: Type: Number - Default: 6144 + Default: 20000 KeyPair: Type: AWS::EC2::KeyPair::KeyName @@ -82,6 +82,14 @@ Parameters: Type: AWS::SSM::Parameter::Value Default: /aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id + PrivateZoneName: + Type: String + Default: geth.internal + + ServiceName: + Type: String + Default: node + # SNS Parameters SNSSubscriptionEndpoint: @@ -172,6 +180,10 @@ Metadata: default: "The name of the node ECS Task" ECSAMI: default: "The ECS AMI ID populated from SSM." + PrivateZoneName: + default: "The DNS zone that should be used for service discovery records." + ServiceName: + default: "The service name prefix that should be used for service discovery records." 
Network: default: "The Ethereum network you will be connecting to" SyncMode: @@ -211,6 +223,8 @@ Metadata: - NodeDesiredCount - NodeTaskName - ECSAMI + - PrivateZoneName + - ServiceName - Label: default: Mev-Geth Configuration Parameters: @@ -238,6 +252,8 @@ Mappings: us-east-2: mainnet: mev-geth-updater-fast-chainbucket-17p2xhnhcydlz goerli: mev-geth-updater-fast-goerli-chainbucket-j6dujg8apbna + eu-west-1: + mainnet: mev-geth-updater-chainbucket-11hs3dhhz7k0s #us-west-2: # mainnet: # goerli: @@ -284,6 +300,14 @@ Resources: FromPort: !Ref NetPort ToPort: !Ref NetPort CidrIpv6: ::/0 + - IpProtocol: tcp + FromPort: !Ref RpcPort + ToPort: !Ref RpcPort + CidrIp: 172.31.0.0/16 + - IpProtocol: tcp + FromPort: !Ref WsPort + ToPort: !Ref WsPort + CidrIp: 172.31.0.0/16 ECSAutoScalingGroup: Type: AWS::AutoScaling::AutoScalingGroup @@ -629,7 +653,7 @@ Resources: Properties: Cluster: !Ref Cluster DesiredCount: !Ref NodeDesiredCount - HealthCheckGracePeriodSeconds: 3600 + HealthCheckGracePeriodSeconds: 14400 TaskDefinition: !Ref NodeTaskDefinition LaunchType: EC2 DeploymentConfiguration: @@ -642,6 +666,11 @@ Resources: - ContainerName: !Ref NodeTaskName ContainerPort: !Ref WsPort TargetGroupArn: !Ref NodeWsTargetGroup + ServiceName: !Sub ${ServiceName}-${Network}-${SyncMode} + ServiceRegistries: + - RegistryArn: !GetAtt DiscoveryService.Arn + ContainerName: !Ref NodeTaskName + ContainerPort: !Ref RpcPort NodeTaskDefinition: Type: AWS::ECS::TaskDefinition @@ -688,14 +717,34 @@ Resources: awslogs-region: !Ref AWS::Region awslogs-group: !Ref NodeLogGroup awslogs-stream-prefix: !Ref AWS::StackName - #HealthCheck: - # Command: - # - CMD-SHELL - # - '[ `echo "eth.syncing.highestBlock - eth.syncing.currentBlock"|geth attach|head -10|tail -1` -lt 200 ] || exit 1' - # Interval: 300 - # Timeout: 60 - # Retries: 10 - # StartPeriod: 300 + HealthCheck: + Command: + - CMD-SHELL + - '[ `echo "eth.syncing" | geth attach | head -10 | tail -1` = "false" ] || exit 1' + Interval: 300 + Timeout: 60 + Retries: 10 + StartPeriod: 300 + + PrivateNamespace: + Type: AWS::ServiceDiscovery::PrivateDnsNamespace + Properties: + Name: !Ref PrivateZoneName + Vpc: !Ref VPC + + DiscoveryService: + Type: AWS::ServiceDiscovery::Service + Properties: + Description: Discovery service for nodes + DnsConfig: + RoutingPolicy: MULTIVALUE + DnsRecords: + - TTL: 60 + Type: SRV + HealthCheckCustomConfig: + FailureThreshold: 1 + Name: !Sub ${ServiceName}-${Network}-${SyncMode} + NamespaceId: !Ref PrivateNamespace # CodePipeline Resources @@ -969,4 +1018,4 @@ Outputs: Value: !Ref NodeTargetGroup NodeServiceUrl: Description: URL of the load balancer for the node service. - Value: !Sub http://${NodeLoadBalancer.DNSName} + Value: !Sub http://${NodeLoadBalancer.DNSName} \ No newline at end of file diff --git a/infra/start-mev-geth-node.sh b/infra/start-mev-geth-node.sh index 05ad50c61003..45ef0c519734 100755 --- a/infra/start-mev-geth-node.sh +++ b/infra/start-mev-geth-node.sh @@ -33,6 +33,7 @@ start_node() { --ws.api eth,net,web3 \ --ws.origins '*' \ --syncmode $syncmode \ + --gcmode archive \ --cache 4096 \ --maxpeers $connections \ --goerli @@ -41,7 +42,7 @@ start_node() { echo "Node failed to start; exiting." exit 1 fi - else + else geth \ --port $netport \ --http \ @@ -59,7 +60,9 @@ start_node() { --ws.api eth,net,web3 \ --ws.origins '*' \ --syncmode $syncmode \ + --gcmode archive \ --cache 4096 \ + --snapshot=false \ --maxpeers $connections if [ $? 
-ne 0 ] then diff --git a/infra/start-mev-geth-updater.sh b/infra/start-mev-geth-updater.sh index 11a6a533aa14..abad72fab9bb 100755 --- a/infra/start-mev-geth-updater.sh +++ b/infra/start-mev-geth-updater.sh @@ -18,6 +18,7 @@ start_node() { --port $netport \ --syncmode $syncmode \ --cache 4096 \ + --gcmode archive \ --maxpeers $connections \ --goerli & if [ $? -ne 0 ] @@ -30,6 +31,7 @@ start_node() { --port $netport \ --syncmode $syncmode \ --cache 4096 \ + --gcmode archive \ --maxpeers $connections & if [ $? -ne 0 ] then diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 63b046c90a2f..4497361674b0 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -18,6 +18,8 @@ package ethapi import ( "context" + "crypto/rand" + "encoding/hex" "errors" "fmt" "math/big" @@ -47,6 +49,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/tyler-smith/go-bip39" + "golang.org/x/crypto/sha3" ) // PublicEthereumAPI provides an API to access Ethereum related information. @@ -2207,3 +2210,310 @@ func (s *PrivateTxBundleAPI) SendMegabundle(ctx context.Context, args SendMegabu } return s.b.SendMegabundle(ctx, txs, rpc.BlockNumber(args.BlockNumber), minTimestamp, maxTimestamp, args.RevertingTxHashes, relayAddr) } + +// BundleAPI offers an API for accepting bundled transactions +type BundleAPI struct { + b Backend + chain *core.BlockChain +} + +// NewBundleAPI creates a new Tx Bundle API instance. +func NewBundleAPI(b Backend, chain *core.BlockChain) *BundleAPI { + return &BundleAPI{b, chain} +} + +// CallBundleArgs represents the arguments for a call. +type CallBundleArgs struct { + Txs []hexutil.Bytes `json:"txs"` + BlockNumber rpc.BlockNumber `json:"blockNumber"` + StateBlockNumberOrHash rpc.BlockNumberOrHash `json:"stateBlockNumber"` + Coinbase *string `json:"coinbase"` + Timestamp *uint64 `json:"timestamp"` + Timeout *int64 `json:"timeout"` + GasLimit *uint64 `json:"gasLimit"` + Difficulty *big.Int `json:"difficulty"` + BaseFee *big.Int `json:"baseFee"` +} + +// CallBundle will simulate a bundle of transactions at the top of a given block +// number with the state of another (or the same) block. This can be used to +// simulate future blocks with the current state, or it can be used to simulate +// a past block. 
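From the caller's perspective, eth_callBundle takes a single object whose fields follow CallBundleArgs above. A client-side sketch, assuming go-ethereum's rpc and hexutil packages; the transactions and parent block number are placeholders supplied by the caller:

```go
// simulateBundle runs signed, RLP-encoded transactions against the latest
// state as if they opened the next block, and returns the simulation map
// (bundleGasPrice, coinbaseDiff, bundleHash, per-tx results, ...).
func simulateBundle(client *rpc.Client, signedTxs []hexutil.Bytes, parentNumber uint64) (map[string]interface{}, error) {
	params := map[string]interface{}{
		"txs":              signedTxs,
		"blockNumber":      hexutil.EncodeUint64(parentNumber + 1),
		"stateBlockNumber": "latest",
	}
	var result map[string]interface{}
	if err := client.Call(&result, "eth_callBundle", params); err != nil {
		return nil, err
	}
	return result, nil
}
```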
+// The sender is responsible for signing the transactions and using the correct +// nonce and ensuring validity +func (s *BundleAPI) CallBundle(ctx context.Context, args CallBundleArgs) (map[string]interface{}, error) { + if len(args.Txs) == 0 { + return nil, errors.New("bundle missing txs") + } + if args.BlockNumber == 0 { + return nil, errors.New("bundle missing blockNumber") + } + + var txs types.Transactions + + for _, encodedTx := range args.Txs { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(encodedTx); err != nil { + return nil, err + } + txs = append(txs, tx) + } + defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) + + timeoutMilliSeconds := int64(5000) + if args.Timeout != nil { + timeoutMilliSeconds = *args.Timeout + } + timeout := time.Millisecond * time.Duration(timeoutMilliSeconds) + state, parent, err := s.b.StateAndHeaderByNumberOrHash(ctx, args.StateBlockNumberOrHash) + if state == nil || err != nil { + return nil, err + } + blockNumber := big.NewInt(int64(args.BlockNumber)) + + timestamp := parent.Time + 1 + if args.Timestamp != nil { + timestamp = *args.Timestamp + } + coinbase := parent.Coinbase + if args.Coinbase != nil { + coinbase = common.HexToAddress(*args.Coinbase) + } + difficulty := parent.Difficulty + if args.Difficulty != nil { + difficulty = args.Difficulty + } + gasLimit := parent.GasLimit + if args.GasLimit != nil { + gasLimit = *args.GasLimit + } + var baseFee *big.Int + if args.BaseFee != nil { + baseFee = args.BaseFee + } else if s.b.ChainConfig().IsLondon(big.NewInt(args.BlockNumber.Int64())) { + baseFee = misc.CalcBaseFee(s.b.ChainConfig(), parent) + } + header := &types.Header{ + ParentHash: parent.Hash(), + Number: blockNumber, + GasLimit: gasLimit, + Time: timestamp, + Difficulty: difficulty, + Coinbase: coinbase, + BaseFee: baseFee, + } + + // Setup context so it may be cancelled the call has completed + // or, in case of unmetered gas, setup a context with a timeout. + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + // Make sure the context is cancelled when the call has completed + // this makes sure resources are cleaned up. + defer cancel() + + vmconfig := vm.Config{} + + // Setup the gas pool (also for unmetered requests) + // and apply the message. 
+ gp := new(core.GasPool).AddGas(math.MaxUint64) + + results := []map[string]interface{}{} + coinbaseBalanceBefore := state.GetBalance(coinbase) + + bundleHash := sha3.NewLegacyKeccak256() + signer := types.MakeSigner(s.b.ChainConfig(), blockNumber) + var totalGasUsed uint64 + gasFees := new(big.Int) + for i, tx := range txs { + coinbaseBalanceBeforeTx := state.GetBalance(coinbase) + state.Prepare(tx.Hash(), i) + + receipt, result, err := core.ApplyTransactionWithResult(s.b.ChainConfig(), s.chain, &coinbase, gp, state, header, tx, &header.GasUsed, vmconfig) + if err != nil { + return nil, fmt.Errorf("err: %w; txhash %s", err, tx.Hash()) + } + + txHash := tx.Hash().String() + from, err := types.Sender(signer, tx) + if err != nil { + return nil, fmt.Errorf("err: %w; txhash %s", err, tx.Hash()) + } + to := "0x" + if tx.To() != nil { + to = tx.To().String() + } + jsonResult := map[string]interface{}{ + "txHash": txHash, + "gasUsed": receipt.GasUsed, + "fromAddress": from.String(), + "toAddress": to, + } + totalGasUsed += receipt.GasUsed + gasPrice, err := tx.EffectiveGasTip(header.BaseFee) + if err != nil { + return nil, fmt.Errorf("err: %w; txhash %s", err, tx.Hash()) + } + gasFeesTx := new(big.Int).Mul(big.NewInt(int64(receipt.GasUsed)), gasPrice) + gasFees.Add(gasFees, gasFeesTx) + bundleHash.Write(tx.Hash().Bytes()) + if result.Err != nil { + jsonResult["error"] = result.Err.Error() + revert := result.Revert() + if len(revert) > 0 { + jsonResult["revert"] = string(revert) + } + } else { + dst := make([]byte, hex.EncodedLen(len(result.Return()))) + hex.Encode(dst, result.Return()) + jsonResult["value"] = "0x" + string(dst) + } + coinbaseDiffTx := new(big.Int).Sub(state.GetBalance(coinbase), coinbaseBalanceBeforeTx) + jsonResult["coinbaseDiff"] = coinbaseDiffTx.String() + jsonResult["gasFees"] = gasFeesTx.String() + jsonResult["ethSentToCoinbase"] = new(big.Int).Sub(coinbaseDiffTx, gasFeesTx).String() + jsonResult["gasPrice"] = new(big.Int).Div(coinbaseDiffTx, big.NewInt(int64(receipt.GasUsed))).String() + jsonResult["gasUsed"] = receipt.GasUsed + results = append(results, jsonResult) + } + + ret := map[string]interface{}{} + ret["results"] = results + coinbaseDiff := new(big.Int).Sub(state.GetBalance(coinbase), coinbaseBalanceBefore) + ret["coinbaseDiff"] = coinbaseDiff.String() + ret["gasFees"] = gasFees.String() + ret["ethSentToCoinbase"] = new(big.Int).Sub(coinbaseDiff, gasFees).String() + ret["bundleGasPrice"] = new(big.Int).Div(coinbaseDiff, big.NewInt(int64(totalGasUsed))).String() + ret["totalGasUsed"] = totalGasUsed + ret["stateBlockNumber"] = parent.Number.Int64() + + ret["bundleHash"] = "0x" + common.Bytes2Hex(bundleHash.Sum(nil)) + return ret, nil +} + +// EstimateGasBundleArgs represents the arguments for a call +type EstimateGasBundleArgs struct { + Txs []TransactionArgs `json:"txs"` + BlockNumber rpc.BlockNumber `json:"blockNumber"` + StateBlockNumberOrHash rpc.BlockNumberOrHash `json:"stateBlockNumber"` + Coinbase *string `json:"coinbase"` + Timestamp *uint64 `json:"timestamp"` + Timeout *int64 `json:"timeout"` +} + +func (s *BundleAPI) EstimateGasBundle(ctx context.Context, args EstimateGasBundleArgs) (map[string]interface{}, error) { + if len(args.Txs) == 0 { + return nil, errors.New("bundle missing txs") + } + if args.BlockNumber == 0 { + return nil, errors.New("bundle missing blockNumber") + } + + timeoutMS := int64(5000) + if args.Timeout != nil { + timeoutMS = *args.Timeout + } + timeout := time.Millisecond * time.Duration(timeoutMS) + + state, parent, err := 
s.b.StateAndHeaderByNumberOrHash(ctx, args.StateBlockNumberOrHash) + if state == nil || err != nil { + return nil, err + } + blockNumber := big.NewInt(int64(args.BlockNumber)) + timestamp := parent.Time + 1 + if args.Timestamp != nil { + timestamp = *args.Timestamp + } + coinbase := parent.Coinbase + if args.Coinbase != nil { + coinbase = common.HexToAddress(*args.Coinbase) + } + + header := &types.Header{ + ParentHash: parent.Hash(), + Number: blockNumber, + GasLimit: parent.GasLimit, + Time: timestamp, + Difficulty: parent.Difficulty, + Coinbase: coinbase, + BaseFee: parent.BaseFee, + } + + // Setup context so it may be cancelled when the call + // has completed or, in case of unmetered gas, setup + // a context with a timeout + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // Make sure the context is cancelled when the call has completed + // This makes sure resources are cleaned up + defer cancel() + + // RPC Call gas cap + globalGasCap := s.b.RPCGasCap() + + // Results + results := []map[string]interface{}{} + + // Copy the original db so we don't modify it + statedb := state.Copy() + + // Gas pool + gp := new(core.GasPool).AddGas(math.MaxUint64) + + // Block context + blockContext := core.NewEVMBlockContext(header, s.chain, &coinbase) + + // Feed each of the transactions into the VM ctx + // And try and estimate the gas used + for i, txArgs := range args.Txs { + // Since its a txCall we'll just prepare the + // state with a random hash + var randomHash common.Hash + rand.Read(randomHash[:]) + + // New random hash since its a call + statedb.Prepare(randomHash, i) + + // Convert tx args to msg to apply state transition + msg, err := txArgs.ToMessage(globalGasCap, header.BaseFee) + if err != nil { + return nil, err + } + + // Prepare the hashes + txContext := core.NewEVMTxContext(msg) + + // Get EVM Environment + vmenv := vm.NewEVM(blockContext, txContext, statedb, s.b.ChainConfig(), vm.Config{NoBaseFee: true}) + + // Apply state transition + result, err := core.ApplyMessage(vmenv, msg, gp) + if err != nil { + return nil, err + } + + // Modifications are committed to the state + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + statedb.Finalise(vmenv.ChainConfig().IsEIP158(blockNumber)) + + // Append result + jsonResult := map[string]interface{}{ + "gasUsed": result.UsedGas, + } + results = append(results, jsonResult) + } + + // Return results + ret := map[string]interface{}{} + ret["results"] = results + + return ret, nil +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 58c8f0bf04e1..783b46fbd296 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -97,7 +97,7 @@ type Backend interface { Engine() consensus.Engine } -func GetAPIs(apiBackend Backend) []rpc.API { +func GetAPIs(apiBackend Backend, chain *core.BlockChain) []rpc.API { nonceLock := new(AddrLocker) return []rpc.API{ { @@ -144,6 +144,11 @@ func GetAPIs(apiBackend Backend) []rpc.API { Version: "1.0", Service: NewPrivateTxBundleAPI(apiBackend), Public: true, + }, { + Namespace: "eth", + Version: "1.0", + Service: NewBundleAPI(apiBackend, chain), + Public: true, }, } } diff --git a/les/client.go b/les/client.go index 43207f3443ec..922c51627824 100644 --- a/les/client.go +++ b/les/client.go @@ -282,7 +282,7 @@ func (s *LightDummyAPI) Mining() bool { // APIs returns the collection of RPC services the ethereum package offers. 
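eth_estimateGasBundle, defined above, is the unsigned counterpart of eth_callBundle: it takes call descriptions rather than raw transactions and reports only gas usage. A caller-side sketch, with field names following EstimateGasBundleArgs; the call descriptions and parent block number are placeholders:

```go
// estimateBundleGas submits unsigned call descriptions (from/to/data/...)
// and returns one {"gasUsed": ...} entry per call under "results".
func estimateBundleGas(client *rpc.Client, calls []map[string]interface{}, parentNumber uint64) (map[string]interface{}, error) {
	params := map[string]interface{}{
		"txs":              calls,
		"blockNumber":      hexutil.EncodeUint64(parentNumber + 1),
		"stateBlockNumber": "latest",
	}
	var out map[string]interface{}
	if err := client.Call(&out, "eth_estimateGasBundle", params); err != nil {
		return nil, err
	}
	return out, nil
}
```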
// NOTE, some of these services probably need to be moved to somewhere else. func (s *LightEthereum) APIs() []rpc.API { - apis := ethapi.GetAPIs(s.ApiBackend) + apis := ethapi.GetAPIs(s.ApiBackend, nil) apis = append(apis, s.engine.APIs(s.BlockChain().HeaderChain())...) return append(apis, []rpc.API{ { diff --git a/miner/multi_worker.go b/miner/multi_worker.go index 050ea38af4e5..da1471fa3e75 100644 --- a/miner/multi_worker.go +++ b/miner/multi_worker.go @@ -105,16 +105,31 @@ func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine cons })) } + relayWorkerMap := make(map[common.Address]*worker) + for i := 0; i < len(config.TrustedRelays); i++ { - workers = append(workers, - newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ - isFlashbots: true, - isMegabundleWorker: true, - queue: queue, - relayAddr: config.TrustedRelays[i], - })) + relayWorker := newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ + isFlashbots: true, + isMegabundleWorker: true, + queue: queue, + relayAddr: config.TrustedRelays[i], + }) + workers = append(workers, relayWorker) + relayWorkerMap[config.TrustedRelays[i]] = relayWorker } + eth.TxPool().NewMegabundleHooks = append(eth.TxPool().NewMegabundleHooks, func(relayAddr common.Address, megabundle *types.MevBundle) { + worker, found := relayWorkerMap[relayAddr] + if !found { + return + } + + select { + case worker.newMegabundleCh <- megabundle: + default: + } + }) + log.Info("creating multi worker", "config.MaxMergedBundles", config.MaxMergedBundles, "config.TrustedRelays", config.TrustedRelays, "worker", len(workers)) return &multiWorker{ regularWorker: regularWorker, diff --git a/miner/worker.go b/miner/worker.go index 2205f4fa71da..3f1d7c2dfc2b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -213,6 +213,7 @@ type worker struct { exitCh chan struct{} resubmitIntervalCh chan time.Duration resubmitAdjustCh chan *intervalAdjust + newMegabundleCh chan *types.MevBundle wg sync.WaitGroup @@ -295,16 +296,18 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus txsCh: make(chan core.NewTxsEvent, txChanSize), chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), - newWorkCh: make(chan *newWorkReq), + newWorkCh: make(chan *newWorkReq, 1), getWorkCh: make(chan *getWorkReq), taskCh: taskCh, resultCh: make(chan *types.Block, resultQueueSize), exitCh: exitCh, startCh: make(chan struct{}, 1), + newMegabundleCh: make(chan *types.MevBundle), resubmitIntervalCh: make(chan time.Duration), resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), flashbots: flashbots, } + // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) // Subscribe events for blockchain @@ -450,26 +453,38 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t func (w *worker) newWorkLoop(recommit time.Duration) { defer w.wg.Done() var ( - interrupt *int32 - minRecommit = recommit // minimal resubmit interval specified by user. - timestamp int64 // timestamp for each round of sealing. + runningInterrupt *int32 // Running task interrupt + queuedInterrupt *int32 // Queued task interrupt + minRecommit = recommit // minimal resubmit interval specified by user. + timestamp int64 // timestamp for each round of sealing. 
) timer := time.NewTimer(0) defer timer.Stop() <-timer.C // discard the initial tick - // commit aborts in-flight transaction execution with given signal and resubmits a new one. + // commit aborts in-flight transaction execution with highest seen signal and resubmits a new one commit := func(noempty bool, s int32) { - if interrupt != nil { - atomic.StoreInt32(interrupt, s) - } - interrupt = new(int32) select { - case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}: case <-w.exitCh: return + case queuedRequest := <-w.newWorkCh: + // Previously queued request wasn't started yet, update the request and resubmit + queuedRequest.noempty = queuedRequest.noempty || noempty + queuedRequest.timestamp = timestamp + w.newWorkCh <- queuedRequest // guaranteed to be nonblocking + default: + // Previously queued request has already started, cycle interrupt pointer and submit new work + runningInterrupt = queuedInterrupt + queuedInterrupt = new(int32) + + w.newWorkCh <- &newWorkReq{interrupt: queuedInterrupt, noempty: noempty, timestamp: timestamp} // guaranteed to be nonblocking } + + if runningInterrupt != nil && s > atomic.LoadInt32(runningInterrupt) { + atomic.StoreInt32(runningInterrupt, s) + } + timer.Reset(recommit) atomic.StoreInt32(&w.newTxs, 0) } @@ -496,6 +511,11 @@ func (w *worker) newWorkLoop(recommit time.Duration) { timestamp = time.Now().Unix() commit(false, commitInterruptNewHead) + case <-w.newMegabundleCh: + if w.isRunning() { + commit(true, commitInterruptNone) + } + case <-timer.C: // If sealing is running resubmit a new work cycle periodically to pull in // higher priced transactions. Disable this overhead for pending blocks. @@ -564,7 +584,10 @@ func (w *worker) mainLoop() { for { select { case req := <-w.newWorkCh: - w.commitWork(req.interrupt, req.noempty, req.timestamp) + // Don't start if the work has already been interrupted + if req.interrupt == nil || atomic.LoadInt32(req.interrupt) == commitInterruptNone { + w.commitWork(req.interrupt, req.noempty, req.timestamp) + } case req := <-w.getWorkCh: block, err := w.generateWork(req.params) @@ -711,7 +734,7 @@ func (w *worker) taskLoop() { w.pendingTasks[sealHash] = task w.pendingMu.Unlock() - if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { + if err := w.engine.Seal(w.chain, task.block, task.profit, w.resultCh, stopCh); err != nil { log.Warn("Block sealing failed", "err", err) w.pendingMu.Lock() delete(w.pendingTasks, sealHash) @@ -929,6 +952,7 @@ func (w *worker) commitBundle(env *environment, txs types.Transactions, interrup inc: true, } } + return atomic.LoadInt32(interrupt) == commitInterruptNewHead } // If we don't have enough gas for any further transactions then we're done @@ -1037,6 +1061,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP inc: true, } } + return atomic.LoadInt32(interrupt) == commitInterruptNewHead } // If we don't have enough gas for any further transactions then we're done @@ -1258,6 +1283,7 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment) { if err != nil { return // no valid megabundle for this relay, nothing to do } + // Flashbots bundle merging duplicates work by simulating TXes and then committing them once more. // Megabundles API focuses on speed and runs everything in one cycle. 
coinbaseBalanceBefore := env.state.GetBalance(env.coinbase) From 16f9b7c2a9ddb70fa875a654da6c23c2e620981f Mon Sep 17 00:00:00 2001 From: Carlo Xu Date: Mon, 29 Nov 2021 16:57:18 -0600 Subject: [PATCH 4/8] Private Transaction API Sample (v1.10.13) --- cmd/geth/main.go | 1 + cmd/geth/usage.go | 1 + cmd/utils/flags.go | 8 +++ core/tx_pool.go | 113 +++++++++++++++++++++++++++++++---- eth/api_backend.go | 8 ++- eth/handler.go | 4 ++ eth/handler_test.go | 5 ++ eth/protocols/eth/handler.go | 4 ++ eth/sync.go | 7 ++- graphql/graphql.go | 2 +- internal/ethapi/api.go | 25 ++++++-- internal/ethapi/backend.go | 2 +- internal/web3ext/web3ext.go | 6 ++ les/api_backend.go | 2 +- 14 files changed, 164 insertions(+), 24 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 9fc03f3fda20..d19c53193ac3 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -92,6 +92,7 @@ var ( utils.TxPoolAccountQueueFlag, utils.TxPoolGlobalQueueFlag, utils.TxPoolLifetimeFlag, + utils.TxPoolPrivateLifetimeFlag, utils.SyncModeFlag, utils.ExitWhenSyncedFlag, utils.GCModeFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 027711b6c357..7410eeaac5c8 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -105,6 +105,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.TxPoolAccountQueueFlag, utils.TxPoolGlobalQueueFlag, utils.TxPoolLifetimeFlag, + utils.TxPoolPrivateLifetimeFlag, }, }, { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index c199eac1f955..a7bdba85b6a9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -389,6 +389,11 @@ var ( Usage: "Maximum amount of time non-executable transaction are queued", Value: ethconfig.Defaults.TxPool.Lifetime, } + TxPoolPrivateLifetimeFlag = cli.DurationFlag{ + Name: "txpool.privatelifetime", + Usage: "Maximum amount of time private transactions are withheld from public broadcasting", + Value: ethconfig.Defaults.TxPool.PrivateTxLifetime, + } // Performance tuning settings CacheFlag = cli.IntFlag{ Name: "cache", @@ -1360,6 +1365,9 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) { cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name) } + if ctx.GlobalIsSet(TxPoolPrivateLifetimeFlag.Name) { + cfg.PrivateTxLifetime = ctx.GlobalDuration(TxPoolPrivateLifetimeFlag.Name) + } addresses := strings.Split(ctx.GlobalString(MinerTrustedRelaysFlag.Name), ",") for _, address := range addresses { diff --git a/core/tx_pool.go b/core/tx_pool.go index ee507a5b7055..0f340ec1a363 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -88,8 +88,9 @@ var ( ) var ( - evictionInterval = time.Minute // Time interval to check for evictable transactions - statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats + evictionInterval = time.Minute // Time interval to check for evictable transactions + statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats + privateTxCleanupInterval = 1 * time.Hour ) var ( @@ -164,7 +165,8 @@ type TxPoolConfig struct { AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts - Lifetime time.Duration // Maximum amount of time non-executable transaction are queued + Lifetime time.Duration // Maximum amount of time non-executable transaction are queued + PrivateTxLifetime time.Duration // Maximum amount of time to keep private transactions private TrustedRelays []common.Address // Trusted relay 
addresses. Duplicated from the miner config. } @@ -183,7 +185,8 @@ var DefaultTxPoolConfig = TxPoolConfig{ AccountQueue: 64, GlobalQueue: 1024, - Lifetime: 3 * time.Hour, + Lifetime: 3 * time.Hour, + PrivateTxLifetime: 3 * 24 * time.Hour, } // sanitize checks the provided user configurations and changes anything that's @@ -222,6 +225,10 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig { log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) conf.Lifetime = DefaultTxPoolConfig.Lifetime } + if conf.PrivateTxLifetime < 1 { + log.Warn("Sanitizing invalid txpool private tx lifetime", "provided", conf.PrivateTxLifetime, "updated", DefaultTxPoolConfig.PrivateTxLifetime) + conf.PrivateTxLifetime = DefaultTxPoolConfig.PrivateTxLifetime + } return conf } @@ -261,6 +268,7 @@ type TxPool struct { NewMegabundleHooks []func(common.Address, *types.MevBundle) all *txLookup // All transactions to allow lookups priced *txPricedList // All transactions sorted by price + privateTxs *timestampedTxHashSet chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription @@ -296,6 +304,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block beats: make(map[common.Address]time.Time), megabundles: make(map[common.Address]types.MevBundle), all: newTxLookup(), + privateTxs: newExpiringTxHashSet(config.PrivateTxLifetime), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), reqResetCh: make(chan *txpoolResetRequest), reqPromoteCh: make(chan *accountSet), @@ -346,9 +355,10 @@ func (pool *TxPool) loop() { var ( prevPending, prevQueued, prevStales int // Start the stats reporting and transaction eviction tickers - report = time.NewTicker(statsReportInterval) - evict = time.NewTicker(evictionInterval) - journal = time.NewTicker(pool.config.Rejournal) + report = time.NewTicker(statsReportInterval) + evict = time.NewTicker(evictionInterval) + journal = time.NewTicker(pool.config.Rejournal) + privateTx = time.NewTicker(privateTxCleanupInterval) // Track the previous head headers for transaction reorgs head = pool.chain.CurrentBlock() ) @@ -412,6 +422,10 @@ func (pool *TxPool) loop() { } pool.mu.Unlock() } + + // Remove stale hashes that must be kept private + case <-privateTx.C: + pool.privateTxs.prune() } } } @@ -532,6 +546,11 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types. return pending, queued } +// IsPrivateTxHash indicates whether the transaction should be shared with peers +func (pool *TxPool) IsPrivateTxHash(hash common.Hash) bool { + return pool.privateTxs.Contains(hash) +} + // Pending retrieves all currently processable transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. @@ -958,7 +977,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T // This method is used to add transactions from the RPC API and performs synchronous pool // reorganization and event propagation. func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { - return pool.addTxs(txs, !pool.config.NoLocals, true) + return pool.addTxs(txs, !pool.config.NoLocals, true, false) } // AddLocal enqueues a single local transaction into the pool if it is valid. This is @@ -974,12 +993,18 @@ func (pool *TxPool) AddLocal(tx *types.Transaction) error { // This method is used to add transactions from the p2p network and does not wait for pool // reorganization and internal event propagation. 
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, false)
+ return pool.addTxs(txs, false, false, false)
+}
+
+// AddPrivateRemote adds transactions to the pool, but does not broadcast these transactions to any peers.
+func (pool *TxPool) AddPrivateRemote(tx *types.Transaction) error {
+ errs := pool.addTxs([]*types.Transaction{tx}, false, false, true)
+ return errs[0]
}

// This is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
- return pool.addTxs(txs, false, true)
+ return pool.addTxs(txs, false, true, false)
}

// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
@@ -998,7 +1023,7 @@ func (pool *TxPool) AddRemote(tx *types.Transaction) error {
}

// addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
+func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync, private bool) []error {
// Filter out known ones without obtaining the pool lock or recovering signatures
var (
errs = make([]error, len(txs))
@@ -1027,6 +1052,13 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
return errs
}
+
+ // Track private transactions, so they don't get leaked to the public mempool
+ if private {
+ for _, tx := range news {
+ pool.privateTxs.Add(tx.Hash())
+ }
+ }
+
// Process all the new transaction and merge any errors into the original slice
pool.mu.Lock()
newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
@@ -1321,7 +1353,11 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
if len(events) > 0 {
var txs []*types.Transaction
for _, set := range events {
- txs = append(txs, set.Flatten()...)
+ for _, tx := range set.Flatten() {
+ if !pool.IsPrivateTxHash(tx.Hash()) {
+ txs = append(txs, tx)
+ }
+ }
}
pool.txFeed.Send(NewTxsEvent{txs})
}
@@ -1935,6 +1971,59 @@ func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
return found
}

+type timestampedTxHashSet struct {
+ lock sync.RWMutex
+ hashes []common.Hash
+ timestamps map[common.Hash]time.Time
+ ttl time.Duration
+}
+
+func newExpiringTxHashSet(ttl time.Duration) *timestampedTxHashSet {
+ s := &timestampedTxHashSet{
+ hashes: make([]common.Hash, 0),
+ timestamps: make(map[common.Hash]time.Time),
+ ttl: ttl,
+ }
+
+ return s
+}
+
+func (s *timestampedTxHashSet) Add(hash common.Hash) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.hashes = append(s.hashes, hash)
+ s.timestamps[hash] = time.Now().Add(s.ttl)
+}
+
+func (s *timestampedTxHashSet) Contains(hash common.Hash) bool {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ _, ok := s.timestamps[hash]
+ return ok
+}
+
+func (s *timestampedTxHashSet) prune() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ var (
+ count int
+ now = time.Now()
+ )
+ for _, hash := range s.hashes {
+ ts := s.timestamps[hash]
+ if ts.After(now) {
+ break
+ }
+
+ delete(s.timestamps, hash)
+ count += 1
+ }
+
+ s.hashes = s.hashes[count:]
+}
+
// numSlots calculates the number of slots needed for a single transaction. 
func numSlots(tx *types.Transaction) int { return int((tx.Size() + txSlotSize - 1) / txSlotSize) diff --git a/eth/api_backend.go b/eth/api_backend.go index ea3b4a2e0461..526dffe1f217 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -235,8 +235,12 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri return b.eth.BlockChain().SubscribeLogsEvent(ch) } -func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - return b.eth.txPool.AddLocal(signedTx) +func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction, private bool) error { + if private { + return b.eth.txPool.AddPrivateRemote(signedTx) + } else { + return b.eth.txPool.AddLocal(signedTx) + } } func (b *EthAPIBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error { diff --git a/eth/handler.go b/eth/handler.go index 921a62dba501..e8bcf4d6cfa1 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -72,6 +72,10 @@ type txPool interface { // SubscribeNewTxsEvent should return an event subscription of // NewTxsEvent and send events to the given channel. SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription + + // IsPrivateTxHash indicates if the transaction hash should not + // be broadcast on public channels + IsPrivateTxHash(hash common.Hash) bool } // handlerConfig is the collection of initialization parameters to create a full diff --git a/eth/handler_test.go b/eth/handler_test.go index d967b6df935e..382bed491bd1 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -113,6 +113,11 @@ func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subs return p.txFeed.Subscribe(ch) } +// IsPrivateTxHash always returns false in tests +func (p *testTxPool) IsPrivateTxHash(hash common.Hash) bool { + return false +} + // testHandler is a live implementation of the Ethereum protocol handler, just // preinitialized with some sane testing defaults and the transaction pool mocked // out. diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 81d45d8b8fcf..a196c00b1f25 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -91,6 +91,10 @@ type Backend interface { type TxPool interface { // Get retrieves the transaction from the local txpool with the given hash. Get(hash common.Hash) *types.Transaction + + // IsPrivateTxHash indicates if the transaction hash should not + // be broadcast on public channels + IsPrivateTxHash(hash common.Hash) bool } // MakeProtocols constructs the P2P protocol definitions for `eth`. diff --git a/eth/sync.go b/eth/sync.go index b8ac67d3b2d1..c8c2fee5dc2f 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -45,7 +45,12 @@ func (h *handler) syncTransactions(p *eth.Peer) { var txs types.Transactions pending := h.txpool.Pending(false) for _, batch := range pending { - txs = append(txs, batch...) 
+ for _, tx := range batch { + // don't share any transactions marked as private + if !h.txpool.IsPrivateTxHash(tx.Hash()) { + txs = append(txs, tx) + } + } } if len(txs) == 0 { return diff --git a/graphql/graphql.go b/graphql/graphql.go index 16e0eb654d97..3b4fb6376db2 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -1153,7 +1153,7 @@ func (r *Resolver) SendRawTransaction(ctx context.Context, args struct{ Data hex if err := tx.UnmarshalBinary(args.Data); err != nil { return common.Hash{}, err } - hash, err := ethapi.SubmitTransaction(ctx, r.backend, tx) + hash, err := ethapi.SubmitTransaction(ctx, r.backend, tx, false) return hash, err } diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 4497361674b0..49115e5f3975 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -469,7 +469,7 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args Transactio log.Warn("Failed transaction send attempt", "from", args.from(), "to", args.To, "value", args.Value.ToInt(), "err", err) return common.Hash{}, err } - return SubmitTransaction(ctx, s.b, signed) + return SubmitTransaction(ctx, s.b, signed, false) } // SignTransaction will create a transaction from the given arguments and @@ -1682,7 +1682,7 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti } // SubmitTransaction is a helper function that submits tx to txPool and logs a message. -func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) { +func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction, private bool) (common.Hash, error) { // If the transaction fee cap is already specified, ensure the // fee of the given transaction is _reasonable_. if err := checkTxFee(tx.GasPrice(), tx.Gas(), b.RPCTxFeeCap()); err != nil { @@ -1692,7 +1692,7 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c // Ensure only eip155 signed transactions are submitted if EIP155Required is set. return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC") } - if err := b.SendTx(ctx, tx); err != nil { + if err := b.SendTx(ctx, tx, private); err != nil { return common.Hash{}, err } // Print a log with full tx details for manual investigations and interventions @@ -1740,7 +1740,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Tra if err != nil { return common.Hash{}, err } - return SubmitTransaction(ctx, s.b, signed) + return SubmitTransaction(ctx, s.b, signed, false) } // FillTransaction fills the defaults (nonce, gas, gasPrice or 1559 fields) @@ -1767,7 +1767,20 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, input if err := tx.UnmarshalBinary(input); err != nil { return common.Hash{}, err } - return SubmitTransaction(ctx, s.b, tx) + return SubmitTransaction(ctx, s.b, tx, false) +} + +// SendPrivateRawTransaction will add the signed transaction to the transaction pool, +// without broadcasting the transaction to its peers, and mark the transaction to avoid +// future syncs. +// +// See SendRawTransaction. 
+func (s *PublicTransactionPoolAPI) SendPrivateRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(input); err != nil { + return common.Hash{}, err + } + return SubmitTransaction(ctx, s.b, tx, true) } // Sign calculates an ECDSA signature for: @@ -1900,7 +1913,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs Transact if err != nil { return common.Hash{}, err } - if err = s.b.SendTx(ctx, signedTx); err != nil { + if err = s.b.SendTx(ctx, signedTx, false); err != nil { return common.Hash{}, err } return signedTx.Hash(), nil diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 783b46fbd296..11a9a327e4ee 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -73,7 +73,7 @@ type Backend interface { SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription // Transaction pool API - SendTx(ctx context.Context, signedTx *types.Transaction) error + SendTx(ctx context.Context, signedTx *types.Transaction, private bool) error SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) error SendMegabundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash, relayAddr common.Address) error GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 0e38eecc0d38..7f12a39d5ac8 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -516,6 +516,12 @@ web3._extend({ params: 1, inputFormatter: [web3._extend.formatters.inputTransactionFormatter] }), + new web3._extend.Method({ + name: 'sendPrivateRawTransaction', + call: 'eth_sendPrivateRawTransaction', + params: 1, + inputFormatter: [null] + }), new web3._extend.Method({ name: 'fillTransaction', call: 'eth_fillTransaction', diff --git a/les/api_backend.go b/les/api_backend.go index b910bd3e1f48..d0ead5ac8763 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -191,7 +191,7 @@ func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *sta return vm.NewEVM(context, txContext, state, b.eth.chainConfig, *vmConfig), state.Error, nil } -func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { +func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction, private bool) error { return b.eth.txPool.Add(ctx, signedTx) } From 71ab3bd774cea63f7cbd1a5b55a11b15f1aabdaf Mon Sep 17 00:00:00 2001 From: Kevin Chen Date: Fri, 28 Jan 2022 14:46:57 -0600 Subject: [PATCH 5/8] Remove private transactions that are confirmed in blocks --- core/tx_pool.go | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/core/tx_pool.go b/core/tx_pool.go index 0f340ec1a363..d70fee451abb 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1669,6 +1669,7 @@ func (pool *TxPool) demoteUnexecutables() { for _, tx := range olds { hash := tx.Hash() pool.all.Remove(hash) + pool.privateTxs.Remove(hash) log.Trace("Removed old pending transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later @@ -1973,14 +1974,12 @@ func (t *txLookup) RemotesBelowTip(threshold *big.Int) 
types.Transactions {
type timestampedTxHashSet struct {
lock sync.RWMutex
- hashes []common.Hash
timestamps map[common.Hash]time.Time
ttl time.Duration
}

func newExpiringTxHashSet(ttl time.Duration) *timestampedTxHashSet {
s := &timestampedTxHashSet{
- hashes: make([]common.Hash, 0),
timestamps: make(map[common.Hash]time.Time),
ttl: ttl,
}
@@ -1992,8 +1991,10 @@ func (s *timestampedTxHashSet) Add(hash common.Hash) {
s.lock.Lock()
defer s.lock.Unlock()

- s.hashes = append(s.hashes, hash)
- s.timestamps[hash] = time.Now().Add(s.ttl)
+ _, ok := s.timestamps[hash]
+ if !ok {
+ s.timestamps[hash] = time.Now().Add(s.ttl)
+ }
}

func (s *timestampedTxHashSet) Contains(hash common.Hash) bool {
@@ -2003,25 +2004,26 @@ func (s *timestampedTxHashSet) Contains(hash common.Hash) bool {
return ok
}

-func (s *timestampedTxHashSet) prune() {
+func (s *timestampedTxHashSet) Remove(hash common.Hash) {
s.lock.Lock()
defer s.lock.Unlock()

- var (
- count int
- now = time.Now()
- )
- for _, hash := range s.hashes {
- ts := s.timestamps[hash]
- if ts.After(now) {
- break
- }
-
+ _, ok := s.timestamps[hash]
+ if ok {
delete(s.timestamps, hash)
- count += 1
}
+}
+
+func (s *timestampedTxHashSet) prune() {
+ s.lock.Lock()
+ defer s.lock.Unlock()

- s.hashes = s.hashes[count:]
+ now := time.Now()
+ for hash, ts := range s.timestamps {
+ if ts.Before(now) {
+ delete(s.timestamps, hash)
+ }
+ }
}

// numSlots calculates the number of slots needed for a single transaction.

From f8eee8f8a579fdac50ee8b2388579d389058ebe1 Mon Sep 17 00:00:00 2001
From: Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com>
Date: Thu, 24 Feb 2022 19:40:14 +0100
Subject: [PATCH 6/8] Run private transactions e2e tests in ci

---
 .github/workflows/go.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 3fc1f2ff8c68..47c3013398fe 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -62,3 +62,4 @@ jobs:
 sleep 15
 yarn run demo-simple
 yarn run demo-contract
+ yarn run demo-private-tx

From ec93a8f2559390c94517a87234a6ebd322088c1e Mon Sep 17 00:00:00 2001
From: Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com>
Date: Wed, 2 Mar 2022 09:28:23 +0100
Subject: [PATCH 7/8] Add v0.6 spec link

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 4f87877d7494..0358bbff5c6f 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ See [here](https://docs.flashbots.net) for Flashbots documentation. 
| Version | Spec | | ------- | ------------------------------------------------------------------------------------------- | +| v0.6 | [MEV-Geth Spec v0.6](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v06) | | v0.5 | [MEV-Geth Spec v0.5](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v05) | | v0.4 | [MEV-Geth Spec v0.4](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v04) | | v0.3 | [MEV-Geth Spec v0.3](https://docs.flashbots.net/flashbots-auction/miners/mev-geth-spec/v03) | From d050ad323fad6e5d3a6e543fa253f93b72f457a1 Mon Sep 17 00:00:00 2001 From: 0xdapper <94534135+0xdapper@users.noreply.github.com> Date: Mon, 28 Mar 2022 08:21:16 +0530 Subject: [PATCH 8/8] feat: use tx hash instead of nonce to determine if the tx is a mempool tx --- miner/worker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner/worker.go b/miner/worker.go index 3f1d7c2dfc2b..a9f16dbe1cf6 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -1596,10 +1596,10 @@ func (w *worker) computeBundleGas(env *environment, bundle types.MevBundle, stat txInPendingPool := false if accountTxs, ok := pendingTxs[from]; ok { // check if tx is in pending pool - txNonce := tx.Nonce() + txHash := tx.Hash() for _, accountTx := range accountTxs { - if accountTx.Nonce() == txNonce { + if accountTx.Hash() == txHash { txInPendingPool = true break }
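
The bundle-simulation handler at the top of this section (registered through `NewBundleAPI` in the `eth` namespace) walks `args.Txs`, applies each message to a copy of the requested state, and returns one `gasUsed` entry per transaction. Below is a minimal client-side sketch of calling it over JSON-RPC. It assumes the handler is exposed as `eth_estimateGasBundle`, the name mev-geth uses for this `BundleAPI` method; the endpoint, addresses, and exact JSON field names are illustrative, not confirmed by the diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Endpoint is an assumption; point this at your own mev-geth node.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Field names mirror the handler's args (Txs, BlockNumber,
	// StateBlockNumberOrHash); the tx values are placeholders.
	args := map[string]interface{}{
		"txs": []map[string]interface{}{
			{
				"from": "0x0000000000000000000000000000000000000001",
				"to":   "0x0000000000000000000000000000000000000002",
				"data": "0x",
			},
		},
		"blockNumber":      "0x10",
		"stateBlockNumber": "latest",
	}

	var out map[string]interface{}
	if err := client.CallContext(context.Background(), &out, "eth_estimateGasBundle", args); err != nil {
		log.Fatal(err)
	}
	// "results" holds one {"gasUsed": ...} entry per simulated tx.
	fmt.Println(out["results"])
}
```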
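The `multi_worker.go` hunk hands megabundles from the tx pool to a per-relay worker via `NewMegabundleHooks`, and the hook's `select` with an empty `default` makes the notification fire-and-forget: a worker that is mid-commit simply misses the wakeup instead of stalling the pool. A self-contained sketch of that hand-off pattern, with simplified stand-in types (the patch keys a `map[common.Address]*worker` and sends on the unbuffered `newMegabundleCh`):

```go
package main

import "fmt"

type megabundle struct{ relay string }

type relayWorker struct {
	newMegabundleCh chan *megabundle // unbuffered, like the worker field in the patch
}

func main() {
	// One dedicated worker per trusted relay, keyed by relay identity.
	workers := map[string]*relayWorker{
		"relayA": {newMegabundleCh: make(chan *megabundle)},
	}

	// The hook the tx pool invokes for each accepted megabundle.
	notify := func(b *megabundle) {
		w, found := workers[b.relay]
		if !found {
			return // no worker registered for this relay
		}
		select {
		case w.newMegabundleCh <- b:
			// a worker was waiting; it will re-commit with the new bundle
		default:
			// worker is mid-commit; drop the wakeup instead of stalling the pool
		}
	}

	notify(&megabundle{relay: "relayA"})
	fmt.Println("tx pool returned immediately; no worker was blocked")
}
```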
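The `newWorkLoop` rewrite gives `newWorkCh` a capacity of one and splits the interrupt into `runningInterrupt`/`queuedInterrupt`: a commit either merges into a request that has not started yet or rotates the pointers and queues fresh work, and an interrupt value is only ever raised, never lowered. A runnable reduction of that coalescing logic, with the exit channel and timestamps omitted:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type workReq struct {
	interrupt *int32
	noempty   bool
}

func main() {
	newWorkCh := make(chan *workReq, 1) // capacity 1: the sends below never block

	var runningInterrupt, queuedInterrupt *int32

	commit := func(noempty bool, s int32) {
		select {
		case queued := <-newWorkCh:
			// The queued request hasn't been picked up yet: fold this
			// commit into it instead of producing a second request.
			queued.noempty = queued.noempty || noempty
			newWorkCh <- queued
		default:
			// Queue is empty, so the last request is already running:
			// rotate the interrupt pointers and queue fresh work.
			runningInterrupt = queuedInterrupt
			queuedInterrupt = new(int32)
			newWorkCh <- &workReq{interrupt: queuedInterrupt, noempty: noempty}
		}
		// Interrupts only escalate; a lower-priority signal never
		// overwrites a higher one already stored.
		if runningInterrupt != nil && s > atomic.LoadInt32(runningInterrupt) {
			atomic.StoreInt32(runningInterrupt, s)
		}
	}

	commit(false, 1) // queues a request
	commit(true, 2)  // merges into the still-queued request

	req := <-newWorkCh
	fmt.Println("coalesced into one request, noempty =", req.noempty) // true
}
```

The capacity-one channel is what makes both sends in `commit` provably non-blocking, which the patch's `// guaranteed to be nonblocking` comments rely on.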
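PATCH 4 plumbs the new `--txpool.privatelifetime` flag into `TxPoolConfig.PrivateTxLifetime`, defaulting to three days, with `sanitize()` restoring the default for values below one nanosecond. A sketch of the same flag wiring as a standalone app, assuming the `gopkg.in/urfave/cli.v1` package that geth's flag definitions at this version are built on:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	// Mirrors TxPoolPrivateLifetimeFlag from the flags.go hunk.
	privateLifetime := cli.DurationFlag{
		Name:  "txpool.privatelifetime",
		Usage: "Maximum amount of time private transactions are withheld from public broadcasting",
		Value: 3 * 24 * time.Hour, // DefaultTxPoolConfig.PrivateTxLifetime in the patch
	}

	app := cli.NewApp()
	app.Flags = []cli.Flag{privateLifetime}
	app.Action = func(ctx *cli.Context) error {
		lifetime := ctx.Duration(privateLifetime.Name)
		if lifetime < 1 { // same guard as TxPoolConfig.sanitize()
			lifetime = 3 * 24 * time.Hour
		}
		fmt.Println("keeping private txs out of gossip for", lifetime)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

Run with `--txpool.privatelifetime 24h` to override the default; `cli.DurationFlag` accepts any Go duration string.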
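All of the private-transaction behaviour hangs off `timestampedTxHashSet`: `AddPrivateRemote` records the hash, `IsPrivateTxHash` is consulted before the tx feed, peer sync, and broadcast paths expose a transaction, and a ticker calls `prune()` hourly. The standalone sketch below mirrors the post-PATCH-5 shape (expiry map only, no ordered slice), with string keys standing in for `common.Hash`:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlSet mirrors timestampedTxHashSet after PATCH 5: expiry
// timestamps keyed by hash, guarded by a read/write mutex.
type ttlSet struct {
	mu         sync.RWMutex
	timestamps map[string]time.Time
	ttl        time.Duration
}

func newTTLSet(ttl time.Duration) *ttlSet {
	return &ttlSet{timestamps: make(map[string]time.Time), ttl: ttl}
}

// Add records a hash once; re-adding never extends the deadline.
func (s *ttlSet) Add(h string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.timestamps[h]; !ok {
		s.timestamps[h] = time.Now().Add(s.ttl)
	}
}

func (s *ttlSet) Contains(h string) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, ok := s.timestamps[h]
	return ok
}

// prune drops expired entries with a full map scan, which is cheap
// at mempool scale and needs no ordering bookkeeping.
func (s *ttlSet) prune() {
	s.mu.Lock()
	defer s.mu.Unlock()
	now := time.Now()
	for h, ts := range s.timestamps {
		if ts.Before(now) {
			delete(s.timestamps, h)
		}
	}
}

func main() {
	s := newTTLSet(10 * time.Millisecond)
	s.Add("0xabc")
	fmt.Println(s.Contains("0xabc")) // true: still private
	time.Sleep(20 * time.Millisecond)
	s.prune()
	fmt.Println(s.Contains("0xabc")) // false: expired, safe to gossip
}
```

Dropping the ordered slice costs `prune()` a full map scan instead of an early-exit walk, but it makes the `Remove` added in PATCH 5 (deleting hashes of mined private txs) a constant-time operation and keeps `Add` idempotent.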
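On the RPC surface, `eth_sendPrivateRawTransaction` accepts the same signed hex payload as `eth_sendRawTransaction` but routes through `AddPrivateRemote`, so the transaction is mineable locally without being gossiped for the configured lifetime. A client sketch; the endpoint is an assumption and the payload placeholder must be a fully signed transaction for a real call:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // endpoint is an assumption
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	rawTx := []byte{ /* RLP- or typed-encoded signed transaction bytes */ }

	// Same hex payload shape as eth_sendRawTransaction, but the pool
	// marks the hash private and withholds it from peers.
	var txHash common.Hash
	err = client.CallContext(context.Background(), &txHash,
		"eth_sendPrivateRawTransaction", hexutil.Encode(rawTx))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("private tx accepted:", txHash)
}
```

Via the `web3ext` registration in PATCH 4, the same call is available from an attached console as `eth.sendPrivateRawTransaction(rawTxHex)`.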
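Finally, PATCH 8 changes `computeBundleGas` to match pending transactions by hash rather than by nonce. Nonce matching misfires when the pool holds a fee-bumped replacement: it shares the sender and nonce with the bundle transaction but is a different transaction, and only the hash distinguishes the two. A toy illustration of the difference:

```go
package main

import "fmt"

type tx struct {
	from  string
	nonce uint64
	hash  string
}

// inPoolByNonce reproduces the old check: any pending tx from the same
// sender with the same nonce counts as "already in the mempool".
func inPoolByNonce(pending map[string][]tx, t tx) bool {
	for _, p := range pending[t.from] {
		if p.nonce == t.nonce {
			return true
		}
	}
	return false
}

// inPoolByHash is the PATCH 8 behaviour: only the identical transaction
// (same hash) counts, so a same-nonce replacement no longer matches.
func inPoolByHash(pending map[string][]tx, t tx) bool {
	for _, p := range pending[t.from] {
		if p.hash == t.hash {
			return true
		}
	}
	return false
}

func main() {
	pending := map[string][]tx{
		"alice": {{from: "alice", nonce: 7, hash: "0xaa"}}, // mempool copy
	}
	bundleTx := tx{from: "alice", nonce: 7, hash: "0xbb"} // same nonce, repriced

	fmt.Println(inPoolByNonce(pending, bundleTx)) // true  (old check, wrong)
	fmt.Println(inPoolByHash(pending, bundleTx))  // false (new check, correct)
}
```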