From cd22ff05054accf2d524be6352e2dda5179b8b0f Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 14 Oct 2024 10:24:30 +0200 Subject: [PATCH] add: discv4 crawling support --- .github/workflows/pull_request_main.yml | 2 +- .github/workflows/push_main.yml | 2 +- Makefile | 4 +- README.md | 62 +- cmd/nebula/cmd.go | 18 +- cmd/nebula/cmd_crawl.go | 57 +- cmd/prefix/gen.go | 2 +- config/bootstrap.go | 2 +- config/config.go | 6 + core/engine.go | 2 +- core/handler_crawl.go | 2 +- db/errors.go | 35 + db/migrations/000028_add_net_errors.down.sql | 1 + db/migrations/000028_add_net_errors.up.sql | 20 + db/models/boil_types.go | 78 +- devp2p/client.go | 200 ---- devp2p/snapTypes.go | 60 -- devp2p/types.go | 291 ------ discv4/client.go | 120 +++ discv4/crawler.go | 372 +++++--- discv4/crawler_test.go | 133 +++ discv4/dialer.go | 4 +- discv4/driver_crawler.go | 284 ++++-- discv4/driver_dialer.go | 6 +- discv4/gen.go | 45 + discv4/gen_test.go | 42 + {discvx => discv4}/prefixmap.go | 4 +- discv5/crawler.go | 10 +- discv5/dialer.go | 4 +- discv5/driver_crawler.go | 6 +- discv5/driver_dialer.go | 6 +- discvx/common.go | 141 --- discvx/lookup.go | 208 ----- discvx/metrics.go | 82 -- discvx/node.go | 100 -- discvx/ntp.go | 111 --- discvx/table.go | 733 --------------- discvx/table_reval.go | 248 ----- discvx/table_reval_test.go | 119 --- discvx/table_test.go | 499 ---------- discvx/table_util_test.go | 352 ------- discvx/v4_lookup_test.go | 366 -------- discvx/v4_udp.go | 813 ----------------- discvx/v4_udp_test.go | 656 ------------- discvx/v5_talk.go | 115 --- discvx/v5_udp.go | 911 ------------------- discvx/v5_udp_test.go | 859 ----------------- go.mod | 18 +- go.sum | 29 +- tele/tele.go | 7 +- 50 files changed, 1103 insertions(+), 7144 deletions(-) create mode 100644 db/migrations/000028_add_net_errors.down.sql create mode 100644 db/migrations/000028_add_net_errors.up.sql delete mode 100644 devp2p/client.go delete mode 100644 devp2p/snapTypes.go delete mode 100644 
devp2p/types.go create mode 100644 discv4/client.go create mode 100644 discv4/crawler_test.go create mode 100644 discv4/gen.go create mode 100644 discv4/gen_test.go rename {discvx => discv4}/prefixmap.go (99%) delete mode 100644 discvx/common.go delete mode 100644 discvx/lookup.go delete mode 100644 discvx/metrics.go delete mode 100644 discvx/node.go delete mode 100644 discvx/ntp.go delete mode 100644 discvx/table.go delete mode 100644 discvx/table_reval.go delete mode 100644 discvx/table_reval_test.go delete mode 100644 discvx/table_test.go delete mode 100644 discvx/table_util_test.go delete mode 100644 discvx/v4_lookup_test.go delete mode 100644 discvx/v4_udp.go delete mode 100644 discvx/v4_udp_test.go delete mode 100644 discvx/v5_talk.go delete mode 100644 discvx/v5_udp.go delete mode 100644 discvx/v5_udp_test.go diff --git a/.github/workflows/pull_request_main.yml b/.github/workflows/pull_request_main.yml index 86a2c5b7..f5803096 100644 --- a/.github/workflows/pull_request_main.yml +++ b/.github/workflows/pull_request_main.yml @@ -30,7 +30,7 @@ jobs: - name: Setting up Golang uses: actions/setup-go@v4 with: - go-version: '1.23.1' + go-version: '1.23' - name: Running tests run: make test \ No newline at end of file diff --git a/.github/workflows/push_main.yml b/.github/workflows/push_main.yml index 25ea0453..ba33f071 100644 --- a/.github/workflows/push_main.yml +++ b/.github/workflows/push_main.yml @@ -30,7 +30,7 @@ jobs: - name: Setting up Golang uses: actions/setup-go@v4 with: - go-version: '1.23.1' + go-version: '1.23' - name: Running tests run: make test diff --git a/Makefile b/Makefile index 89a8499e..8b194fa3 100644 --- a/Makefile +++ b/Makefile @@ -30,8 +30,8 @@ docker-push: docker-linux tools: go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@v4.15.2 - go install github.com/volatiletech/sqlboiler/v4@v4.13.0 - go install github.com/volatiletech/sqlboiler/v4/drivers/sqlboiler-psql@v4.13.0 + go install 
github.com/volatiletech/sqlboiler/v4@v4.14.1 + go install github.com/volatiletech/sqlboiler/v4/drivers/sqlboiler-psql@v4.14.1 go install go.uber.org/mock/mockgen@v0.3.0 database-reset: database-stop databased migrate-up models diff --git a/README.md b/README.md index 8126aee1..219eac69 100644 --- a/README.md +++ b/README.md @@ -12,28 +12,34 @@ A network agnostic DHT crawler and monitor. The crawler connects to [DHT](https: - [IPFS](https://ipfs.network) - [_Amino DHT_](https://blog.ipfs.tech/2023-09-amino-refactoring/) - [Ethereum](https://ethereum.org/en/) - [_Consensus Layer_](https://ethereum.org/uz/developers/docs/networking-layer/#consensus-discovery) -- [Ethereum](https://ethereum.org/en/) - [_Testnet Holesky_](https://github.com/eth-clients/holesky) (alpha) +- [Ethereum](https://ethereum.org/en/) - [_Execution Layer_](https://ethereum.org/uz/developers/docs/networking-layer/#discovery) - [Filecoin](https://filecoin.io) - [Polkadot](https://polkadot.network/) - [Kusama](https://kusama.network/) - [Rococo](https://substrate.io/developers/rococo-network/) - [Westend](https://wiki.polkadot.network/docs/maintain-networks#westend-test-network) +- [Avail](https://www.availproject.org/) - [Celestia](https://celestia.org/) - [_Mainnet_](https://blog.celestia.org/celestia-mainnet-is-live/) - [Celestia](https://celestia.org/) - [_Arabica_](https://github.com/celestiaorg/celestia-node/blob/9c0a5fb0626ada6e6cdb8bcd816d01a3aa5043ad/nodebuilder/p2p/bootstrap.go#L40) - [Celestia](https://celestia.org/) - [_Mocha_](https://docs.celestia.org/nodes/mocha-testnet) - [Pactus](https://pactus.org) -_The crawler was:_ +The crawler was: - 🏆 _awarded a prize in the [DI2F Workshop hackathon](https://research.protocol.ai/blog/2021/decentralising-the-internet-with-ipfs-and-filecoin-di2f-a-report-from-the-trenches/)._ 🏆 - 🎓 _used for the ACM SigCOMM'22 paper [Design and Evaluation of IPFS: A Storage Layer for the Decentralized 
Web](https://research.protocol.ai/publications/design-and-evaluation-of-ipfs-a-storage-layer-for-the-decentralized-web/trautwein2022.pdf)_ 🎓 -📊 [ProbeLab](https://probelab.io) is publishing weekly reports for the IPFS Amino DHT based on the crawl results [here](https://github.com/protocol/network-measurements/tree/master/reports)! 📊 +Nebula powers: +- 📊 _the weekly reports for the IPFS Amino DHT [here](https://github.com/protocol/network-measurements/tree/master/reports)!_ 📊 +- 🌐 _many graphs on [probelab.io](https://probelab.io) for most of the supported networks above_ 🌐 -📺 You can find a demo on YouTube: [Nebula: A Network Agnostic DHT Crawler](https://www.youtube.com/watch?v=QDgvCBDqNMc) 📺 + +You can find a demo on YouTube: [Nebula: A Network Agnostic DHT Crawler](https://www.youtube.com/watch?v=QDgvCBDqNMc) 📺 ![Screenshot from a Grafana dashboard](./docs/grafana-screenshot.png) +_Grafana Dashboard is not part of this repository_ + ## Table of Contents - [Table of Contents](#table-of-contents) @@ -156,6 +162,8 @@ nebula --db-user nebula_test --db-name nebula_test monitor When Nebula is configured to store its results in a postgres database, then it also tracks session information of remote peers. A session is one continuous streak of uptime (see below). +However, this is not implemented for all supported networks. The [ProbeLab](https://probelab.network) team is using the monitoring feature for the IPFS, Celestia, Filecoin, and Avail networks. Most notably, the Ethereum discv4/discv5 monitoring implementation still needs some work. + --- There are a few more command line flags that are documented when you run`nebula --help` and `nebula crawl --help`: @@ -170,8 +178,6 @@ random `PeerIDs` with common prefix lengths (CPL) that fall each peers buckets, closer (XOR distance) to the ones `nebula` just constructed. This will effectively yield a list of all `PeerIDs` that a peer has in its routing table. 
The process repeats for all found peers until `nebula` does not find any new `PeerIDs`. -This process is heavily inspired by the `basic-crawler` in [libp2p/go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/tree/master/crawler) from [@aschmahmann](https://github.com/aschmahmann). - If Nebula is configured to store its results in a database, every peer that was visited is written to it. The visit information includes latency measurements (dial/connect/crawl durations), current set of multi addresses, current agent version and current set of supported protocols. If the peer was dialable `nebula` will also create a `session` instance that contains the following information: @@ -223,7 +229,8 @@ CREATE TABLE sessions ( At the end of each crawl `nebula` persists general statistics about the crawl like the total duration, dialable peers, encountered errors, agent versions etc... -> **Info:** You can use the `crawl` sub-command with the global `--dry-run` option that skips any database operations. +> [!TIP] +> You can use the `crawl` sub-command with the global `--dry-run` option that skips any database operations. Command line help page: @@ -296,10 +303,10 @@ OPTIONS: ## Development -To develop this project, you need Go `1.19` and the following tools: +To develop this project, you need Go `1.23` and the following tools: - [`golang-migrate/migrate`](https://github.com/golang-migrate/migrate) to manage the SQL migration `v4.15.2` -- [`volatiletech/sqlboiler`](https://github.com/volatiletech/sqlboiler) to generate Go ORM `v4.14.2` +- [`volatiletech/sqlboiler`](https://github.com/volatiletech/sqlboiler) to generate Go ORM `v4.14.1` - `docker` to run a local postgres instance To install the necessary tools you can run `make tools`. This will use the `go install` command to download and install the tools into your `$GOPATH/bin` directory. So make sure you have it in your `$PATH` environment variable. 
@@ -312,7 +319,8 @@ You need a running postgres instance to persist and/or read the crawl results. R docker run --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_USER=nebula_test -e POSTGRES_DB=nebula_test --name nebula_test_db postgres:14 ``` -> **Info:** You can use the `crawl` sub-command with the global `--dry-run` option that skips any database operations or store the results as JSON files with the `--json-out` flag. +> [!TIP] +> You can use the `crawl` sub-command with the global `--dry-run` option that skips any database operations or store the results as JSON files with the `--json-out` flag. The default database settings for local development are: @@ -350,7 +358,7 @@ migrate create -ext sql -dir pkg/db/migrations -seq some_migration_name To run the tests you need a running test database instance: ```shell -make database +make database # or make databased (note the d suffix for "daemon") to start the DB in the background make test ``` @@ -376,6 +384,38 @@ The following presentation shows a ways to use Nebula by showcasing crawls of th [![Nebula: A Network Agnostic DHT Crawler - Dennis Trautwein](https://img.youtube.com/vi/QDgvCBDqNMc/0.jpg)](https://www.youtube.com/watch?v=QDgvCBDqNMc) +## Networks + +> [!NOTE] +> This section is work-in-progress and doesn't include information about all networks yet. + +The following sections document our experience with crawling the different networks. + +### Ethereum Execution (discv4) + +Under the hood Nebula uses packages from [`go-ethereum`](https://github.com/ethereum/go-ethereum) to facilitate peer +communication. Mostly, Nebula relies on the [discover package](https://github.com/ethereum/go-ethereum/tree/master/p2p/discover). +However, we made quite a few changes to the implementation that can be found in +our fork of `go-ethereum` [here](https://github.com/probe-lab/go-ethereum/tree/nebula) in the `nebula` branch. 
+ +Most notably, the custom changes include: + +- export of internal constants, functions, methods and types to customize their behaviour or call them directly +- changes to the response matcher logic. UDP packets won't be forwarded to all matchers. This was required so that + concurrent requests to the same peer don't lead to unhandled packets + +Deployment recommendations: + +- CPUs: 4 (better 8) +- Memory > 4 GB +- UDP Read Buffer size >1 MiB (better 4 MiB) via the `--udp-buffer-size=4194304` command line flag or corresponding environment variable `NEBULA_UDP_BUFFER_SIZE`. + You might need to adjust the maximum buffer size on Linux, so that the flag takes effect: + ```shell + sysctl -w net.core.rmem_max=8388608 # 8MiB + ``` +- UDP Response timeout of `3s` (default) +- Workers: 3000 + ## Maintainers [@dennis-tra](https://github.com/dennis-tra). diff --git a/cmd/nebula/cmd.go b/cmd/nebula/cmd.go index 357cf1df..bef02b14 100644 --- a/cmd/nebula/cmd.go +++ b/cmd/nebula/cmd.go @@ -21,6 +21,7 @@ const ( flagCategoryDatabase = "Database Configuration:" flagCategoryDebugging = "Debugging Configuration:" flagCategoryCache = "Cache Configuration:" + flagCategorySystem = "System Configuration:" flagCategoryNetwork = "Network Specific Configuration:" ) @@ -55,10 +56,11 @@ var rootConfig = &config.Root{ ProtocolsCacheSize: 100, ProtocolsSetCacheSize: 200, }, - RawVersion: version, - BuildCommit: commit, - BuildDate: date, - BuiltBy: builtBy, + UDPBufferSize: 1024 * 1024, + RawVersion: version, + BuildCommit: commit, + BuildDate: date, + BuiltBy: builtBy, } func main() { @@ -218,6 +220,14 @@ func main() { Destination: &rootConfig.Database.DatabaseSSLMode, Category: flagCategoryDatabase, }, + &cli.IntFlag{ + Name: "udp-buffer-size", + Usage: "The rcv/snd buffer size for the UDP sockets (in bytes)", + EnvVars: []string{"NEBULA_UDP_BUFFER_SIZE"}, + Value: rootConfig.UDPBufferSize, + Destination: &rootConfig.UDPBufferSize, + Category: flagCategorySystem, + }, &cli.IntFlag{ Name: 
"agent-versions-cache-size", Usage: "The cache size to hold agent versions in memory", diff --git a/cmd/nebula/cmd_crawl.go b/cmd/nebula/cmd_crawl.go index d5327e68..4d42e88a 100644 --- a/cmd/nebula/cmd_crawl.go +++ b/cmd/nebula/cmd_crawl.go @@ -29,19 +29,20 @@ import ( ) var crawlConfig = &config.Crawl{ - Root: rootConfig, - CrawlWorkerCount: 1000, - WriteWorkerCount: 10, - CrawlLimit: 0, - PersistNeighbors: false, - FilePathUdgerDB: "", - Network: string(config.NetworkIPFS), - BootstrapPeers: cli.NewStringSlice(), - Protocols: cli.NewStringSlice(string(kaddht.ProtocolDHT)), - AddrTrackTypeStr: "public", - AddrDialTypeStr: "public", - KeepENR: false, - CheckExposed: false, + Root: rootConfig, + CrawlWorkerCount: 1000, + WriteWorkerCount: 10, + CrawlLimit: 0, + PersistNeighbors: false, + FilePathUdgerDB: "", + Network: string(config.NetworkIPFS), + BootstrapPeers: cli.NewStringSlice(), + Protocols: cli.NewStringSlice(string(kaddht.ProtocolDHT)), + AddrTrackTypeStr: "public", + AddrDialTypeStr: "public", + KeepENR: false, + CheckExposed: false, + Discv4RespTimeout: 3 * time.Second, } // CrawlCommand contains the crawl sub-command configuration. 
@@ -183,6 +184,14 @@ var CrawlCommand = &cli.Command{ Destination: &crawlConfig.KeepENR, Category: flagCategoryNetwork, }, + &cli.DurationFlag{ + Name: "udp-response-timeout", + Usage: "ETHEREUM_EXECUTION: The response timeout for UDP requests in the disv4 DHT", + EnvVars: []string{"NEBULA_CRAWL_UDP_RESPONSE_TIMEOUT"}, + Value: crawlConfig.Discv4RespTimeout, + Destination: &crawlConfig.Discv4RespTimeout, + Category: flagCategoryNetwork, + }, }, } @@ -271,15 +280,19 @@ func CrawlAction(c *cli.Context) error { // configure the crawl driver driverCfg := &discv4.CrawlDriverConfig{ - Version: cfg.Root.Version(), - DialTimeout: cfg.Root.DialTimeout, - TrackNeighbors: cfg.PersistNeighbors, - BootstrapPeers: bpEnodes, - AddrDialType: cfg.AddrDialType(), - AddrTrackType: cfg.AddrTrackType(), - TracerProvider: cfg.Root.TracerProvider, - MeterProvider: cfg.Root.MeterProvider, - LogErrors: cfg.Root.LogErrors, + Version: cfg.Root.Version(), + DialTimeout: cfg.Root.DialTimeout, + CrawlWorkerCount: cfg.CrawlWorkerCount, + TrackNeighbors: cfg.PersistNeighbors, + BootstrapPeers: bpEnodes, + AddrDialType: cfg.AddrDialType(), + AddrTrackType: cfg.AddrTrackType(), + TracerProvider: cfg.Root.TracerProvider, + MeterProvider: cfg.Root.MeterProvider, + LogErrors: cfg.Root.LogErrors, + KeepENR: cfg.KeepENR, + UDPBufferSize: cfg.Root.UDPBufferSize, + UDPRespTimeout: cfg.Discv4RespTimeout, } // init the crawl driver diff --git a/cmd/prefix/gen.go b/cmd/prefix/gen.go index e4b154ba..12d0fee1 100644 --- a/cmd/prefix/gen.go +++ b/cmd/prefix/gen.go @@ -57,7 +57,7 @@ func main() { } fmt.Println("writing prefixes...") - f, err := os.Create("discvx/prefixmap.go") + f, err := os.Create("discv4/prefixmap.go") if err != nil { panic(err) } diff --git a/config/bootstrap.go b/config/bootstrap.go index 6fdd3b30..91510d2e 100644 --- a/config/bootstrap.go +++ b/config/bootstrap.go @@ -248,7 +248,7 @@ var ( 
"/dns/bootnode.1.lightclient.mainnet.avail.so/tcp/37000/p2p/12D3KooW9x9qnoXhkHAjdNFu92kMvBRSiFBMAoC5NnifgzXjsuiM", } - //BootstrapPeersAvailTuringLightClient + // BootstrapPeersAvailTuringLightClient BootstrapPeersAvailTuringLightClient = []string{ "/dns/bootnode.1.lightclient.turing.avail.so/tcp/37000/p2p/12D3KooWBkLsNGaD3SpMaRWtAmWVuiZg1afdNSPbtJ8M8r9ArGRT", } diff --git a/config/config.go b/config/config.go index 6a1457e6..fbf704ac 100644 --- a/config/config.go +++ b/config/config.go @@ -114,6 +114,9 @@ type Root struct { // TracerProvider is the tracer provider to use when initialising tracing TracerProvider trace.TracerProvider + // The buffer size of the UDP sockets (applicable to ETHEREUM_{CONSENSUS,EXECUTION}) + UDPBufferSize int + // The raw version of Nebula in the for X.Y.Z. Raw, because it's missing, e.g., commit information (set by GoReleaser or in Makefile) RawVersion string @@ -278,6 +281,9 @@ type Crawl struct { // Whether to keep the full enr record alongside all parsed kv-pairs KeepENR bool + + // The UDP response timeout when crawling the discv4 DHT + Discv4RespTimeout time.Duration } func (c *Crawl) AddrTrackType() AddrType { diff --git a/core/engine.go b/core/engine.go index 7770ae45..4dda41aa 100644 --- a/core/engine.go +++ b/core/engine.go @@ -445,7 +445,7 @@ func (e *Engine[I, R]) handleWriteResult(ctx context.Context, result Result[Writ "success": result.Value.Error == nil, "written": e.writeCount, "duration": result.Value.Duration, - }).Infoln("Handled writer result") + }).Debugln("Handled writer result") } // reachedProcessingLimit returns true if the processing limit is configured diff --git a/core/handler_crawl.go b/core/handler_crawl.go index 4ed4c4e3..98a746a7 100644 --- a/core/handler_crawl.go +++ b/core/handler_crawl.go @@ -70,10 +70,10 @@ func (r CrawlResult[I]) PeerInfo() I { func (r CrawlResult[I]) LogEntry() *log.Entry { logEntry := log.WithFields(log.Fields{ - "crawlerID": r.CrawlerID, "remoteID": r.Info.ID().ShortString(), 
"isDialable": r.ConnectError == nil && r.CrawlError == nil, "duration": r.CrawlDuration(), + "rtSize": len(r.RoutingTable.Neighbors), }) if r.ConnectError != nil { diff --git a/db/errors.go b/db/errors.go index 4ecdb422..5062b186 100644 --- a/db/errors.go +++ b/db/errors.go @@ -38,6 +38,24 @@ var KnownErrors = map[string]string{ "connection gated": models.NetErrorConnectionGated, // transient error "RESOURCE_LIMIT_EXCEEDED (201)": models.NetErrorCantConnectOverRelay, // transient error "NO_RESERVATION (204)": models.NetErrorCantConnectOverRelay, // permanent error + // devp2p errors + "no good ip address": models.NetErrorNoIPAddress, + "disconnect requested": models.NetErrorDevp2pDisconnectRequested, + "network error": models.NetErrorDevp2pNetworkError, + "breach of protocol": models.NetErrorDevp2pBreachOfProtocol, + "useless peer": models.NetErrorDevp2pUselessPeer, + "too many peers": models.NetErrorDevp2pTooManyPeers, + "already connected": models.NetErrorDevp2pAlreadyConnected, + "incompatible p2p protocol version": models.NetErrorDevp2pIncompatibleP2PProtocolVersion, + "invalid node identity": models.NetErrorDevp2pInvalidNodeIdentity, + "client quitting": models.NetErrorDevp2pClientQuitting, + "unexpected identity": models.NetErrorDevp2pUnexpectedIdentity, + "connected to self": models.NetErrorDevp2pConnectedToSelf, + "read timeout": models.NetErrorDevp2pReadTimeout, + "subprotocol error": models.NetErrorDevp2pSubprotocolError, + "could not negotiate eth protocol": models.NetErrorDevp2pEthprotocolError, + "handshake failed: EOF": models.NetErrorDevp2pHandshakeEOF, // dependent on error string in discv4 + "malformed disconnect message": models.NetErrorDevp2pMalformedDisconnectMessage, // dependent on error string in discv4 } var ErrorStr = map[string]string{} @@ -79,7 +97,24 @@ var knownErrorsPrecedence = []string{ "Write on stream", "RESOURCE_LIMIT_EXCEEDED (201)", "NO_RESERVATION (204)", + "too many peers", + "no good ip address", + "malformed disconnect 
message", "handshake did not complete in time", + "disconnect requested", + "network error", + "breach of protocol", + "useless peer", + "already connected", + "incompatible p2p protocol version", + "invalid node identity", + "client quitting", + "unexpected identity", + "connected to self", + "read timeout", + "subprotocol error", + "could not negotiate eth protocol", + "handshake failed: EOF", } // NetError extracts the appropriate error type from the given error. diff --git a/db/migrations/000028_add_net_errors.down.sql b/db/migrations/000028_add_net_errors.down.sql new file mode 100644 index 00000000..a08e72a5 --- /dev/null +++ b/db/migrations/000028_add_net_errors.down.sql @@ -0,0 +1 @@ +-- no down migration \ No newline at end of file diff --git a/db/migrations/000028_add_net_errors.up.sql b/db/migrations/000028_add_net_errors.up.sql new file mode 100644 index 00000000..b057f523 --- /dev/null +++ b/db/migrations/000028_add_net_errors.up.sql @@ -0,0 +1,20 @@ +BEGIN; + +ALTER TYPE net_error ADD VALUE 'devp2p_handshake_eof'; +ALTER TYPE net_error ADD VALUE 'devp2p_disconnect_requested'; +ALTER TYPE net_error ADD VALUE 'devp2p_network_error'; +ALTER TYPE net_error ADD VALUE 'devp2p_breach_of_protocol'; +ALTER TYPE net_error ADD VALUE 'devp2p_useless_peer'; +ALTER TYPE net_error ADD VALUE 'devp2p_too_many_peers'; +ALTER TYPE net_error ADD VALUE 'devp2p_already_connected'; +ALTER TYPE net_error ADD VALUE 'devp2p_incompatible_p2p_protocol_version'; +ALTER TYPE net_error ADD VALUE 'devp2p_invalid_node_identity'; +ALTER TYPE net_error ADD VALUE 'devp2p_client_quitting'; +ALTER TYPE net_error ADD VALUE 'devp2p_unexpected_identity'; +ALTER TYPE net_error ADD VALUE 'devp2p_connected_to_self'; +ALTER TYPE net_error ADD VALUE 'devp2p_read_timeout'; +ALTER TYPE net_error ADD VALUE 'devp2p_subprotocol_error'; +ALTER TYPE net_error ADD VALUE 'devp2p_ethprotocol_error'; +ALTER TYPE net_error ADD VALUE 'devp2p_malformed_disconnect_message'; + +COMMIT \ No newline at end of file 
diff --git a/db/models/boil_types.go b/db/models/boil_types.go index 2ae86dbc..d5a061c7 100644 --- a/db/models/boil_types.go +++ b/db/models/boil_types.go @@ -53,29 +53,45 @@ func makeCacheKey(cols boil.Columns, nzDefaults []string) string { // Enum values for NetError const ( - NetErrorUnknown string = "unknown" - NetErrorIoTimeout string = "io_timeout" - NetErrorNoRecentNetworkActivity string = "no_recent_network_activity" - NetErrorConnectionRefused string = "connection_refused" - NetErrorProtocolNotSupported string = "protocol_not_supported" - NetErrorPeerIDMismatch string = "peer_id_mismatch" - NetErrorNoRouteToHost string = "no_route_to_host" - NetErrorNetworkUnreachable string = "network_unreachable" - NetErrorNoGoodAddresses string = "no_good_addresses" - NetErrorContextDeadlineExceeded string = "context_deadline_exceeded" - NetErrorNoIPAddress string = "no_ip_address" - NetErrorMaxDialAttemptsExceeded string = "max_dial_attempts_exceeded" - NetErrorMaddrReset string = "maddr_reset" - NetErrorStreamReset string = "stream_reset" - NetErrorHostIsDown string = "host_is_down" - NetErrorNegotiateSecurityProtocol string = "negotiate_security_protocol" - NetErrorNegotiateStreamMultiplexer string = "negotiate_stream_multiplexer" - NetErrorResourceLimitExceeded string = "resource_limit_exceeded" - NetErrorWriteOnStream string = "write_on_stream" - NetErrorConnectionResetByPeer string = "connection_reset_by_peer" - NetErrorCantAssignRequestedAddress string = "cant_assign_requested_address" - NetErrorConnectionGated string = "connection_gated" - NetErrorCantConnectOverRelay string = "cant_connect_over_relay" + NetErrorUnknown string = "unknown" + NetErrorIoTimeout string = "io_timeout" + NetErrorNoRecentNetworkActivity string = "no_recent_network_activity" + NetErrorConnectionRefused string = "connection_refused" + NetErrorProtocolNotSupported string = "protocol_not_supported" + NetErrorPeerIDMismatch string = "peer_id_mismatch" + NetErrorNoRouteToHost string = 
"no_route_to_host" + NetErrorNetworkUnreachable string = "network_unreachable" + NetErrorNoGoodAddresses string = "no_good_addresses" + NetErrorContextDeadlineExceeded string = "context_deadline_exceeded" + NetErrorNoIPAddress string = "no_ip_address" + NetErrorMaxDialAttemptsExceeded string = "max_dial_attempts_exceeded" + NetErrorMaddrReset string = "maddr_reset" + NetErrorStreamReset string = "stream_reset" + NetErrorHostIsDown string = "host_is_down" + NetErrorNegotiateSecurityProtocol string = "negotiate_security_protocol" + NetErrorNegotiateStreamMultiplexer string = "negotiate_stream_multiplexer" + NetErrorResourceLimitExceeded string = "resource_limit_exceeded" + NetErrorWriteOnStream string = "write_on_stream" + NetErrorConnectionResetByPeer string = "connection_reset_by_peer" + NetErrorCantAssignRequestedAddress string = "cant_assign_requested_address" + NetErrorConnectionGated string = "connection_gated" + NetErrorCantConnectOverRelay string = "cant_connect_over_relay" + NetErrorDevp2pHandshakeEOF string = "devp2p_handshake_eof" + NetErrorDevp2pDisconnectRequested string = "devp2p_disconnect_requested" + NetErrorDevp2pNetworkError string = "devp2p_network_error" + NetErrorDevp2pBreachOfProtocol string = "devp2p_breach_of_protocol" + NetErrorDevp2pUselessPeer string = "devp2p_useless_peer" + NetErrorDevp2pTooManyPeers string = "devp2p_too_many_peers" + NetErrorDevp2pAlreadyConnected string = "devp2p_already_connected" + NetErrorDevp2pIncompatibleP2PProtocolVersion string = "devp2p_incompatible_p2p_protocol_version" + NetErrorDevp2pInvalidNodeIdentity string = "devp2p_invalid_node_identity" + NetErrorDevp2pClientQuitting string = "devp2p_client_quitting" + NetErrorDevp2pUnexpectedIdentity string = "devp2p_unexpected_identity" + NetErrorDevp2pConnectedToSelf string = "devp2p_connected_to_self" + NetErrorDevp2pReadTimeout string = "devp2p_read_timeout" + NetErrorDevp2pSubprotocolError string = "devp2p_subprotocol_error" + NetErrorDevp2pEthprotocolError 
string = "devp2p_ethprotocol_error" + NetErrorDevp2pMalformedDisconnectMessage string = "devp2p_malformed_disconnect_message" ) func AllNetError() []string { @@ -103,6 +119,22 @@ func AllNetError() []string { NetErrorCantAssignRequestedAddress, NetErrorConnectionGated, NetErrorCantConnectOverRelay, + NetErrorDevp2pHandshakeEOF, + NetErrorDevp2pDisconnectRequested, + NetErrorDevp2pNetworkError, + NetErrorDevp2pBreachOfProtocol, + NetErrorDevp2pUselessPeer, + NetErrorDevp2pTooManyPeers, + NetErrorDevp2pAlreadyConnected, + NetErrorDevp2pIncompatibleP2PProtocolVersion, + NetErrorDevp2pInvalidNodeIdentity, + NetErrorDevp2pClientQuitting, + NetErrorDevp2pUnexpectedIdentity, + NetErrorDevp2pConnectedToSelf, + NetErrorDevp2pReadTimeout, + NetErrorDevp2pSubprotocolError, + NetErrorDevp2pEthprotocolError, + NetErrorDevp2pMalformedDisconnectMessage, } } diff --git a/devp2p/client.go b/devp2p/client.go deleted file mode 100644 index 8b4a77b0..00000000 --- a/devp2p/client.go +++ /dev/null @@ -1,200 +0,0 @@ -package devp2p - -import ( - "crypto/ecdsa" - "fmt" - "net" - "sync" - "time" - - log "github.com/sirupsen/logrus" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/secp256k1" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/rlpx" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "golang.org/x/net/context" -) - -type Config struct { - DialTimeout time.Duration - Caps []p2p.Cap - HighestProtoVersion uint -} - -func DefaultConfig() *Config { - return &Config{ - DialTimeout: time.Minute, - Caps: []p2p.Cap{ - {Name: "eth", Version: 66}, - {Name: "eth", Version: 67}, - {Name: "eth", Version: 68}, - {Name: "snap", Version: 1}, - }, - HighestProtoVersion: 68, - } -} - -type Client struct { - cfg *Config - dialer net.Dialer - privKey *ecdsa.PrivateKey - - connsMu sync.RWMutex - conns map[peer.ID]*Conn -} - -func NewClient(privKey *ecdsa.PrivateKey, cfg *Config) *Client { - if cfg 
== nil { - cfg = DefaultConfig() - } - - return &Client{ - cfg: cfg, - privKey: privKey, - dialer: net.Dialer{ - Timeout: cfg.DialTimeout, - }, - conns: map[peer.ID]*Conn{}, - } -} - -func (c *Client) Connect(ctx context.Context, pi peer.AddrInfo) error { - pubKey, err := pi.ID.ExtractPublicKey() - if err != nil { - return fmt.Errorf("extract public key: %w", err) - } - - raw, err := pubKey.Raw() - if err != nil { - return fmt.Errorf("raw bytes from public key: %w", err) - } - - x, y := secp256k1.DecompressPubkey(raw) - ecdsaPubKey := ecdsa.PublicKey{Curve: secp256k1.S256(), X: x, Y: y} - - var fd net.Conn - for _, maddr := range pi.Addrs { - ipAddr, err := maddr.ValueForProtocol(ma.P_IP4) - if err != nil { - ipAddr, err = maddr.ValueForProtocol(ma.P_IP6) - if err != nil { - continue - } - } - - port, err := maddr.ValueForProtocol(ma.P_TCP) - if err != nil { - continue - } - - timeoutCtx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) - fd, err = c.dialer.DialContext(timeoutCtx, "tcp", fmt.Sprintf("%s:%s", ipAddr, port)) - if err != nil { - cancel() - return fmt.Errorf("failed dialing node: %w", err) - } - cancel() - - break - } - - if fd == nil { - return err - } - - ethConn := &Conn{ - Conn: rlpx.NewConn(fd, &ecdsaPubKey), - ourKey: c.privKey, - negotiatedProtoVersion: 0, - negotiatedSnapProtoVersion: 0, - ourHighestProtoVersion: c.cfg.HighestProtoVersion, - ourHighestSnapProtoVersion: 0, - caps: c.cfg.Caps, - } - - // initiate authed session - if err := fd.SetDeadline(time.Now().Add(10 * time.Second)); err != nil { // TODO: parameterize - log.WithError(err).Warnln("Failed to set connection deadline") - } - - _, err = ethConn.Handshake(c.privKey) // returns remote pubKey -> unused - if err != nil { - return fmt.Errorf("handshake failed: %w", err) - } - - c.connsMu.Lock() - c.conns[pi.ID] = ethConn - c.connsMu.Unlock() - - return nil -} - -func (c *Client) Identify(pid peer.ID) (*Hello, error) { - c.connsMu.RLock() - conn, found := c.conns[pid] - 
c.connsMu.RUnlock() - - if !found { - return nil, fmt.Errorf("no connection to %s", pid) - } - - pub0 := crypto.FromECDSAPub(&c.privKey.PublicKey)[1:] - req := &Hello{ - Version: 5, - Caps: c.cfg.Caps, - ID: pub0, - } - - if err := conn.SetDeadline(time.Now().Add(10 * time.Second)); err != nil { // TODO: parameterize - log.WithError(err).Warnln("Failed to set connection deadline") - } - - if err := conn.Write(req); err != nil { - return nil, fmt.Errorf("write to conn: %w", err) - } - - resp := conn.Read() - - switch respMsg := resp.(type) { - case *Hello: - if respMsg.Version >= 5 { - conn.SetSnappy(true) - } - return respMsg, nil - case *Error: - return nil, fmt.Errorf("reading handshake response failed: %w", respMsg) - case *Disconnect: - return nil, fmt.Errorf("reading handshake response failed: %s", respMsg.Reason) - default: - return nil, fmt.Errorf("unexpected handshake response message type: %T", resp) - } -} - -func (c *Client) Close() { - c.connsMu.Lock() - defer c.connsMu.Unlock() - - for pid, conn := range c.conns { - delete(c.conns, pid) - if err := conn.Close(); err != nil { - log.WithError(err).WithField("remoteID", pid.ShortString()).Warnln("Failed closing devp2p connection") - } - } -} - -func (c *Client) CloseConn(pid peer.ID) error { - c.connsMu.Lock() - defer c.connsMu.Unlock() - - conn, found := c.conns[pid] - if !found { - return nil - } - - delete(c.conns, pid) - - return conn.Close() -} diff --git a/devp2p/snapTypes.go b/devp2p/snapTypes.go deleted file mode 100644 index 80346e79..00000000 --- a/devp2p/snapTypes.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package devp2p - -import "github.com/ethereum/go-ethereum/eth/protocols/snap" - -// GetAccountRange represents an account range query. -type GetAccountRange snap.GetAccountRangePacket - -func (msg GetAccountRange) Code() int { return 33 } -func (msg GetAccountRange) ReqID() uint64 { return msg.ID } - -type AccountRange snap.AccountRangePacket - -func (msg AccountRange) Code() int { return 34 } -func (msg AccountRange) ReqID() uint64 { return msg.ID } - -type GetStorageRanges snap.GetStorageRangesPacket - -func (msg GetStorageRanges) Code() int { return 35 } -func (msg GetStorageRanges) ReqID() uint64 { return msg.ID } - -type StorageRanges snap.StorageRangesPacket - -func (msg StorageRanges) Code() int { return 36 } -func (msg StorageRanges) ReqID() uint64 { return msg.ID } - -type GetByteCodes snap.GetByteCodesPacket - -func (msg GetByteCodes) Code() int { return 37 } -func (msg GetByteCodes) ReqID() uint64 { return msg.ID } - -type ByteCodes snap.ByteCodesPacket - -func (msg ByteCodes) Code() int { return 38 } -func (msg ByteCodes) ReqID() uint64 { return msg.ID } - -type GetTrieNodes snap.GetTrieNodesPacket - -func (msg GetTrieNodes) Code() int { return 39 } -func (msg GetTrieNodes) ReqID() uint64 { return msg.ID } - -type TrieNodes snap.TrieNodesPacket - -func (msg TrieNodes) Code() int { return 40 } -func (msg TrieNodes) ReqID() uint64 { return msg.ID } diff --git a/devp2p/types.go b/devp2p/types.go deleted file mode 100644 index 390e33cb..00000000 --- a/devp2p/types.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of go-ethereum. 
-// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package devp2p - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "time" - - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/rlpx" - "github.com/ethereum/go-ethereum/rlp" -) - -type Message interface { - Code() int - ReqID() uint64 -} - -type Error struct { - err error -} - -func (e *Error) Unwrap() error { return e.err } -func (e *Error) Error() string { return e.err.Error() } -func (e *Error) String() string { return e.Error() } - -func (e *Error) Code() int { return -1 } -func (e *Error) ReqID() uint64 { return 0 } - -func errorf(format string, args ...interface{}) *Error { - return &Error{fmt.Errorf(format, args...)} -} - -// Hello is the RLP structure of the protocol handshake. -type Hello struct { - Version uint64 - Name string - Caps []p2p.Cap - ListenPort uint64 - ID []byte // secp256k1 public key - - // Ignore additional fields (for forward compatibility). - Rest []rlp.RawValue `rlp:"tail"` -} - -func (msg Hello) Code() int { return 0x00 } -func (msg Hello) ReqID() uint64 { return 0 } - -// Disconnect is the RLP structure for a disconnect message. 
-type Disconnect struct { - Reason p2p.DiscReason -} - -func (msg Disconnect) Code() int { return 0x01 } -func (msg Disconnect) ReqID() uint64 { return 0 } - -type Ping struct{} - -func (msg Ping) Code() int { return 0x02 } -func (msg Ping) ReqID() uint64 { return 0 } - -type Pong struct{} - -func (msg Pong) Code() int { return 0x03 } -func (msg Pong) ReqID() uint64 { return 0 } - -// Status is the network packet for the status message for eth/64 and later. -type Status eth.StatusPacket - -func (msg Status) Code() int { return 16 } -func (msg Status) ReqID() uint64 { return 0 } - -// NewBlockHashes is the network packet for the block announcements. -type NewBlockHashes eth.NewBlockHashesPacket - -func (msg NewBlockHashes) Code() int { return 17 } -func (msg NewBlockHashes) ReqID() uint64 { return 0 } - -type Transactions eth.TransactionsPacket - -func (msg Transactions) Code() int { return 18 } -func (msg Transactions) ReqID() uint64 { return 18 } - -// GetBlockHeaders represents a block header query. -type GetBlockHeaders eth.GetBlockHeadersPacket - -func (msg GetBlockHeaders) Code() int { return 19 } -func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId } - -type BlockHeaders eth.BlockHeadersPacket - -func (msg BlockHeaders) Code() int { return 20 } -func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId } - -// GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies eth.GetBlockBodiesPacket - -func (msg GetBlockBodies) Code() int { return 21 } -func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId } - -// BlockBodies is the network packet for block content distribution. -type BlockBodies eth.BlockBodiesPacket - -func (msg BlockBodies) Code() int { return 22 } -func (msg BlockBodies) ReqID() uint64 { return msg.RequestId } - -// NewBlock is the network packet for the block propagation message. 
-type NewBlock eth.NewBlockPacket - -func (msg NewBlock) Code() int { return 23 } -func (msg NewBlock) ReqID() uint64 { return 0 } - -// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message. -type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket - -func (msg NewPooledTransactionHashes66) Code() int { return 24 } -func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 } - -// NewPooledTransactionHashes is the network packet for the tx hash propagation message. -type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket - -func (msg NewPooledTransactionHashes) Code() int { return 24 } -func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } - -type GetPooledTransactions eth.GetPooledTransactionsPacket - -func (msg GetPooledTransactions) Code() int { return 25 } -func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId } - -type PooledTransactions eth.PooledTransactionsPacket - -func (msg PooledTransactions) Code() int { return 26 } -func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId } - -// Conn represents an individual connection with a peer -type Conn struct { - *rlpx.Conn - ourKey *ecdsa.PrivateKey - negotiatedProtoVersion uint - negotiatedSnapProtoVersion uint - ourHighestProtoVersion uint - ourHighestSnapProtoVersion uint - caps []p2p.Cap -} - -// Read reads an eth66 packet from the connection. 
-func (c *Conn) Read() Message { - code, rawData, _, err := c.Conn.Read() - if err != nil { - return errorf("could not read from connection: %v", err) - } - - var msg Message - switch int(code) { - case (Hello{}).Code(): - msg = new(Hello) - case (Ping{}).Code(): - msg = new(Ping) - case (Pong{}).Code(): - msg = new(Pong) - case (Disconnect{}).Code(): - msg = new(Disconnect) - case (Status{}).Code(): - msg = new(Status) - case (GetBlockHeaders{}).Code(): - ethMsg := new(eth.GetBlockHeadersPacket) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return (*GetBlockHeaders)(ethMsg) - case (BlockHeaders{}).Code(): - ethMsg := new(eth.BlockHeadersPacket) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return (*BlockHeaders)(ethMsg) - case (GetBlockBodies{}).Code(): - ethMsg := new(eth.GetBlockBodiesPacket) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return (*GetBlockBodies)(ethMsg) - case (BlockBodies{}).Code(): - ethMsg := new(eth.BlockBodiesPacket) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return (*BlockBodies)(ethMsg) - case (NewBlock{}).Code(): - msg = new(NewBlock) - case (NewBlockHashes{}).Code(): - msg = new(NewBlockHashes) - case (Transactions{}).Code(): - msg = new(Transactions) - case (NewPooledTransactionHashes66{}).Code(): - // Try decoding to eth68 - ethMsg := new(NewPooledTransactionHashes) - if err := rlp.DecodeBytes(rawData, ethMsg); err == nil { - return ethMsg - } - msg = new(NewPooledTransactionHashes66) - case (GetPooledTransactions{}.Code()): - ethMsg := new(eth.GetPooledTransactionsPacket) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return (*GetPooledTransactions)(ethMsg) - 
case (PooledTransactions{}.Code()): - ethMsg := new(eth.PooledTransactionsPacket) - if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return (*PooledTransactions)(ethMsg) - default: - msg = errorf("invalid message code: %d", code) - } - - if msg != nil { - if err := rlp.DecodeBytes(rawData, msg); err != nil { - return errorf("could not rlp decode message: %v", err) - } - return msg - } - return errorf("invalid message: %s", string(rawData)) -} - -// Write writes a eth packet to the connection. -func (c *Conn) Write(msg Message) error { - payload, err := rlp.EncodeToBytes(msg) - if err != nil { - return err - } - _, err = c.Conn.Write(uint64(msg.Code()), payload) - return err -} - -// ReadSnap reads a snap/1 response with the given id from the connection. -func (c *Conn) ReadSnap(id uint64) (Message, error) { - respId := id + 1 - start := time.Now() - for respId != id && time.Since(start) < 20*time.Second { // timeout - code, rawData, _, err := c.Conn.Read() - if err != nil { - return nil, fmt.Errorf("could not read from connection: %v", err) - } - var snpMsg interface{} - switch int(code) { - case (GetAccountRange{}).Code(): - snpMsg = new(GetAccountRange) - case (AccountRange{}).Code(): - snpMsg = new(AccountRange) - case (GetStorageRanges{}).Code(): - snpMsg = new(GetStorageRanges) - case (StorageRanges{}).Code(): - snpMsg = new(StorageRanges) - case (GetByteCodes{}).Code(): - snpMsg = new(GetByteCodes) - case (ByteCodes{}).Code(): - snpMsg = new(ByteCodes) - case (GetTrieNodes{}).Code(): - snpMsg = new(GetTrieNodes) - case (TrieNodes{}).Code(): - snpMsg = new(TrieNodes) - default: - // return nil, fmt.Errorf("invalid message code: %d", code) - continue - } - if err := rlp.DecodeBytes(rawData, snpMsg); err != nil { - return nil, fmt.Errorf("could not rlp decode message: %v", err) - } - return snpMsg.(Message), nil - } - return nil, errors.New("request timed out") -} diff --git 
a/discv4/client.go b/discv4/client.go new file mode 100644 index 00000000..357a512d --- /dev/null +++ b/discv4/client.go @@ -0,0 +1,120 @@ +package discv4 + +import ( + "crypto/ecdsa" + "fmt" + "net" + "strings" + "time" + + "github.com/ethereum/go-ethereum/cmd/devp2p/ethtest" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/rlpx" + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +var errUseOfClosedNetworkConnectionStr = "use of closed network connection" + +type ClientConfig struct { + DialTimeout time.Duration + Caps []p2p.Cap + HighestProtoVersion uint +} + +func DefaultClientConfig() *ClientConfig { + return &ClientConfig{ + DialTimeout: 5 * time.Second, + Caps: []p2p.Cap{ + // pretend to speak everything ¯\_(ツ)_/¯ + {Name: "eth", Version: 62}, + {Name: "eth", Version: 63}, + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + {Name: "eth", Version: 66}, + {Name: "eth", Version: 67}, + {Name: "eth", Version: 68}, + {Name: "eth", Version: 69}, + {Name: "eth", Version: 70}, + {Name: "eth", Version: 100}, + {Name: "snap", Version: 1}, + }, + HighestProtoVersion: 100, + } +} + +type Client struct { + cfg *ClientConfig + dialer net.Dialer + privKey *ecdsa.PrivateKey +} + +func NewClient(privKey *ecdsa.PrivateKey, cfg *ClientConfig) *Client { + if cfg == nil { + cfg = DefaultClientConfig() + } + + return &Client{ + cfg: cfg, + privKey: privKey, + dialer: net.Dialer{ + Timeout: cfg.DialTimeout, + }, + } +} + +func (c *Client) Connect(ctx context.Context, pi PeerInfo) (*ethtest.Conn, error) { + logEntry := log.WithField("remoteID", pi.ID().ShortString()) + + var conn net.Conn + addrPort, ok := pi.Node.TCPEndpoint() + if !ok { + return nil, fmt.Errorf("no good ip address: %s:%d", pi.Node.IP(), pi.Node.TCP()) + } + + tctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) + defer cancel() + + conn, err := c.dialer.DialContext(tctx, "tcp", addrPort.String()) + if err != nil { + return nil, fmt.Errorf("failed 
dialing node: %w", err) + } + + ethConn := ðtest.Conn{ + Conn: rlpx.NewConn(conn, pi.Pubkey()), + OurKey: c.privKey, + Caps: c.cfg.Caps, + + OurHighestProtoVersion: c.cfg.HighestProtoVersion, + } + + // cancel handshake if outer context is canceled and + // cancel this go routine when this function exits + exit := make(chan struct{}) + defer close(exit) + go func() { + select { + case <-exit: + case <-ctx.Done(): + if err := ethConn.Close(); err != nil && !strings.Contains(err.Error(), errUseOfClosedNetworkConnectionStr) { + logEntry.WithError(err).Warnln("Failed closing devp2p connection") + } + } + }() + + // set a deadline for the handshake + if err := conn.SetDeadline(time.Now().Add(c.dialer.Timeout)); err != nil { + logEntry.WithError(err).Warnln("Failed to set connection deadline") + } + + _, err = ethConn.Conn.Handshake(c.privKey) // also returns the public key of the remote -> unused + if err != nil { + if ierr := ethConn.Close(); ierr != nil && !strings.Contains(ierr.Error(), errUseOfClosedNetworkConnectionStr) { // inner error + logEntry.WithError(ierr).Warnln("Failed closing devp2p connection") + } + + return nil, fmt.Errorf("handshake failed: %w", err) + } + + return ethConn, ctx.Err() +} diff --git a/discv4/crawler.go b/discv4/crawler.go index 84bd08f2..8088b5e5 100644 --- a/discv4/crawler.go +++ b/discv4/crawler.go @@ -2,46 +2,67 @@ package discv4 import ( "context" + "encoding/hex" "encoding/json" "errors" "fmt" + "math/rand" "net/netip" "strings" "sync" "time" + mapset "github.com/deckarep/golang-set/v2" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/discover/v4wire" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p/core/peer" log "github.com/sirupsen/logrus" - "go.uber.org/atomic" - "golang.org/x/sync/errgroup" "github.com/dennis-tra/nebula-crawler/config" "github.com/dennis-tra/nebula-crawler/core" 
"github.com/dennis-tra/nebula-crawler/db" "github.com/dennis-tra/nebula-crawler/db/models" - "github.com/dennis-tra/nebula-crawler/devp2p" - "github.com/dennis-tra/nebula-crawler/discvx" ) type CrawlerConfig struct { DialTimeout time.Duration AddrDialType config.AddrType + MaxJitter time.Duration LogErrors bool + KeepENR bool } type Crawler struct { id string cfg *CrawlerConfig - listener *discvx.UDPv4 - client *devp2p.Client + listener *discover.UDPv4 + client *Client crawledPeers int + taskDoneChan chan time.Time done chan struct{} } var _ core.Worker[PeerInfo, core.CrawlResult[PeerInfo]] = (*Crawler)(nil) func (c *Crawler) Work(ctx context.Context, task PeerInfo) (core.CrawlResult[PeerInfo], error) { + // indicate to the driver that we have handled a task + defer func() { c.taskDoneChan <- time.Now() }() + + // add a startup jitter delay to prevent all workers to crawl at exactly the + // same time and potentially overwhelm the machine that Nebula is running on + if c.crawledPeers == 0 { + jitter := time.Duration(0) + if c.cfg.MaxJitter > 0 { // could be <= 0 if the worker count is 1 + jitter = time.Duration(rand.Int63n(int64(c.cfg.MaxJitter))) + } + select { + case <-time.After(jitter): + case <-ctx.Done(): + } + } + logEntry := log.WithFields(log.Fields{ "crawlerID": c.id, "remoteID": task.peerID.ShortString(), @@ -55,7 +76,7 @@ func (c *Crawler) Work(ctx context.Context, task PeerInfo) (core.CrawlResult[Pee discv4ResultCh := c.crawlDiscV4(ctx, task) devp2pResultCh := c.crawlDevp2p(ctx, task) - discv4Result := <-discv4ResultCh + discV4Result := <-discv4ResultCh devp2pResult := <-devp2pResultCh properties := map[string]any{} @@ -66,13 +87,37 @@ func (c *Crawler) Work(ctx context.Context, task PeerInfo) (core.CrawlResult[Pee } // keep track of all unknown crawl errors - if discv4Result.ErrorStr == models.NetErrorUnknown && discv4Result.Error != nil { - properties["crawl_error"] = discv4Result.Error.Error() + if discV4Result.ErrorStr == models.NetErrorUnknown && 
discV4Result.Error != nil { + properties["crawl_error"] = discV4Result.Error.Error() + } + + // keep track of the strategy that we used to crawl that peer + if discV4Result.Strategy != "" { + properties["strategy"] = string(discV4Result.Strategy) + } + + if devp2pResult.Status != nil { + properties["network_id"] = devp2pResult.Status.NetworkID + properties["fork_id"] = hex.EncodeToString(devp2pResult.Status.ForkID.Hash[:]) + } + + if c.cfg.KeepENR { + properties["enr"] = task.Node.String() // discV4Result.ENR.String() panics :/ + } + + // keep track of all unknown connection errors + if devp2pResult.ConnectErrorStr == models.NetErrorUnknown && devp2pResult.ConnectError != nil { + properties["connect_error"] = devp2pResult.ConnectError.Error() + } + + // keep track of all unknown crawl errors + if discV4Result.ErrorStr == models.NetErrorUnknown && discV4Result.Error != nil { + properties["crawl_error"] = discV4Result.Error.Error() } data, err := json.Marshal(properties) if err != nil { - log.WithError(err).WithField("properties", properties).Warnln("Could not marshal peer properties") + logEntry.WithError(err).WithField("properties", properties).Warnln("Could not marshal peer properties") } cr := core.CrawlResult[PeerInfo]{ @@ -80,13 +125,13 @@ func (c *Crawler) Work(ctx context.Context, task PeerInfo) (core.CrawlResult[Pee Info: task, CrawlStartTime: crawlStart, RoutingTableFromAPI: false, - RoutingTable: discv4Result.RoutingTable, + RoutingTable: discV4Result.RoutingTable, Agent: devp2pResult.Agent, Protocols: devp2pResult.Protocols, ConnectError: devp2pResult.ConnectError, ConnectErrorStr: devp2pResult.ConnectErrorStr, - CrawlError: discv4Result.Error, - CrawlErrorStr: discv4Result.ErrorStr, + CrawlError: discV4Result.Error, + CrawlErrorStr: discV4Result.ErrorStr, CrawlEndTime: time.Now(), ConnectStartTime: devp2pResult.ConnectStartTime, ConnectEndTime: devp2pResult.ConnectEndTime, @@ -110,6 +155,9 @@ type DiscV4Result struct { // The neighbors of the crawled 
peer RoutingTable *core.RoutingTable[PeerInfo] + // The strategy used to crawl the peer + Strategy CrawlStrategy + // The time the draining of bucket entries was finished DoneAt time.Time @@ -124,100 +172,50 @@ func (c *Crawler) crawlDiscV4(ctx context.Context, pi PeerInfo) <-chan DiscV4Res resultCh := make(chan DiscV4Result) go func() { - // mutex to guard access to result and allNeighbors - mu := sync.RWMutex{} - // the final result struct result := DiscV4Result{} - // all neighbors of pi. We're using a map to deduplicate. - allNeighbors := map[string]PeerInfo{} - - // errorBits tracks at which CPL errors have occurred. - // 0000 0000 0000 0000 - No error - // 0000 0000 0000 0001 - An error has occurred at CPL 0 - // 1000 0000 0000 0001 - An error has occurred at CPL 0 and 15 - errorBits := atomic.NewUint32(0) - enr, err := c.listener.RequestENR(pi.Node) if err != nil { result.ENR = pi.Node + err = nil } else { result.ENR = enr now := time.Now() result.RespondedAt = &now } - errg := errgroup.Group{} - for i := 0; i <= 15; i++ { // 15 is maximum - count := i // Copy value - errg.Go(func() error { - pubKey, err := discvx.GenRandomPublicKey(pi.Node.ID(), count) - if err != nil { - log.WithError(err).WithField("enr", pi.Node.String()).Warnln("Failed generating public key") - errorBits.Add(1 << count) - return fmt.Errorf("generating random public key with CPL %d: %w", count, err) - } - - ipAddr, ok := netip.AddrFromSlice(pi.Node.IP()) - if !ok { - return fmt.Errorf("failed to convert ip to netip.Addr: %s", pi.Node.IP()) - } - udpAddr := netip.AddrPortFrom(ipAddr, uint16(pi.Node.UDP())) - - var neighbors []*enode.Node - for retry := 0; retry < 2; retry++ { - neighbors, err = c.listener.FindNode(pi.Node.ID(), udpAddr, pubKey) - if err == nil { - break - } - - errorBits.Add(1 << count) - - if errors.Is(err, discvx.ErrTimeout) { - sleepDur := time.Second * time.Duration(3*(retry+1)) - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(sleepDur): // may 
add jitter here - continue - } - } - - errorBits.Add(1 << count) - - return fmt.Errorf("getting closest peer with CPL %d: %w", count, err) - } - - mu.Lock() - defer mu.Unlock() + // the number of probes to issue against bucket 0 + probes := 3 - if result.RespondedAt == nil { - now := time.Now() - result.RespondedAt = &now - } + closestMap, closestSet, respondedAt, err := c.probeBucket0(pi, probes, result.RespondedAt != nil) - for _, n := range neighbors { - npi, err := NewPeerInfo(n) - if err != nil { - log.WithError(err).Warnln("Failed parsing ethereum node neighbor") - continue - } - allNeighbors[string(npi.peerID)] = npi - } + if err == nil { + // track the respondedAt timestamp if it wasn't already set + if result.RespondedAt != nil && !respondedAt.IsZero() { + result.RespondedAt = &respondedAt + } - if err != nil { - errorBits.Add(1 << count) - return err - } + result.Strategy = determineStrategy(closestSet) + + var remainingClosest map[peer.ID]PeerInfo + switch result.Strategy { + case crawlStrategySingleProbe: + remainingClosest = c.crawlRemainingBucketsConcurrently(pi.Node, pi.udpAddr, 1) + case crawlStrategyMultiProbe: + remainingClosest = c.crawlRemainingBucketsConcurrently(pi.Node, pi.udpAddr, 3) + case crawlStrategyRandomProbe: + probesPerBucket := int(1.3333 * discover.BucketSize / (float32(len(closestMap)) / float32(probes))) + remainingClosest = c.crawlRemainingBucketsConcurrently(pi.Node, pi.udpAddr, probesPerBucket) + default: + panic("unexpected strategy: " + string(result.Strategy)) + } - return nil - }) + for k, v := range remainingClosest { + closestMap[k] = v + } } - // wait for go routines to finish - err = errg.Wait() - // track done timestamp and error result.DoneAt = time.Now() result.Error = err @@ -225,11 +223,11 @@ func (c *Crawler) crawlDiscV4(ctx context.Context, pi PeerInfo) <-chan DiscV4Res result.RoutingTable = &core.RoutingTable[PeerInfo]{ PeerID: pi.ID(), Neighbors: []PeerInfo{}, - ErrorBits: uint16(errorBits.Load()), + 
ErrorBits: uint16(0), Error: err, } - for _, n := range allNeighbors { + for _, n := range closestMap { result.RoutingTable.Neighbors = append(result.RoutingTable.Neighbors, n) } @@ -250,13 +248,151 @@ func (c *Crawler) crawlDiscV4(ctx context.Context, pi PeerInfo) <-chan DiscV4Res return resultCh } +func (c *Crawler) probeBucket0(pi PeerInfo, probes int, returnedENR bool) (map[peer.ID]PeerInfo, []mapset.Set[peer.ID], time.Time, error) { + var ( + respondedAt time.Time + closestMap = make(map[peer.ID]PeerInfo) + closestSets []mapset.Set[peer.ID] + errs []error + ) + + // do it sequentially because if a remote peer returns `probes` responses + // containing only three peers each (we've observed that) then these + // will be mapped to a single response because of how the discv4 + // implementation works. This in turn means that the determineStrategy + // won't work + for i := 0; i < probes; i++ { + // first, we generate a random key that falls into bucket 0 + targetKey, err := GenRandomPublicKey(pi.Node.ID(), 0) + if err != nil { + return nil, nil, time.Time{}, err + } + + // second, we do the Find node request + closest, err := c.listener.FindNode(pi.Node.ID(), pi.udpAddr, targetKey) + if err != nil { + // exit early if the node hasn't returned an ENR and the first probe + // also timed out + if !returnedENR && errors.Is(err, discover.ErrTimeout) { + return nil, nil, time.Time{}, fmt.Errorf("failed to probe bucket 0: %w", discover.ErrTimeout) + } + + errs = append(errs, err) + } else if !respondedAt.IsZero() { + respondedAt = time.Now() + } + + // third, we parse the responses into our [PeerInfo] struct + for _, c := range closest { + pi, err := NewPeerInfo(c) + if err != nil { + log.WithError(err).Warnln("Failed parsing ethereum node neighbor") + continue + } + + closestMap[pi.ID()] = pi + } + + closestSets = append(closestSets, mapset.NewThreadUnsafeSetFromMapKeys(closestMap)) + } + + if len(errs) == probes { + return nil, nil, time.Time{}, fmt.Errorf("failed to 
probe bucket 0: %w", errors.Join(errs...)) + } + + return closestMap, closestSets, respondedAt, nil +} + +type CrawlStrategy string + +const ( + crawlStrategySingleProbe CrawlStrategy = "single-probe" + crawlStrategyMultiProbe CrawlStrategy = "multi-probe" + crawlStrategyRandomProbe CrawlStrategy = "random-probe" +) + +func determineStrategy(sets []mapset.Set[peer.ID]) CrawlStrategy { + // Calculate the average difference between two responses. If the response + // sizes are always 16, one new peer will result in a symmetric difference + // of cardinality 2. One peer in the first set that's not in the second and one + // peer in the second that's not in the first set. We consider that it's the + // happy path if the average symmetric difference is less than 2. + avgSymDiff := float32(0) + diffCount := float32(0) + allNodes := mapset.NewThreadUnsafeSet[peer.ID]() + for i := 0; i < len(sets); i++ { + allNodes = allNodes.Union(sets[i]) + for j := i + 1; j < len(sets); j++ { + diffCount += 1 + avgSymDiff += float32(sets[i].SymmetricDifference(sets[j]).Cardinality()) + } + } + avgSymDiff /= diffCount + + switch { + case avgSymDiff < 2: + return crawlStrategySingleProbe + case allNodes.Cardinality() > v4wire.MaxNeighbors: + return crawlStrategyMultiProbe + default: + return crawlStrategyRandomProbe + } +} + +func (c *Crawler) crawlRemainingBucketsConcurrently(node *enode.Node, udpAddr netip.AddrPort, probesPerBucket int) map[peer.ID]PeerInfo { + var wg sync.WaitGroup + + allNeighborsMu := sync.Mutex{} + allNeighbors := map[peer.ID]PeerInfo{} + for i := 1; i < 15; i++ { // although there are 17 buckets, GenRandomPublicKey only supports the first 16 + for j := 0; j < probesPerBucket; j++ { + wg.Add(1) + + go func() { + defer wg.Done() + + // first, we generate a random key that falls into bucket 0 + targetKey, err := GenRandomPublicKey(node.ID(), i) + if err != nil { + log.WithError(err).WithField("nodeID", node.ID().String()).Warnf("Failed generating random key for bucket 
%d", i) + return + } + + // second, we do the Find node request + closest, err := c.listener.FindNode(node.ID(), udpAddr, targetKey) + if err != nil { + return + } + + // third, update our neighbors map + allNeighborsMu.Lock() + defer allNeighborsMu.Unlock() + + for _, c := range closest { + pi, err := NewPeerInfo(c) + if err != nil { + log.WithError(err).Warnln("Failed parsing ethereum node neighbor") + continue + } + allNeighbors[pi.ID()] = pi + } + }() + } + } + wg.Wait() + + return allNeighbors +} + type Devp2pResult struct { ConnectStartTime time.Time ConnectEndTime time.Time + IdentifyEndTime time.Time ConnectError error ConnectErrorStr string Agent string Protocols []string + Status *eth.StatusPacket } func (c *Crawler) crawlDevp2p(ctx context.Context, pi PeerInfo) <-chan Devp2pResult { @@ -265,48 +401,51 @@ func (c *Crawler) crawlDevp2p(ctx context.Context, pi PeerInfo) <-chan Devp2pRes // the final result struct result := Devp2pResult{} - addrInfo := peer.AddrInfo{ - ID: pi.ID(), - Addrs: pi.Addrs(), - } - result.ConnectStartTime = time.Now() - for retry := 0; retry < 3; retry++ { - result.ConnectError = c.client.Connect(ctx, addrInfo) - if result.ConnectError == nil { - break - } - - if strings.Contains(result.ConnectError.Error(), "handshake failed: EOF") { - time.Sleep(time.Second) - continue - } - } + conn, err := c.client.Connect(ctx, pi) result.ConnectEndTime = time.Now() + result.ConnectError = err if result.ConnectError == nil { - resp, err := c.client.Identify(pi.ID()) - if err == nil { + + // start another go routine to cancel the entire operation if it + // times out. The context will be cancelled when this function + // returns or the timeout is reached. In both cases, we close the + // connection to the remote peer which will trigger that the call + // to Identify below will return (if the context is canceled because + // of a timeout and not function return). 
+ timeoutCtx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) + defer cancel() + go func() { + <-timeoutCtx.Done() + // Free connection resources + if err := conn.Close(); err != nil && !strings.Contains(err.Error(), errUseOfClosedNetworkConnectionStr) { + log.WithError(err).WithField("remoteID", pi.ID().ShortString()).Warnln("Could not close connection to peer") + } + }() + + resp, status, err := conn.Identify() + if err != nil && resp == nil && status == nil { + result.ConnectError = err + } + result.IdentifyEndTime = time.Now() + result.Status = status + + if resp != nil { result.Agent = resp.Name protocols := make([]string, len(resp.Caps)) for i, c := range resp.Caps { protocols[i] = "/" + c.String() } result.Protocols = protocols - } else { - log.WithError(err).Debugln("Could not identify peer") } } + // if there was a connection error, parse it to a known one if result.ConnectError != nil { result.ConnectErrorStr = db.NetError(result.ConnectError) } - // Free connection resources - if err := c.client.CloseConn(pi.ID()); err != nil { - log.WithError(err).WithField("remoteID", pi.ID().ShortString()).Warnln("Could not close connection to peer") - } - // send the result back and close channel select { case resultCh <- result: @@ -315,5 +454,6 @@ func (c *Crawler) crawlDevp2p(ctx context.Context, pi PeerInfo) <-chan Devp2pRes close(resultCh) }() + return resultCh } diff --git a/discv4/crawler_test.go b/discv4/crawler_test.go new file mode 100644 index 00000000..e86ae5bb --- /dev/null +++ b/discv4/crawler_test.go @@ -0,0 +1,133 @@ +package discv4 + +import ( + "fmt" + "testing" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" +) + +func Test_determineStrategy(t *testing.T) { + tests := []struct { + name string + sets [][]string + errs []error + want CrawlStrategy + }{ + { + // simulates, we received the same response three times (success case) + name: "all same (3)", + sets: 
[][]string{ + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + }, + want: crawlStrategySingleProbe, + }, + { + // simulates, we received the same response two times but also one + // error. This indicates a flaky connection. Just issue one probe + // for each bucket but also retry if failed. + name: "all same with error (2)", + sets: [][]string{ + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + }, + errs: []error{ + fmt.Errorf("some error"), + }, + want: crawlStrategySingleProbe, + }, + { + // simulates: remote peer replaced a node in its RT during probing + name: "single diff full responses (3)", + sets: [][]string{ + {"A", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"B", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"B", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + }, + want: crawlStrategySingleProbe, + }, + { + name: "single diff full responses (2)", + sets: [][]string{ + {"A", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"B", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + }, + want: crawlStrategyMultiProbe, + }, + { + name: "partial response, full bucket", + sets: [][]string{ + { /* missing */ "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"0", "1", "2" /* missing */, "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"0", "1", "2", "3", "4", "5" /* missing */, "9", "10", "11", "12", "13", "14", "15"}, + }, + want: crawlStrategyMultiProbe, + }, + { + // simulates: the weird node that 
only returns three peers for each + // request and all of them are different + name: "weird node (all different responses)", + sets: [][]string{ + {"0", "1", "2"}, + {"3", "4", "5"}, + {"6", "7", "8"}, + }, + want: crawlStrategyRandomProbe, + }, + { + // simulates: the weird node that only returns three peers for each + // request and all of them are different + name: "weird node (single overlap responses)", + sets: [][]string{ + {"0", "1", "2"}, + {"3", "4", "0"}, + {"6", "4", "8"}, + }, + want: crawlStrategyRandomProbe, + }, + { + name: "more than 16 peers in each bucket", + sets: [][]string{ + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"16", "17", "18", "19", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + {"16", "17", "20", "21", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + }, + want: crawlStrategyMultiProbe, + }, + { + name: "partially filled bucket", + sets: [][]string{ + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"}, + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"}, + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"}, + }, + want: crawlStrategySingleProbe, + }, + { + name: "received v4wire.MaxNeighbors responses, full bucket", + sets: [][]string{ + {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"}, + {"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"}, + {"5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"}, + }, + want: crawlStrategyMultiProbe, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var sets []mapset.Set[peer.ID] + for i, s := range tt.sets { + sets = append(sets, mapset.NewThreadUnsafeSet[peer.ID]()) + for _, item := range s { + sets[i].Add(peer.ID(item)) + } + } + got := determineStrategy(sets) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/discv4/dialer.go b/discv4/dialer.go index 017b1579..99ca8a88 
100644 --- a/discv4/dialer.go +++ b/discv4/dialer.go @@ -4,18 +4,18 @@ import ( "context" "time" + "github.com/ethereum/go-ethereum/p2p/discover" log "github.com/sirupsen/logrus" "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" - "github.com/dennis-tra/nebula-crawler/discvx" ) // Dialer encapsulates a libp2p host that dials peers. type Dialer struct { id string dialedPeers uint64 - listener *discvx.UDPv4 + listener *discover.UDPv4 } var _ core.Worker[PeerInfo, core.DialResult[PeerInfo]] = (*Dialer)(nil) diff --git a/discv4/driver_crawler.go b/discv4/driver_crawler.go index 21e28740..a6579ca1 100644 --- a/discv4/driver_crawler.go +++ b/discv4/driver_crawler.go @@ -5,12 +5,17 @@ import ( "crypto/elliptic" crand "crypto/rand" "fmt" + "math" "net" - "runtime" + "net/netip" + "sync" + "syscall" "time" ethcrypto "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/secp256k1" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/discover/v4wire" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" @@ -18,20 +23,21 @@ import ( log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" + "golang.org/x/net/context" "github.com/dennis-tra/nebula-crawler/config" "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" "github.com/dennis-tra/nebula-crawler/db/models" - "github.com/dennis-tra/nebula-crawler/devp2p" - "github.com/dennis-tra/nebula-crawler/discvx" + "github.com/dennis-tra/nebula-crawler/tele" "github.com/dennis-tra/nebula-crawler/utils" ) type PeerInfo struct { *enode.Node - peerID peer.ID - maddrs []ma.Multiaddr + peerID peer.ID + maddrs []ma.Multiaddr + udpAddr netip.AddrPort } var _ core.PeerInfo[PeerInfo] = (*PeerInfo)(nil) @@ -79,10 +85,17 @@ func NewPeerInfo(node *enode.Node) (PeerInfo, error) { maddrs = append(maddrs, 
maddr) } + ipAddr, ok := netip.AddrFromSlice(node.IP()) + if !ok { + return PeerInfo{}, fmt.Errorf("failed to convert ip to netip.Addr: %s", node.IP()) + } + udpAddr := netip.AddrPortFrom(ipAddr, uint16(node.UDP())) + pi := PeerInfo{ - Node: node, - peerID: peerID, - maddrs: maddrs, + Node: node, + peerID: peerID, + maddrs: maddrs, + udpAddr: udpAddr, } return pi, nil @@ -102,15 +115,19 @@ func (p PeerInfo) Merge(other PeerInfo) PeerInfo { } type CrawlDriverConfig struct { - Version string - TrackNeighbors bool - DialTimeout time.Duration - BootstrapPeers []*enode.Node - AddrDialType config.AddrType - AddrTrackType config.AddrType - MeterProvider metric.MeterProvider - TracerProvider trace.TracerProvider - LogErrors bool + Version string + TrackNeighbors bool + CrawlWorkerCount int + DialTimeout time.Duration + BootstrapPeers []*enode.Node + AddrDialType config.AddrType + AddrTrackType config.AddrType + MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + LogErrors bool + KeepENR bool + UDPBufferSize int + UDPRespTimeout time.Duration } func (cfg *CrawlDriverConfig) CrawlerConfig() *CrawlerConfig { @@ -118,6 +135,8 @@ func (cfg *CrawlDriverConfig) CrawlerConfig() *CrawlerConfig { DialTimeout: cfg.DialTimeout, AddrDialType: cfg.AddrDialType, LogErrors: cfg.LogErrors, + MaxJitter: time.Duration(cfg.CrawlWorkerCount/50) * time.Second, // e.g., 3000 workers evenly distributed over 60s + KeepENR: false, } } @@ -128,41 +147,53 @@ func (cfg *CrawlDriverConfig) WriterConfig() *core.CrawlWriterConfig { } type CrawlDriver struct { - cfg *CrawlDriverConfig - dbc db.Client - clients []*devp2p.Client - dbCrawl *models.Crawl - tasksChan chan PeerInfo - peerstore *enode.DB - crawlerCount int - writerCount int - crawler []*Crawler + cfg *CrawlDriverConfig + dbc db.Client + client *Client + dbCrawl *models.Crawl + tasksChan chan PeerInfo + peerstore *enode.DB + crawlerCount int + writerCount int + crawler []*Crawler + unhandledChan chan discover.ReadPacket + 
taskDoneAtChan chan time.Time + + // Telemetry + unhandledPacketsCounter metric.Int64Counter } var _ core.Driver[PeerInfo, core.CrawlResult[PeerInfo]] = (*CrawlDriver)(nil) func NewCrawlDriver(dbc db.Client, crawl *models.Crawl, cfg *CrawlDriverConfig) (*CrawlDriver, error) { - // create a libp2p host per CPU core to distribute load - clients := make([]*devp2p.Client, 0, runtime.NumCPU()) - for i := 0; i < runtime.NumCPU(); i++ { - // If I'm not using the below elliptic curve, some Ethereum clients will reject communication - priv, err := ecdsa.GenerateKey(ethcrypto.S256(), crand.Reader) - if err != nil { - return nil, fmt.Errorf("new ethereum ecdsa key: %w", err) - } - - clientCfg := devp2p.DefaultConfig() - clientCfg.DialTimeout = cfg.DialTimeout + priv, err := ethcrypto.GenerateKey() + if err != nil { + return nil, fmt.Errorf("new ethereum ecdsa key: %w", err) + } - c := devp2p.NewClient(priv, clientCfg) - if err != nil { - return nil, fmt.Errorf("new devp2p host: %w", err) - } + clientCfg := DefaultClientConfig() + clientCfg.DialTimeout = cfg.DialTimeout + client := NewClient(priv, clientCfg) - clients = append(clients, c) + peerstore, err := enode.OpenDB("") // in memory db + if err != nil { + return nil, fmt.Errorf("open in-memory peerstore: %w", err) } + // Init channels: + // unhandledChan: this is a channel that will receive all unhandled + // packets from all discv4 UDP listeners. + // tasksChan: this is the channel that the engine will consume. It + // receives all peers that should be crawled. + // taskDoneAtChan: every time a crawl worker has completed one crawl, it + // will emit a timestamp on this channel. We use this in + // the monitoring of unhandled packets. 
If the last crawl + // is longer ago than 10s, and we haven't received an + // unhandled Neighbors packet, we close the channel + unhandledChan := make(chan discover.ReadPacket, discover.BucketSize*cfg.CrawlWorkerCount) tasksChan := make(chan PeerInfo, len(cfg.BootstrapPeers)) + taskDoneAtChan := make(chan time.Time, cfg.CrawlWorkerCount) + for _, node := range cfg.BootstrapPeers { pi, err := NewPeerInfo(node) if err != nil { @@ -170,24 +201,42 @@ func NewCrawlDriver(dbc db.Client, crawl *models.Crawl, cfg *CrawlDriverConfig) } tasksChan <- pi } - close(tasksChan) - peerstore, err := enode.OpenDB("") // in memory db + meter := cfg.MeterProvider.Meter(tele.MeterName) + unhandledPacketsCounter, err := meter.Int64Counter("unhandled_packets") if err != nil { - return nil, fmt.Errorf("open in-memory peerstore: %w", err) + return nil, fmt.Errorf("create unhandled packets counter: %w", err) + } + + // set the discovery response timeout + discover.RespTimeout = cfg.UDPRespTimeout + + d := &CrawlDriver{ + cfg: cfg, + dbc: dbc, + client: client, + dbCrawl: crawl, + peerstore: peerstore, + tasksChan: tasksChan, + taskDoneAtChan: taskDoneAtChan, + unhandledChan: unhandledChan, + crawler: make([]*Crawler, 0, cfg.CrawlWorkerCount), + + // Telemetry + unhandledPacketsCounter: unhandledPacketsCounter, } - return &CrawlDriver{ - cfg: cfg, - dbc: dbc, - clients: clients, - dbCrawl: crawl, - tasksChan: tasksChan, - peerstore: peerstore, - crawler: make([]*Crawler, 0), - }, nil + // hand responsibility of tasksChan to this function. It will close the + // channel if the workers have been idle for more than 10s. This will signal + // the engine that we also don't expect any more late unhandled packets. 
+ d.monitorUnhandledPackets() + + return d, nil } +// NewWorker is called multiple times but only log the configured buffer sizes once +var logOnce sync.Once + func (d *CrawlDriver) NewWorker() (core.Worker[PeerInfo, core.CrawlResult[PeerInfo]], error) { // If I'm not using the below elliptic curve, some Ethereum clients will reject communication priv, err := ecdsa.GenerateKey(ethcrypto.S256(), crand.Reader) @@ -202,24 +251,45 @@ func (d *CrawlDriver) NewWorker() (core.Worker[PeerInfo, core.CrawlResult[PeerIn return nil, fmt.Errorf("listen on udp port: %w", err) } - discvxCfg := discvx.Config{ - PrivateKey: priv, + if err = conn.SetReadBuffer(d.cfg.UDPBufferSize); err != nil { + log.Warnln("Failed to set read buffer size on UDP listener", err) } - listener, err := discvx.ListenV4(conn, ethNode, discvxCfg) + rcvbuf, sndbuf, err := getUDPBufferSize(conn) + logOnce.Do(func() { + logEntry := log.WithFields(log.Fields{ + "rcvbuf": rcvbuf, + "sndbuf": sndbuf, + "rcvtgt": d.cfg.UDPBufferSize, // receive target + }) + if rcvbuf < d.cfg.UDPBufferSize { + logEntry.Warnln("Failed to increase UDP buffer sizes, using default") + } else { + logEntry.Infoln("Configured UDP buffer sizes") + } + }) + + log.Debugln("Listening on UDP port ", conn.LocalAddr().String(), " for Ethereum discovery") + + discvxCfg := discover.Config{ + PrivateKey: priv, + Unhandled: d.unhandledChan, + } + listener, err := discover.ListenV4(conn, ethNode, discvxCfg) if err != nil { return nil, fmt.Errorf("listen discv4: %w", err) } - // evenly assign a libp2p hosts to crawler workers - client := d.clients[d.crawlerCount%len(d.clients)] + crawlerCfg := d.cfg.CrawlerConfig() + crawlerCfg.KeepENR = d.cfg.KeepENR c := &Crawler{ - id: fmt.Sprintf("crawler-%02d", d.crawlerCount), - cfg: d.cfg.CrawlerConfig(), - client: client, - listener: listener, - done: make(chan struct{}), + id: fmt.Sprintf("crawler-%02d", d.crawlerCount), + cfg: crawlerCfg, + client: d.client, + listener: listener, + taskDoneChan: 
d.taskDoneAtChan, + done: make(chan struct{}), } d.crawlerCount += 1 @@ -246,6 +316,90 @@ func (d *CrawlDriver) Tasks() <-chan PeerInfo { func (d *CrawlDriver) Close() { for _, c := range d.crawler { c.listener.Close() - c.client.Close() } + close(d.unhandledChan) + + // wait for the go routine that reads the unhandled packets to close + select { + case <-d.tasksChan: + case <-time.After(time.Second): + log.Warnln("Timed out waiting for packetsDone channel to close") + } +} + +func (d *CrawlDriver) monitorUnhandledPackets() { + go func() { + defer close(d.tasksChan) + + timeout := 10 * time.Second + latestTaskDone := time.Now() + timer := time.NewTimer(math.MaxInt64) + + LOOP: + for { + select { + case <-timer.C: + log.Infof("No Neighbors packet received from any crawler worker for %s. Stop monitoring unhandled packets.", timeout) + break LOOP + case taskDoneAt := <-d.taskDoneAtChan: + if taskDoneAt.After(latestTaskDone) { + latestTaskDone = taskDoneAt + timer.Reset(timeout) + } + case packet, more := <-d.unhandledChan: + if !more { + break LOOP + } + + rawpacket, _, _, err := v4wire.Decode(packet.Data) + if err != nil { + continue + } + + neighborsPacket, ok := rawpacket.(*v4wire.Neighbors) + if !ok { + continue + } + + d.unhandledPacketsCounter.Add(context.TODO(), 1) + for _, n := range neighborsPacket.Nodes { + node, err := discover.NodeFromRPC(packet.Addr, n, nil) + if err != nil { + continue + } + + pi, err := NewPeerInfo(node) + if err != nil { + continue + } + + d.tasksChan <- pi + } + } + } + }() +} + +// getUDPBufferSize reads the receive and send buffer sizes from the system +func getUDPBufferSize(conn *net.UDPConn) (rcvbuf int, sndbuf int, err error) { + rawConn, err := conn.SyscallConn() + if err != nil { + return 0, 0, err + } + + var ( + rcverr error + snderr error + ) + err = rawConn.Control(func(fd uintptr) { + rcvbuf, rcverr = syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF) + sndbuf, snderr = syscall.GetsockoptInt(int(fd), 
syscall.SOL_SOCKET, syscall.SO_SNDBUF) + }) + if rcverr != nil { + err = rcverr + } else if snderr != nil { + err = snderr + } + + return } diff --git a/discv4/driver_dialer.go b/discv4/driver_dialer.go index b3af1d58..9a141745 100644 --- a/discv4/driver_dialer.go +++ b/discv4/driver_dialer.go @@ -11,6 +11,7 @@ import ( "time" ethcrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" @@ -18,7 +19,6 @@ import ( "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" - "github.com/dennis-tra/nebula-crawler/discvx" "github.com/dennis-tra/nebula-crawler/utils" ) @@ -75,12 +75,12 @@ func (d *DialDriver) NewWorker() (core.Worker[PeerInfo, core.DialResult[PeerInfo return nil, fmt.Errorf("listen on udp port: %w", err) } - discv4Cfg := discvx.Config{ + discv4Cfg := discover.Config{ PrivateKey: priv, ValidSchemes: enode.ValidSchemes, } - listener, err := discvx.ListenV4(conn, ethNode, discv4Cfg) + listener, err := discover.ListenV4(conn, ethNode, discv4Cfg) if err != nil { return nil, fmt.Errorf("listen discv4: %w", err) } diff --git a/discv4/gen.go b/discv4/gen.go new file mode 100644 index 00000000..2458c7c8 --- /dev/null +++ b/discv4/gen.go @@ -0,0 +1,45 @@ +package discv4 + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + + "github.com/ethereum/go-ethereum/p2p/discover/v4wire" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// GenRandomPublicKey generates a public key that, when hashed with Keccak256, +// yields a [v4wire.Pubkey] that has a common prefix length of targetCPL. +func GenRandomPublicKey(targetID enode.ID, targetCPL int) (v4wire.Pubkey, error) { + targetPrefix := binary.BigEndian.Uint16(targetID[:]) + + // For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B.
+ // Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L + // to our randomly generated prefix. + toggledTargetPrefix := targetPrefix ^ (uint16(0x8000) >> targetCPL) + + randUInt16Bytes := new([2]byte) + _, err := rand.Read(randUInt16Bytes[:]) + if err != nil { + return [64]byte{}, fmt.Errorf("read random bytes: %w", err) + } + randUint16 := binary.BigEndian.Uint16(randUInt16Bytes[:]) + + // generate a mask that starts with targetCPL + 1 ones and the rest zeroes + mask := (^uint16(0)) << (16 - (targetCPL + 1)) + + // toggledTargetPrefix & mask: use the first targetCPL + 1 bits from the toggledTargetPrefix + // randUint16 & ^mask: use the remaining bits from the random uint16 + // by or'ing them together with | we composed the final prefix + prefix := (toggledTargetPrefix & mask) | (randUint16 & ^mask) + + // Lookup the preimage in the key prefix map + key := keyPrefixMap[prefix] + + // generate public key + out := new([64]byte) + binary.BigEndian.PutUint32(out[:], key) + + return *out, nil +} diff --git a/discv4/gen_test.go b/discv4/gen_test.go new file mode 100644 index 00000000..0e105e57 --- /dev/null +++ b/discv4/gen_test.go @@ -0,0 +1,42 @@ +package discv4 + +import ( + "encoding/hex" + "math/bits" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGenRandomPublicKey(t *testing.T) { + tests := []struct { + targetID string + }{ + {targetID: "c845e51a5e470e445ad424f7cb516339237f469ad7b3c903221b5c49ce55863f"}, + } + for _, tt := range tests { + t.Run(tt.targetID, func(t *testing.T) { + nodeID, err := hex.DecodeString(tt.targetID) + require.NoError(t, err) + + // fmt.Printf(" %08b\n", nodeID[:2]) + for i := 0; i < 16; i++ { + got, err := GenRandomPublicKey(enode.ID(nodeID), i) + require.NoError(t, err) + + gotHashed := crypto.Keccak256Hash(got[:]) + + // fmt.Printf("[%d] 
%08b - %08b %08b\n", i, gotHashed[:2], nodeID[0]^gotHashed[0], nodeID[1]^gotHashed[1]) + + lz := bits.LeadingZeros8(nodeID[0] ^ gotHashed[0]) + if i > 8 { + lz += bits.LeadingZeros8(nodeID[1] ^ gotHashed[1]) + } + assert.Equal(t, i, lz) + } + }) + } +} diff --git a/discvx/prefixmap.go b/discv4/prefixmap.go similarity index 99% rename from discvx/prefixmap.go rename to discv4/prefixmap.go index 1c79aad7..1a60bb6e 100644 --- a/discvx/prefixmap.go +++ b/discv4/prefixmap.go @@ -1,4 +1,4 @@ -package discvx +package discv4 // Code generated by gen.go DO NOT EDIT var keyPrefixMap = [...]uint32{ @@ -4098,4 +4098,4 @@ var keyPrefixMap = [...]uint32{ 173483, 67626, 6220, 14385, 7905, 111913, 59878, 107594, 84778, 95575, 36563, 59941, 49249, 20387, 19794, 133000, 59257, 169791, 40347, 131321, 109253, 6420, 58173, 58758, 80390, 3365, 201391, 18316, 164366, 17009, 4024, 168998, 46667, 82073, 93376, 393693, 16407, 5389, 54367, 81100, 59820, 81259, 15536, 27154, 30418, 3965, 13501, 153155, -} \ No newline at end of file +} diff --git a/discv5/crawler.go b/discv5/crawler.go index 40066088..037cc4bd 100644 --- a/discv5/crawler.go +++ b/discv5/crawler.go @@ -10,6 +10,7 @@ import ( "time" "github.com/cenkalti/backoff/v4" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p/core/peer" basichost "github.com/libp2p/go-libp2p/p2p/host/basic" @@ -21,7 +22,6 @@ import ( "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" "github.com/dennis-tra/nebula-crawler/db/models" - "github.com/dennis-tra/nebula-crawler/discvx" ) const MaxCrawlRetriesAfterTimeout = 2 // magic @@ -37,7 +37,7 @@ type Crawler struct { id string cfg *CrawlerConfig host *basichost.BasicHost - listener *discvx.UDPv5 + listener *discover.UDPv5 crawledPeers int done chan struct{} } @@ -502,12 +502,12 @@ func (c *Crawler) crawlDiscV5(ctx context.Context, pi PeerInfo) chan DiscV5Resul // internally, so we won't gain much by 
spawning multiple parallel go // routines here. Stop the process as soon as we have received a timeout and // don't let the following calls time out as well. - for i := 0; i <= discvx.NBuckets; i++ { // 17 is maximum + for i := 0; i <= discover.NBuckets; i++ { // 17 is maximum var neighbors []*enode.Node - neighbors, err = c.listener.FindNode(pi.Node, []uint{uint(discvx.HashBits - i)}) + neighbors, err = c.listener.FindNode(pi.Node, []uint{uint(discover.HashBits - i)}) if err != nil { - if errors.Is(err, discvx.ErrTimeout) { + if errors.Is(err, discover.ErrTimeout) { timeouts += 1 if timeouts < MaxCrawlRetriesAfterTimeout { continue diff --git a/discv5/dialer.go b/discv5/dialer.go index 0b3a6cbf..149058a4 100644 --- a/discv5/dialer.go +++ b/discv5/dialer.go @@ -4,18 +4,18 @@ import ( "context" "time" + "github.com/ethereum/go-ethereum/p2p/discover" log "github.com/sirupsen/logrus" "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" - "github.com/dennis-tra/nebula-crawler/discvx" ) // Dialer encapsulates a libp2p host that dials peers. 
type Dialer struct { id string dialedPeers uint64 - listener *discvx.UDPv5 + listener *discover.UDPv5 } var _ core.Worker[PeerInfo, core.DialResult[PeerInfo]] = (*Dialer)(nil) diff --git a/discv5/driver_crawler.go b/discv5/driver_crawler.go index 13585b81..8a117459 100644 --- a/discv5/driver_crawler.go +++ b/discv5/driver_crawler.go @@ -13,6 +13,7 @@ import ( secp256k1v4 "github.com/decred/dcrd/dcrec/secp256k1/v4" ethcrypto "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/secp256k1" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p" mplex "github.com/libp2p/go-libp2p-mplex" @@ -35,7 +36,6 @@ import ( "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" "github.com/dennis-tra/nebula-crawler/db/models" - "github.com/dennis-tra/nebula-crawler/discvx" "github.com/dennis-tra/nebula-crawler/utils" ) @@ -222,12 +222,12 @@ func (d *CrawlDriver) NewWorker() (core.Worker[PeerInfo, core.CrawlResult[PeerIn return nil, fmt.Errorf("listen on udp port: %w", err) } - discv5Cfg := discvx.Config{ + discv5Cfg := discover.Config{ PrivateKey: priv, ValidSchemes: enode.ValidSchemes, } - listener, err := discvx.ListenV5(conn, ethNode, discv5Cfg) + listener, err := discover.ListenV5(conn, ethNode, discv5Cfg) if err != nil { return nil, fmt.Errorf("listen discv5: %w", err) } diff --git a/discv5/driver_dialer.go b/discv5/driver_dialer.go index 60878dec..2c793419 100644 --- a/discv5/driver_dialer.go +++ b/discv5/driver_dialer.go @@ -11,6 +11,7 @@ import ( "time" ethcrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" @@ -18,7 +19,6 @@ import ( "github.com/dennis-tra/nebula-crawler/core" "github.com/dennis-tra/nebula-crawler/db" - "github.com/dennis-tra/nebula-crawler/discvx" 
"github.com/dennis-tra/nebula-crawler/utils" ) @@ -75,12 +75,12 @@ func (d *DialDriver) NewWorker() (core.Worker[PeerInfo, core.DialResult[PeerInfo return nil, fmt.Errorf("listen on udp port: %w", err) } - discv5Cfg := discvx.Config{ + discv5Cfg := discover.Config{ PrivateKey: priv, ValidSchemes: enode.ValidSchemes, } - listener, err := discvx.ListenV5(conn, ethNode, discv5Cfg) + listener, err := discover.ListenV5(conn, ethNode, discv5Cfg) if err != nil { return nil, fmt.Errorf("listen discv5: %w", err) } diff --git a/discvx/common.go b/discvx/common.go deleted file mode 100644 index 0f1e8153..00000000 --- a/discvx/common.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "crypto/ecdsa" - crand "crypto/rand" - "encoding/binary" - "math/rand" - "net" - "net/netip" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/netutil" -) - -// UDPConn is a network connection on which discovery can operate. 
-type UDPConn interface { - ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) - WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (n int, err error) - Close() error - LocalAddr() net.Addr -} - -// Config holds settings for the discovery listener. -type Config struct { - // These settings are required and configure the UDP listener: - PrivateKey *ecdsa.PrivateKey - - // All remaining settings are optional. - - // Packet handling configuration: - NetRestrict *netutil.Netlist // list of allowed IP networks - Unhandled chan<- ReadPacket // unhandled packets are sent on this channel - - // Node table configuration: - Bootnodes []*enode.Node // list of bootstrap nodes - PingInterval time.Duration // speed of node liveness check - RefreshInterval time.Duration // used in bucket refresh - NoFindnodeLivenessCheck bool // turns off validation of table nodes in FINDNODE handler - - // The options below are useful in very specific cases, like in unit tests. - V5ProtocolID *[6]byte - Log log.Logger // if set, log messages go here - ValidSchemes enr.IdentityScheme // allowed identity schemes - Clock mclock.Clock -} - -func (cfg Config) withDefaults() Config { - // Node table configuration: - if cfg.PingInterval == 0 { - cfg.PingInterval = 3 * time.Second - } - if cfg.RefreshInterval == 0 { - cfg.RefreshInterval = 30 * time.Minute - } - - // Debug/test settings: - if cfg.Log == nil { - cfg.Log = log.Root() - } - if cfg.ValidSchemes == nil { - cfg.ValidSchemes = enode.ValidSchemes - } - if cfg.Clock == nil { - cfg.Clock = mclock.System{} - } - return cfg -} - -// ListenUDP starts listening for discovery packets on the given UDP socket. -func ListenUDP(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { - return ListenV4(c, ln, cfg) -} - -// ReadPacket is a packet that couldn't be handled. Those packets are sent to the unhandled -// channel if configured. 
-type ReadPacket struct { - Data []byte - Addr netip.AddrPort -} - -type randomSource interface { - Intn(int) int - Int63n(int64) int64 - Shuffle(int, func(int, int)) -} - -// reseedingRandom is a random number generator that tracks when it was last re-seeded. -type reseedingRandom struct { - mu sync.Mutex - cur *rand.Rand -} - -func (r *reseedingRandom) seed() { - var b [8]byte - crand.Read(b[:]) - seed := binary.BigEndian.Uint64(b[:]) - new := rand.New(rand.NewSource(int64(seed))) - - r.mu.Lock() - r.cur = new - r.mu.Unlock() -} - -func (r *reseedingRandom) Intn(n int) int { - r.mu.Lock() - defer r.mu.Unlock() - return r.cur.Intn(n) -} - -func (r *reseedingRandom) Int63n(n int64) int64 { - r.mu.Lock() - defer r.mu.Unlock() - return r.cur.Int63n(n) -} - -func (r *reseedingRandom) Shuffle(n int, swap func(i, j int)) { - r.mu.Lock() - defer r.mu.Unlock() - r.cur.Shuffle(n, swap) -} diff --git a/discvx/lookup.go b/discvx/lookup.go deleted file mode 100644 index 59b1860f..00000000 --- a/discvx/lookup.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discvx - -import ( - "context" - "errors" - "time" - - "github.com/ethereum/go-ethereum/p2p/enode" -) - -// lookup performs a network search for nodes close to the given target. It approaches the -// target by querying nodes that are closer to it on each iteration. The given target does -// not need to be an actual node identifier. -type lookup struct { - tab *Table - queryfunc queryFunc - replyCh chan []*enode.Node - cancelCh <-chan struct{} - asked, seen map[enode.ID]bool - result nodesByDistance - replyBuffer []*enode.Node - queries int -} - -type queryFunc func(*enode.Node) ([]*enode.Node, error) - -func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *lookup { - it := &lookup{ - tab: tab, - queryfunc: q, - asked: make(map[enode.ID]bool), - seen: make(map[enode.ID]bool), - result: nodesByDistance{target: target}, - replyCh: make(chan []*enode.Node, alpha), - cancelCh: ctx.Done(), - queries: -1, - } - // Don't query further if we hit ourself. - // Unlikely to happen often in practice. - it.asked[tab.self().ID()] = true - return it -} - -// run runs the lookup to completion and returns the closest nodes found. -func (it *lookup) run() []*enode.Node { - for it.advance() { - } - return it.result.entries -} - -// advance advances the lookup until any new nodes have been found. -// It returns false when the lookup has ended. 
-func (it *lookup) advance() bool { - for it.startQueries() { - select { - case nodes := <-it.replyCh: - it.replyBuffer = it.replyBuffer[:0] - for _, n := range nodes { - if n != nil && !it.seen[n.ID()] { - it.seen[n.ID()] = true - it.result.push(n, bucketSize) - it.replyBuffer = append(it.replyBuffer, n) - } - } - it.queries-- - if len(it.replyBuffer) > 0 { - return true - } - case <-it.cancelCh: - it.shutdown() - } - } - return false -} - -func (it *lookup) shutdown() { - for it.queries > 0 { - <-it.replyCh - it.queries-- - } - it.queryfunc = nil - it.replyBuffer = nil -} - -func (it *lookup) startQueries() bool { - if it.queryfunc == nil { - return false - } - - // The first query returns nodes from the local table. - if it.queries == -1 { - closest := it.tab.findnodeByID(it.result.target, bucketSize, false) - // Avoid finishing the lookup too quickly if table is empty. It'd be better to wait - // for the table to fill in this case, but there is no good mechanism for that - // yet. - if len(closest.entries) == 0 { - it.slowdown() - } - it.queries = 1 - it.replyCh <- closest.entries - return true - } - - // Ask the closest nodes that we haven't asked yet. - for i := 0; i < len(it.result.entries) && it.queries < alpha; i++ { - n := it.result.entries[i] - if !it.asked[n.ID()] { - it.asked[n.ID()] = true - it.queries++ - go it.query(n, it.replyCh) - } - } - // The lookup ends when no more nodes can be asked. - return it.queries > 0 -} - -func (it *lookup) slowdown() { - sleep := time.NewTimer(1 * time.Second) - defer sleep.Stop() - select { - case <-sleep.C: - case <-it.tab.closeReq: - } -} - -func (it *lookup) query(n *enode.Node, reply chan<- []*enode.Node) { - r, err := it.queryfunc(n) - if !errors.Is(err, errClosed) { // avoid recording failures on shutdown. 
- success := len(r) > 0 - it.tab.trackRequest(n, success, r) - if err != nil { - it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "err", err) - } - } - reply <- r -} - -// lookupIterator performs lookup operations and iterates over all seen nodes. -// When a lookup finishes, a new one is created through nextLookup. -type lookupIterator struct { - buffer []*enode.Node - nextLookup lookupFunc - ctx context.Context - cancel func() - lookup *lookup -} - -type lookupFunc func(ctx context.Context) *lookup - -func newLookupIterator(ctx context.Context, next lookupFunc) *lookupIterator { - ctx, cancel := context.WithCancel(ctx) - return &lookupIterator{ctx: ctx, cancel: cancel, nextLookup: next} -} - -// Node returns the current node. -func (it *lookupIterator) Node() *enode.Node { - if len(it.buffer) == 0 { - return nil - } - return it.buffer[0] -} - -// Next moves to the next node. -func (it *lookupIterator) Next() bool { - // Consume next node in buffer. - if len(it.buffer) > 0 { - it.buffer = it.buffer[1:] - } - // Advance the lookup to refill the buffer. - for len(it.buffer) == 0 { - if it.ctx.Err() != nil { - it.lookup = nil - it.buffer = nil - return false - } - if it.lookup == nil { - it.lookup = it.nextLookup(it.ctx) - continue - } - if !it.lookup.advance() { - it.lookup = nil - continue - } - it.buffer = it.lookup.replyBuffer - } - return true -} - -// Close ends the iterator. -func (it *lookupIterator) Close() { - it.cancel() -} diff --git a/discvx/metrics.go b/discvx/metrics.go deleted file mode 100644 index 6e47985e..00000000 --- a/discvx/metrics.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "fmt" - "net" - "net/netip" - - "github.com/ethereum/go-ethereum/metrics" -) - -const ( - moduleName = "discover" - // ingressMeterName is the prefix of the per-packet inbound metrics. - ingressMeterName = moduleName + "/ingress" - - // egressMeterName is the prefix of the per-packet outbound metrics. - egressMeterName = moduleName + "/egress" -) - -var ( - bucketsCounter []metrics.Counter - ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil) - egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil) -) - -func init() { - for i := 0; i < NBuckets; i++ { - bucketsCounter = append(bucketsCounter, metrics.NewRegisteredCounter(fmt.Sprintf("%s/bucket/%d/count", moduleName, i), nil)) - } -} - -// meteredUdpConn is a wrapper around a net.UDPConn that meters both the -// inbound and outbound network traffic. -type meteredUdpConn struct { - udpConn UDPConn -} - -func newMeteredConn(conn UDPConn) UDPConn { - // Short circuit if metrics are disabled - if !metrics.Enabled { - return conn - } - return &meteredUdpConn{udpConn: conn} -} - -func (c *meteredUdpConn) Close() error { - return c.udpConn.Close() -} - -func (c *meteredUdpConn) LocalAddr() net.Addr { - return c.udpConn.LocalAddr() -} - -// ReadFromUDPAddrPort delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way. 
-func (c *meteredUdpConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { - n, addr, err = c.udpConn.ReadFromUDPAddrPort(b) - ingressTrafficMeter.Mark(int64(n)) - return n, addr, err -} - -// WriteToUDPAddrPort delegates a network write to the underlying connection, bumping the udp egress traffic meter along the way. -func (c *meteredUdpConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (n int, err error) { - n, err = c.udpConn.WriteToUDPAddrPort(b, addr) - egressTrafficMeter.Mark(int64(n)) - return n, err -} diff --git a/discvx/node.go b/discvx/node.go deleted file mode 100644 index b4435615..00000000 --- a/discvx/node.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "slices" - "sort" - "time" - - "github.com/ethereum/go-ethereum/p2p/enode" -) - -type BucketNode struct { - Node *enode.Node `json:"node"` - AddedToTable time.Time `json:"addedToTable"` - AddedToBucket time.Time `json:"addedToBucket"` - Checks int `json:"checks"` - Live bool `json:"live"` -} - -// tableNode is an entry in Table. 
-type tableNode struct { - *enode.Node - revalList *revalidationList - addedToTable time.Time // first time node was added to bucket or replacement list - addedToBucket time.Time // time it was added in the actual bucket - livenessChecks uint // how often liveness was checked - isValidatedLive bool // true if existence of node is considered validated right now -} - -func unwrapNodes(ns []*tableNode) []*enode.Node { - result := make([]*enode.Node, len(ns)) - for i, n := range ns { - result[i] = n.Node - } - return result -} - -func (n *tableNode) String() string { - return n.Node.String() -} - -// nodesByDistance is a list of nodes, ordered by distance to target. -type nodesByDistance struct { - entries []*enode.Node - target enode.ID -} - -// push adds the given node to the list, keeping the total size below maxElems. -func (h *nodesByDistance) push(n *enode.Node, maxElems int) { - ix := sort.Search(len(h.entries), func(i int) bool { - return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0 - }) - - end := len(h.entries) - if len(h.entries) < maxElems { - h.entries = append(h.entries, n) - } - if ix < end { - // Slide existing entries down to make room. - // This will overwrite the entry we just appended. - copy(h.entries[ix+1:], h.entries[ix:]) - h.entries[ix] = n - } -} - -type nodeType interface { - ID() enode.ID -} - -// containsID reports whether ns contains a node with the given ID. -func containsID[N nodeType](ns []N, id enode.ID) bool { - for _, n := range ns { - if n.ID() == id { - return true - } - } - return false -} - -// deleteNode removes a node from the list. -func deleteNode[N nodeType](list []N, id enode.ID) []N { - return slices.DeleteFunc(list, func(n N) bool { - return n.ID() == id - }) -} diff --git a/discvx/ntp.go b/discvx/ntp.go deleted file mode 100644 index 124e38bb..00000000 --- a/discvx/ntp.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Contains the NTP time drift detection via the SNTP protocol: -// https://tools.ietf.org/html/rfc4330 - -package discvx - -import ( - "fmt" - "net" - "slices" - "time" - - "github.com/ethereum/go-ethereum/log" -) - -const ( - ntpPool = "pool.ntp.org" // ntpPool is the NTP server to query for the current time - ntpChecks = 3 // Number of measurements to do against the NTP server -) - -// checkClockDrift queries an NTP server for clock drifts and warns the user if -// one large enough is detected. -func checkClockDrift() { - drift, err := sntpDrift(ntpChecks) - if err != nil { - return - } - if drift < -driftThreshold || drift > driftThreshold { - log.Warn(fmt.Sprintf("System clock seems off by %v, which can prevent network connectivity", drift)) - log.Warn("Please enable network time synchronisation in system settings.") - } else { - log.Debug("NTP sanity check done", "drift", drift) - } -} - -// sntpDrift does a naive time resolution against an NTP server and returns the -// measured drift. This method uses the simple version of NTP. It's not precise -// but should be fine for these purposes. -// -// Note, it executes two extra measurements compared to the number of requested -// ones to be able to discard the two extremes as outliers. 
-func sntpDrift(measurements int) (time.Duration, error) { - // Resolve the address of the NTP server - addr, err := net.ResolveUDPAddr("udp", ntpPool+":123") - if err != nil { - return 0, err - } - // Construct the time request (empty package with only 2 fields set): - // Bits 3-5: Protocol version, 3 - // Bits 6-8: Mode of operation, client, 3 - request := make([]byte, 48) - request[0] = 3<<3 | 3 - - // Execute each of the measurements - drifts := []time.Duration{} - for i := 0; i < measurements+2; i++ { - // Dial the NTP server and send the time retrieval request - conn, err := net.DialUDP("udp", nil, addr) - if err != nil { - return 0, err - } - defer conn.Close() - - sent := time.Now() - if _, err = conn.Write(request); err != nil { - return 0, err - } - // Retrieve the reply and calculate the elapsed time - conn.SetDeadline(time.Now().Add(5 * time.Second)) - - reply := make([]byte, 48) - if _, err = conn.Read(reply); err != nil { - return 0, err - } - elapsed := time.Since(sent) - - // Reconstruct the time from the reply data - sec := uint64(reply[43]) | uint64(reply[42])<<8 | uint64(reply[41])<<16 | uint64(reply[40])<<24 - frac := uint64(reply[47]) | uint64(reply[46])<<8 | uint64(reply[45])<<16 | uint64(reply[44])<<24 - - nanosec := sec*1e9 + (frac*1e9)>>32 - - t := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nanosec)).Local() - - // Calculate the drift based on an assumed answer time of RRT/2 - drifts = append(drifts, sent.Sub(t)+elapsed/2) - } - // Calculate average drift (drop two extremities to avoid outliers) - slices.Sort(drifts) - - drift := time.Duration(0) - for i := 1; i < len(drifts)-1; i++ { - drift += drifts[i] - } - return drift / time.Duration(measurements), nil -} diff --git a/discvx/table.go b/discvx/table.go deleted file mode 100644 index e92d77d1..00000000 --- a/discvx/table.go +++ /dev/null @@ -1,733 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package discover implements the Node Discovery Protocol. -// -// The Node Discovery protocol provides a way to find RLPx nodes that -// can be connected to. It uses a Kademlia-like protocol to maintain a -// distributed database of the IDs and endpoints of all listening -// nodes. -package discvx - -import ( - "context" - "crypto/rand" - "encoding/binary" - "fmt" - "net/netip" - "slices" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/p2p/discover/v4wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/netutil" -) - -const ( - alpha = 3 // Kademlia concurrency factor - bucketSize = 16 // Kademlia bucket size - maxReplacements = 10 // Size of per-bucket replacement list - - // We keep buckets for the upper 1/15 of distances because - // it's very unlikely we'll ever encounter a node that's closer. - HashBits = len(common.Hash{}) * 8 - NBuckets = HashBits / 15 // Number of buckets - bucketMinDistance = HashBits - NBuckets // Log distance of closest bucket - - // IP address limits. 
- bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24 - tableIPLimit, tableSubnet = 10, 24 - - seedMinTableTime = 5 * time.Minute - seedCount = 30 - seedMaxAge = 5 * 24 * time.Hour -) - -// Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps -// itself up-to-date by verifying the liveness of neighbors and requesting their node -// records when announcements of a new record version are received. -type Table struct { - mutex sync.Mutex // protects buckets, bucket content, nursery, rand - buckets [NBuckets]*bucket // index of known nodes by distance - nursery []*enode.Node // bootstrap nodes - rand reseedingRandom // source of randomness, periodically reseeded - ips netutil.DistinctNetSet - revalidation tableRevalidation - - db *enode.DB // database of known nodes - net transport - cfg Config - log log.Logger - - // loop channels - refreshReq chan chan struct{} - revalResponseCh chan revalidationResponse - addNodeCh chan addNodeOp - addNodeHandled chan bool - trackRequestCh chan trackRequestOp - initDone chan struct{} - closeReq chan struct{} - closed chan struct{} - - nodeAddedHook func(*bucket, *tableNode) - nodeRemovedHook func(*bucket, *tableNode) -} - -// transport is implemented by the UDP transports. -type transport interface { - Self() *enode.Node - RequestENR(*enode.Node) (*enode.Node, error) - lookupRandom() []*enode.Node - lookupSelf() []*enode.Node - ping(*enode.Node) (seq uint64, err error) -} - -// bucket contains nodes, ordered by their last activity. the entry -// that was most recently active is the first element in entries. 
-type bucket struct { - entries []*tableNode // live entries, sorted by time of last contact - replacements []*tableNode // recently seen nodes to be used if revalidation fails - ips netutil.DistinctNetSet - index int -} - -type addNodeOp struct { - node *enode.Node - isInbound bool - forceSetLive bool // for tests -} - -type trackRequestOp struct { - node *enode.Node - foundNodes []*enode.Node - success bool -} - -func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) { - cfg = cfg.withDefaults() - tab := &Table{ - net: t, - db: db, - cfg: cfg, - log: cfg.Log, - refreshReq: make(chan chan struct{}), - revalResponseCh: make(chan revalidationResponse), - addNodeCh: make(chan addNodeOp), - addNodeHandled: make(chan bool), - trackRequestCh: make(chan trackRequestOp), - initDone: make(chan struct{}), - closeReq: make(chan struct{}), - closed: make(chan struct{}), - ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, - } - for i := range tab.buckets { - tab.buckets[i] = &bucket{ - index: i, - ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}, - } - } - tab.rand.seed() - tab.revalidation.init(&cfg) - - // initial table content - if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil { - return nil, err - } - tab.loadSeedNodes() - - return tab, nil -} - -// Nodes returns all nodes contained in the table. -func (tab *Table) Nodes() [][]BucketNode { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - nodes := make([][]BucketNode, len(tab.buckets)) - for i, b := range &tab.buckets { - nodes[i] = make([]BucketNode, len(b.entries)) - for j, n := range b.entries { - nodes[i][j] = BucketNode{ - Node: n.Node, - Checks: int(n.livenessChecks), - Live: n.isValidatedLive, - AddedToTable: n.addedToTable, - AddedToBucket: n.addedToBucket, - } - } - } - return nodes -} - -func (tab *Table) self() *enode.Node { - return tab.net.Self() -} - -// getNode returns the node with the given ID or nil if it isn't in the table. 
-func (tab *Table) getNode(id enode.ID) *enode.Node { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - b := tab.bucket(id) - for _, e := range b.entries { - if e.ID() == id { - return e.Node - } - } - return nil -} - -// close terminates the network listener and flushes the node database. -func (tab *Table) close() { - close(tab.closeReq) - <-tab.closed -} - -// setFallbackNodes sets the initial points of contact. These nodes -// are used to connect to the network if the table is empty and there -// are no known nodes in the database. -func (tab *Table) setFallbackNodes(nodes []*enode.Node) error { - nursery := make([]*enode.Node, 0, len(nodes)) - for _, n := range nodes { - if err := n.ValidateComplete(); err != nil { - return fmt.Errorf("bad bootstrap node %q: %v", n, err) - } - if tab.cfg.NetRestrict != nil && !tab.cfg.NetRestrict.ContainsAddr(n.IPAddr()) { - tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IPAddr()) - continue - } - nursery = append(nursery, n) - } - tab.nursery = nursery - return nil -} - -// isInitDone returns whether the table's initial seeding procedure has completed. -func (tab *Table) isInitDone() bool { - select { - case <-tab.initDone: - return true - default: - return false - } -} - -func (tab *Table) refresh() <-chan struct{} { - done := make(chan struct{}) - select { - case tab.refreshReq <- done: - case <-tab.closeReq: - close(done) - } - return done -} - -// findnodeByID returns the n nodes in the table that are closest to the given id. -// This is used by the FINDNODE/v4 handler. -// -// The preferLive parameter says whether the caller wants liveness-checked results. If -// preferLive is true and the table contains any verified nodes, the result will not -// contain unverified nodes. However, if there are no verified nodes at all, the result -// will contain unverified nodes. 
-func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - // Scan all buckets. There might be a better way to do this, but there aren't that many - // buckets, so this solution should be fine. The worst-case complexity of this loop - // is O(tab.len() * nresults). - nodes := &nodesByDistance{target: target} - liveNodes := &nodesByDistance{target: target} - for _, b := range &tab.buckets { - for _, n := range b.entries { - nodes.push(n.Node, nresults) - if preferLive && n.isValidatedLive { - liveNodes.push(n.Node, nresults) - } - } - } - - if preferLive && len(liveNodes.entries) > 0 { - return liveNodes - } - return nodes -} - -// appendBucketNodes adds nodes at the given distance to the result slice. -// This is used by the FINDNODE/v5 handler. -func (tab *Table) appendBucketNodes(dist uint, result []*enode.Node, checkLive bool) []*enode.Node { - if dist > 256 { - return result - } - if dist == 0 { - return append(result, tab.self()) - } - - tab.mutex.Lock() - for _, n := range tab.bucketAtDistance(int(dist)).entries { - if !checkLive || n.isValidatedLive { - result = append(result, n.Node) - } - } - tab.mutex.Unlock() - - // Shuffle result to avoid always returning same nodes in FINDNODE/v5. - tab.rand.Shuffle(len(result), func(i, j int) { - result[i], result[j] = result[j], result[i] - }) - return result -} - -// len returns the number of nodes in the table. -func (tab *Table) len() (n int) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - for _, b := range &tab.buckets { - n += len(b.entries) - } - return n -} - -// addFoundNode adds a node which may not be live. If the bucket has space available, -// adding the node succeeds immediately. Otherwise, the node is added to the replacements -// list. -// -// The caller must not hold tab.mutex. 
-func (tab *Table) addFoundNode(n *enode.Node, forceSetLive bool) bool { - op := addNodeOp{node: n, isInbound: false, forceSetLive: forceSetLive} - select { - case tab.addNodeCh <- op: - return <-tab.addNodeHandled - case <-tab.closeReq: - return false - } -} - -// addInboundNode adds a node from an inbound contact. If the bucket has no space, the -// node is added to the replacements list. -// -// There is an additional safety measure: if the table is still initializing the node is -// not added. This prevents an attack where the table could be filled by just sending ping -// repeatedly. -// -// The caller must not hold tab.mutex. -func (tab *Table) addInboundNode(n *enode.Node) bool { - op := addNodeOp{node: n, isInbound: true} - select { - case tab.addNodeCh <- op: - return <-tab.addNodeHandled - case <-tab.closeReq: - return false - } -} - -func (tab *Table) trackRequest(n *enode.Node, success bool, foundNodes []*enode.Node) { - op := trackRequestOp{n, foundNodes, success} - select { - case tab.trackRequestCh <- op: - case <-tab.closeReq: - } -} - -// loop is the main loop of Table. -func (tab *Table) loop() { - var ( - refresh = time.NewTimer(tab.nextRefreshTime()) - refreshDone = make(chan struct{}) // where doRefresh reports completion - waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs - revalTimer = mclock.NewAlarm(tab.cfg.Clock) - reseedRandTimer = time.NewTicker(10 * time.Minute) - ) - defer refresh.Stop() - defer revalTimer.Stop() - defer reseedRandTimer.Stop() - - // Start initial refresh. 
- go tab.doRefresh(refreshDone) - -loop: - for { - nextTime := tab.revalidation.run(tab, tab.cfg.Clock.Now()) - revalTimer.Schedule(nextTime) - - select { - case <-reseedRandTimer.C: - tab.rand.seed() - - case <-revalTimer.C(): - - case r := <-tab.revalResponseCh: - tab.revalidation.handleResponse(tab, r) - - case op := <-tab.addNodeCh: - tab.mutex.Lock() - ok := tab.handleAddNode(op) - tab.mutex.Unlock() - tab.addNodeHandled <- ok - - case op := <-tab.trackRequestCh: - tab.handleTrackRequest(op) - - case <-refresh.C: - if refreshDone == nil { - refreshDone = make(chan struct{}) - go tab.doRefresh(refreshDone) - } - - case req := <-tab.refreshReq: - waiting = append(waiting, req) - if refreshDone == nil { - refreshDone = make(chan struct{}) - go tab.doRefresh(refreshDone) - } - - case <-refreshDone: - for _, ch := range waiting { - close(ch) - } - waiting, refreshDone = nil, nil - refresh.Reset(tab.nextRefreshTime()) - - case <-tab.closeReq: - break loop - } - } - - if refreshDone != nil { - <-refreshDone - } - for _, ch := range waiting { - close(ch) - } - close(tab.closed) -} - -// doRefresh performs a lookup for a random target to keep buckets full. seed nodes are -// inserted if the table is empty (initial bootstrap or discarded faulty peers). -func (tab *Table) doRefresh(done chan struct{}) { - defer close(done) - - // Load nodes from the database and insert - // them. This should yield a few previously seen nodes that are - // (hopefully) still alive. - tab.loadSeedNodes() - - // Run self lookup to discover new neighbor nodes. - tab.net.lookupSelf() - - // The Kademlia paper specifies that the bucket refresh should - // perform a lookup in the least recently used bucket. We cannot - // adhere to this because the findnode target is a 512bit value - // (not hash-sized) and it is not easily possible to generate a - // sha3 preimage that falls into a chosen bucket. - // We perform a few lookups with a random target instead. 
- for i := 0; i < 3; i++ { - tab.net.lookupRandom() - } -} - -func (tab *Table) loadSeedNodes() { - seeds := tab.db.QuerySeeds(seedCount, seedMaxAge) - seeds = append(seeds, tab.nursery...) - for i := range seeds { - seed := seeds[i] - if tab.log.Enabled(context.Background(), log.LevelTrace) { - age := time.Since(tab.db.LastPongReceived(seed.ID(), seed.IPAddr())) - addr, _ := seed.UDPEndpoint() - tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", addr, "age", age) - } - tab.mutex.Lock() - tab.handleAddNode(addNodeOp{node: seed, isInbound: false}) - tab.mutex.Unlock() - } -} - -func (tab *Table) nextRefreshTime() time.Duration { - half := tab.cfg.RefreshInterval / 2 - return half + time.Duration(tab.rand.Int63n(int64(half))) -} - -// bucket returns the bucket for the given node ID hash. -func (tab *Table) bucket(id enode.ID) *bucket { - d := enode.LogDist(tab.self().ID(), id) - return tab.bucketAtDistance(d) -} - -func (tab *Table) bucketAtDistance(d int) *bucket { - if d <= bucketMinDistance { - return tab.buckets[0] - } - return tab.buckets[d-bucketMinDistance-1] -} - -func (tab *Table) addIP(b *bucket, ip netip.Addr) bool { - if !ip.IsValid() || ip.IsUnspecified() { - return false // Nodes without IP cannot be added. - } - if netutil.AddrIsLAN(ip) { - return true - } - if !tab.ips.AddAddr(ip) { - tab.log.Debug("IP exceeds table limit", "ip", ip) - return false - } - if !b.ips.AddAddr(ip) { - tab.log.Debug("IP exceeds bucket limit", "ip", ip) - tab.ips.RemoveAddr(ip) - return false - } - return true -} - -func (tab *Table) removeIP(b *bucket, ip netip.Addr) { - if netutil.AddrIsLAN(ip) { - return - } - tab.ips.RemoveAddr(ip) - b.ips.RemoveAddr(ip) -} - -// handleAddNode adds the node in the request to the table, if there is space. -// The caller must hold tab.mutex. 
-func (tab *Table) handleAddNode(req addNodeOp) bool { - if req.node.ID() == tab.self().ID() { - return false - } - // For nodes from inbound contact, there is an additional safety measure: if the table - // is still initializing the node is not added. - if req.isInbound && !tab.isInitDone() { - return false - } - - b := tab.bucket(req.node.ID()) - n, _ := tab.bumpInBucket(b, req.node, req.isInbound) - if n != nil { - // Already in bucket. - return false - } - if len(b.entries) >= bucketSize { - // Bucket full, maybe add as replacement. - tab.addReplacement(b, req.node) - return false - } - if !tab.addIP(b, req.node.IPAddr()) { - // Can't add: IP limit reached. - return false - } - - // Add to bucket. - wn := &tableNode{Node: req.node} - if req.forceSetLive { - wn.livenessChecks = 1 - wn.isValidatedLive = true - } - b.entries = append(b.entries, wn) - b.replacements = deleteNode(b.replacements, wn.ID()) - tab.nodeAdded(b, wn) - return true -} - -// addReplacement adds n to the replacement cache of bucket b. 
-func (tab *Table) addReplacement(b *bucket, n *enode.Node) { - if containsID(b.replacements, n.ID()) { - // TODO: update ENR - return - } - if !tab.addIP(b, n.IPAddr()) { - return - } - - wn := &tableNode{Node: n, addedToTable: time.Now()} - var removed *tableNode - b.replacements, removed = pushNode(b.replacements, wn, maxReplacements) - if removed != nil { - tab.removeIP(b, removed.IPAddr()) - } -} - -func (tab *Table) nodeAdded(b *bucket, n *tableNode) { - if n.addedToTable == (time.Time{}) { - n.addedToTable = time.Now() - } - n.addedToBucket = time.Now() - tab.revalidation.nodeAdded(tab, n) - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(b, n) - } - if metrics.Enabled { - bucketsCounter[b.index].Inc(1) - } -} - -func (tab *Table) nodeRemoved(b *bucket, n *tableNode) { - tab.revalidation.nodeRemoved(n) - if tab.nodeRemovedHook != nil { - tab.nodeRemovedHook(b, n) - } - if metrics.Enabled { - bucketsCounter[b.index].Dec(1) - } -} - -// deleteInBucket removes node n from the table. -// If there are replacement nodes in the bucket, the node is replaced. -func (tab *Table) deleteInBucket(b *bucket, id enode.ID) *tableNode { - index := slices.IndexFunc(b.entries, func(e *tableNode) bool { return e.ID() == id }) - if index == -1 { - // Entry has been removed already. - return nil - } - - // Remove the node. - n := b.entries[index] - b.entries = slices.Delete(b.entries, index, index+1) - tab.removeIP(b, n.IPAddr()) - tab.nodeRemoved(b, n) - - // Add replacement. 
- if len(b.replacements) == 0 { - tab.log.Debug("Removed dead node", "b", b.index, "id", n.ID(), "ip", n.IPAddr()) - return nil - } - rindex := tab.rand.Intn(len(b.replacements)) - rep := b.replacements[rindex] - b.replacements = slices.Delete(b.replacements, rindex, rindex+1) - b.entries = append(b.entries, rep) - tab.nodeAdded(b, rep) - tab.log.Debug("Replaced dead node", "b", b.index, "id", n.ID(), "ip", n.IPAddr(), "r", rep.ID(), "rip", rep.IPAddr()) - return rep -} - -// bumpInBucket updates a node record if it exists in the bucket. -// The second return value reports whether the node's endpoint (IP/port) was updated. -func (tab *Table) bumpInBucket(b *bucket, newRecord *enode.Node, isInbound bool) (n *tableNode, endpointChanged bool) { - i := slices.IndexFunc(b.entries, func(elem *tableNode) bool { - return elem.ID() == newRecord.ID() - }) - if i == -1 { - return nil, false // not in bucket - } - n = b.entries[i] - - // For inbound updates (from the node itself) we accept any change, even if it sets - // back the sequence number. For found nodes (!isInbound), seq has to advance. Note - // this check also ensures found discv4 nodes (which always have seq=0) can't be - // updated. - if newRecord.Seq() <= n.Seq() && !isInbound { - return n, false - } - - // Check endpoint update against IP limits. - ipchanged := newRecord.IPAddr() != n.IPAddr() - portchanged := newRecord.UDP() != n.UDP() - if ipchanged { - tab.removeIP(b, n.IPAddr()) - if !tab.addIP(b, newRecord.IPAddr()) { - // It doesn't fit with the limit, put the previous record back. - tab.addIP(b, n.IPAddr()) - return n, false - } - } - - // Apply update. - n.Node = newRecord - if ipchanged || portchanged { - // Ensure node is revalidated quickly for endpoint changes. 
- tab.revalidation.nodeEndpointChanged(tab, n) - return n, true - } - return n, false -} - -func (tab *Table) handleTrackRequest(op trackRequestOp) { - var fails int - if op.success { - // Reset failure counter because it counts _consecutive_ failures. - tab.db.UpdateFindFails(op.node.ID(), op.node.IPAddr(), 0) - } else { - fails = tab.db.FindFails(op.node.ID(), op.node.IPAddr()) - fails++ - tab.db.UpdateFindFails(op.node.ID(), op.node.IPAddr(), fails) - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - - b := tab.bucket(op.node.ID()) - // Remove the node from the local table if it fails to return anything useful too - // many times, but only if there are enough other nodes in the bucket. This latter - // condition specifically exists to make bootstrapping in smaller test networks more - // reliable. - if fails >= maxFindnodeFailures && len(b.entries) >= bucketSize/4 { - tab.deleteInBucket(b, op.node.ID()) - } - - // Add found nodes. - for _, n := range op.foundNodes { - tab.handleAddNode(addNodeOp{n, false, false}) - } -} - -// pushNode adds n to the front of list, keeping at most max items. -func pushNode(list []*tableNode, n *tableNode, max int) ([]*tableNode, *tableNode) { - if len(list) < max { - list = append(list, nil) - } - removed := list[len(list)-1] - copy(list[1:], list) - list[0] = n - return list, removed -} - -// GenRandomPublicKey generates a public key that, when hashed with Keccak256, -// yields an [enode.ID] that has a common prefix length of targetCPL. -func GenRandomPublicKey(targetID enode.ID, targetCPL int) (v4wire.Pubkey, error) { - targetPrefix := binary.BigEndian.Uint16(targetID[:]) - - // For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B. - // Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L - // to our randomly generated prefix. 
- toggledTargetPrefix := targetPrefix ^ (uint16(0x8000) >> targetCPL) - - randUInt16Bytes := new([2]byte) - _, err := rand.Read(randUInt16Bytes[:]) - if err != nil { - return [64]byte{}, fmt.Errorf("read random bytes: %w", err) - } - randUint16 := binary.BigEndian.Uint16(randUInt16Bytes[:]) - - // generate a mask that starts with targetCPL + 1 ones and the rest zeroes - mask := (^uint16(0)) << (16 - (targetCPL + 1)) - - // toggledTargetPrefix & mask: use the first targetCPL + 1 bits from the toggledTargetPrefix - // randUint16 & ^mask: use the remaining bits from the random uint16 - // by or'ing them together with | we composed the final prefix - prefix := (toggledTargetPrefix & mask) | (randUint16 & ^mask) - - // Lookup the preimage in the key prefix map - key := keyPrefixMap[prefix] - - // generate public key - out := new([64]byte) - binary.BigEndian.PutUint32(out[:], key) - return *out, nil -} diff --git a/discvx/table_reval.go b/discvx/table_reval.go deleted file mode 100644 index c3a70faf..00000000 --- a/discvx/table_reval.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2024 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discvx - -import ( - "fmt" - "math" - "slices" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -const never = mclock.AbsTime(math.MaxInt64) - -const slowRevalidationFactor = 3 - -// tableRevalidation implements the node revalidation process. -// It tracks all nodes contained in Table, and schedules sending PING to them. -type tableRevalidation struct { - fast revalidationList - slow revalidationList - activeReq map[enode.ID]struct{} -} - -type revalidationResponse struct { - n *tableNode - newRecord *enode.Node - didRespond bool -} - -func (tr *tableRevalidation) init(cfg *Config) { - tr.activeReq = make(map[enode.ID]struct{}) - tr.fast.nextTime = never - tr.fast.interval = cfg.PingInterval - tr.fast.name = "fast" - tr.slow.nextTime = never - tr.slow.interval = cfg.PingInterval * slowRevalidationFactor - tr.slow.name = "slow" -} - -// nodeAdded is called when the table receives a new node. -func (tr *tableRevalidation) nodeAdded(tab *Table, n *tableNode) { - tr.fast.push(n, tab.cfg.Clock.Now(), &tab.rand) -} - -// nodeRemoved is called when a node was removed from the table. -func (tr *tableRevalidation) nodeRemoved(n *tableNode) { - if n.revalList == nil { - panic(fmt.Errorf("removed node %v has nil revalList", n.ID())) - } - n.revalList.remove(n) -} - -// nodeEndpointChanged is called when a change in IP or port is detected. -func (tr *tableRevalidation) nodeEndpointChanged(tab *Table, n *tableNode) { - n.isValidatedLive = false - tr.moveToList(&tr.fast, n, tab.cfg.Clock.Now(), &tab.rand) -} - -// run performs node revalidation. -// It returns the next time it should be invoked, which is used in the Table main loop -// to schedule a timer. However, run can be called at any time. 
-func (tr *tableRevalidation) run(tab *Table, now mclock.AbsTime) (nextTime mclock.AbsTime) { - reval := func(list *revalidationList) { - if list.nextTime <= now { - if n := list.get(now, &tab.rand, tr.activeReq); n != nil { - tr.startRequest(tab, n) - } - // Update nextTime regardless if any requests were started because - // current value has passed. - list.schedule(now, &tab.rand) - } - } - reval(&tr.fast) - reval(&tr.slow) - - return min(tr.fast.nextTime, tr.slow.nextTime) -} - -// startRequest spawns a revalidation request for node n. -func (tr *tableRevalidation) startRequest(tab *Table, n *tableNode) { - if _, ok := tr.activeReq[n.ID()]; ok { - panic(fmt.Errorf("duplicate startRequest (node %v)", n.ID())) - } - tr.activeReq[n.ID()] = struct{}{} - resp := revalidationResponse{n: n} - - // Fetch the node while holding lock. - tab.mutex.Lock() - node := n.Node - tab.mutex.Unlock() - - go tab.doRevalidate(resp, node) -} - -func (tab *Table) doRevalidate(resp revalidationResponse, node *enode.Node) { - // Ping the selected node and wait for a pong response. - remoteSeq, err := tab.net.ping(node) - resp.didRespond = err == nil - - // Also fetch record if the node replied and returned a higher sequence number. - if remoteSeq > node.Seq() { - newrec, err := tab.net.RequestENR(node) - if err != nil { - tab.log.Debug("ENR request failed", "id", node.ID(), "err", err) - } else { - resp.newRecord = newrec - } - } - - select { - case tab.revalResponseCh <- resp: - case <-tab.closed: - } -} - -// handleResponse processes the result of a revalidation request. -func (tr *tableRevalidation) handleResponse(tab *Table, resp revalidationResponse) { - var ( - now = tab.cfg.Clock.Now() - n = resp.n - b = tab.bucket(n.ID()) - ) - delete(tr.activeReq, n.ID()) - - // If the node was removed from the table while getting checked, we need to stop - // processing here to avoid re-adding it. - if n.revalList == nil { - return - } - - // Store potential seeds in database. 
- // This is done via defer to avoid holding Table lock while writing to DB. - defer func() { - if n.isValidatedLive && n.livenessChecks > 5 { - tab.db.UpdateNode(resp.n.Node) - } - }() - - // Remaining logic needs access to Table internals. - tab.mutex.Lock() - defer tab.mutex.Unlock() - - if !resp.didRespond { - n.livenessChecks /= 3 - if n.livenessChecks <= 0 { - tab.deleteInBucket(b, n.ID()) - } else { - tab.log.Debug("Node revalidation failed", "b", b.index, "id", n.ID(), "checks", n.livenessChecks, "q", n.revalList.name) - tr.moveToList(&tr.fast, n, now, &tab.rand) - } - return - } - - // The node responded. - n.livenessChecks++ - n.isValidatedLive = true - tab.log.Debug("Node revalidated", "b", b.index, "id", n.ID(), "checks", n.livenessChecks, "q", n.revalList.name) - var endpointChanged bool - if resp.newRecord != nil { - _, endpointChanged = tab.bumpInBucket(b, resp.newRecord, false) - } - - // Node moves to slow list if it passed and hasn't changed. - if !endpointChanged { - tr.moveToList(&tr.slow, n, now, &tab.rand) - } -} - -// moveToList ensures n is in the 'dest' list. -func (tr *tableRevalidation) moveToList(dest *revalidationList, n *tableNode, now mclock.AbsTime, rand randomSource) { - if n.revalList == dest { - return - } - if n.revalList != nil { - n.revalList.remove(n) - } - dest.push(n, now, rand) -} - -// revalidationList holds a list nodes and the next revalidation time. -type revalidationList struct { - nodes []*tableNode - nextTime mclock.AbsTime - interval time.Duration - name string -} - -// get returns a random node from the queue. Nodes in the 'exclude' map are not returned. 
-func (list *revalidationList) get(now mclock.AbsTime, rand randomSource, exclude map[enode.ID]struct{}) *tableNode { - if len(list.nodes) == 0 { - return nil - } - for i := 0; i < len(list.nodes)*3; i++ { - n := list.nodes[rand.Intn(len(list.nodes))] - _, excluded := exclude[n.ID()] - if !excluded { - return n - } - } - return nil -} - -func (list *revalidationList) schedule(now mclock.AbsTime, rand randomSource) { - list.nextTime = now.Add(time.Duration(rand.Int63n(int64(list.interval)))) -} - -func (list *revalidationList) push(n *tableNode, now mclock.AbsTime, rand randomSource) { - list.nodes = append(list.nodes, n) - if list.nextTime == never { - list.schedule(now, rand) - } - n.revalList = list -} - -func (list *revalidationList) remove(n *tableNode) { - i := slices.Index(list.nodes, n) - if i == -1 { - panic(fmt.Errorf("node %v not found in list", n.ID())) - } - list.nodes = slices.Delete(list.nodes, i, i+1) - if len(list.nodes) == 0 { - list.nextTime = never - } - n.revalList = nil -} - -func (list *revalidationList) contains(id enode.ID) bool { - return slices.ContainsFunc(list.nodes, func(n *tableNode) bool { - return n.ID() == id - }) -} diff --git a/discvx/table_reval_test.go b/discvx/table_reval_test.go deleted file mode 100644 index ace7b76e..00000000 --- a/discvx/table_reval_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2024 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "net" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" -) - -// This test checks that revalidation can handle a node disappearing while -// a request is active. -func TestRevalidation_nodeRemoved(t *testing.T) { - var ( - clock mclock.Simulated - transport = newPingRecorder() - tab, db = newInactiveTestTable(transport, Config{Clock: &clock}) - tr = &tab.revalidation - ) - defer db.Close() - - // Add a node to the table. - node := nodeAtDistance(tab.self().ID(), 255, net.IP{77, 88, 99, 1}) - tab.handleAddNode(addNodeOp{node: node}) - - // Start a revalidation request. Schedule once to get the next start time, - // then advance the clock to that point and schedule again to start. - next := tr.run(tab, clock.Now()) - clock.Run(time.Duration(next + 1)) - tr.run(tab, clock.Now()) - if len(tr.activeReq) != 1 { - t.Fatal("revalidation request did not start:", tr.activeReq) - } - - // Delete the node. - tab.deleteInBucket(tab.bucket(node.ID()), node.ID()) - - // Now finish the revalidation request. - var resp revalidationResponse - select { - case resp = <-tab.revalResponseCh: - case <-time.After(1 * time.Second): - t.Fatal("timed out waiting for revalidation") - } - tr.handleResponse(tab, resp) - - // Ensure the node was not re-added to the table. - if tab.getNode(node.ID()) != nil { - t.Fatal("node was re-added to Table") - } - if tr.fast.contains(node.ID()) || tr.slow.contains(node.ID()) { - t.Fatal("removed node contained in revalidation list") - } -} - -// This test checks that nodes with an updated endpoint remain in the fast revalidation list. 
-func TestRevalidation_endpointUpdate(t *testing.T) { - var ( - clock mclock.Simulated - transport = newPingRecorder() - tab, db = newInactiveTestTable(transport, Config{Clock: &clock}) - tr = &tab.revalidation - ) - defer db.Close() - - // Add node to table. - node := nodeAtDistance(tab.self().ID(), 255, net.IP{77, 88, 99, 1}) - tab.handleAddNode(addNodeOp{node: node}) - - // Update the record in transport, including endpoint update. - record := node.Record() - record.Set(enr.IP{100, 100, 100, 100}) - record.Set(enr.UDP(9999)) - nodev2 := enode.SignNull(record, node.ID()) - transport.updateRecord(nodev2) - - // Start a revalidation request. Schedule once to get the next start time, - // then advance the clock to that point and schedule again to start. - next := tr.run(tab, clock.Now()) - clock.Run(time.Duration(next + 1)) - tr.run(tab, clock.Now()) - if len(tr.activeReq) != 1 { - t.Fatal("revalidation request did not start:", tr.activeReq) - } - - // Now finish the revalidation request. - var resp revalidationResponse - select { - case resp = <-tab.revalResponseCh: - case <-time.After(1 * time.Second): - t.Fatal("timed out waiting for revalidation") - } - tr.handleResponse(tab, resp) - - if tr.fast.nodes[0].ID() != node.ID() { - t.Fatal("node not contained in fast revalidation list") - } - if tr.fast.nodes[0].isValidatedLive { - t.Fatal("node is marked live after endpoint change") - } -} diff --git a/discvx/table_test.go b/discvx/table_test.go deleted file mode 100644 index b3a4ca35..00000000 --- a/discvx/table_test.go +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "crypto/ecdsa" - "fmt" - "math/rand" - "net" - "reflect" - "slices" - "testing" - "testing/quick" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/internal/testlog" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/netutil" -) - -func TestTable_pingReplace(t *testing.T) { - run := func(newNodeResponding, lastInBucketResponding bool) { - name := fmt.Sprintf("newNodeResponding=%t/lastInBucketResponding=%t", newNodeResponding, lastInBucketResponding) - t.Run(name, func(t *testing.T) { - t.Parallel() - testPingReplace(t, newNodeResponding, lastInBucketResponding) - }) - } - - run(true, true) - run(false, true) - run(true, false) - run(false, false) -} - -func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) { - simclock := new(mclock.Simulated) - transport := newPingRecorder() - tab, db := newTestTable(transport, Config{ - Clock: simclock, - Log: testlog.Logger(t, log.LevelTrace), - }) - defer db.Close() - defer tab.close() - - <-tab.initDone - - // Fill up the sender's bucket. 
- replacementNodeKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8") - replacementNode := enode.NewV4(&replacementNodeKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99) - last := fillBucket(tab, replacementNode.ID()) - tab.mutex.Lock() - nodeEvents := newNodeEventRecorder(128) - tab.nodeAddedHook = nodeEvents.nodeAdded - tab.nodeRemovedHook = nodeEvents.nodeRemoved - tab.mutex.Unlock() - - // The revalidation process should replace - // this node in the bucket if it is unresponsive. - transport.dead[last.ID()] = !lastInBucketIsResponding - transport.dead[replacementNode.ID()] = !newNodeIsResponding - - // Add replacement node to table. - tab.addFoundNode(replacementNode, false) - - t.Log("last:", last.ID()) - t.Log("replacement:", replacementNode.ID()) - - // Wait until the last node was pinged. - waitForRevalidationPing(t, transport, tab, last.ID()) - - if !lastInBucketIsResponding { - if !nodeEvents.waitNodeAbsent(last.ID(), 2*time.Second) { - t.Error("last node was not removed") - } - if !nodeEvents.waitNodePresent(replacementNode.ID(), 2*time.Second) { - t.Error("replacement node was not added") - } - - // If a replacement is expected, we also need to wait until the replacement node - // was pinged and added/removed. - waitForRevalidationPing(t, transport, tab, replacementNode.ID()) - if !newNodeIsResponding { - if !nodeEvents.waitNodeAbsent(replacementNode.ID(), 2*time.Second) { - t.Error("replacement node was not removed") - } - } - } - - // Check bucket content. 
- tab.mutex.Lock() - defer tab.mutex.Unlock() - wantSize := bucketSize - if !lastInBucketIsResponding && !newNodeIsResponding { - wantSize-- - } - bucket := tab.bucket(replacementNode.ID()) - if l := len(bucket.entries); l != wantSize { - t.Errorf("wrong bucket size after revalidation: got %d, want %d", l, wantSize) - } - if ok := containsID(bucket.entries, last.ID()); ok != lastInBucketIsResponding { - t.Errorf("revalidated node found: %t, want: %t", ok, lastInBucketIsResponding) - } - wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding - if ok := containsID(bucket.entries, replacementNode.ID()); ok != wantNewEntry { - t.Errorf("replacement node found: %t, want: %t", ok, wantNewEntry) - } -} - -// waitForRevalidationPing waits until a PING message is sent to a node with the given id. -func waitForRevalidationPing(t *testing.T, transport *pingRecorder, tab *Table, id enode.ID) *enode.Node { - t.Helper() - - simclock := tab.cfg.Clock.(*mclock.Simulated) - maxAttempts := tab.len() * 8 - for i := 0; i < maxAttempts; i++ { - simclock.Run(tab.cfg.PingInterval * slowRevalidationFactor) - p := transport.waitPing(2 * time.Second) - if p == nil { - continue - } - if id == (enode.ID{}) || p.ID() == id { - return p - } - } - t.Fatalf("Table did not ping node %v (%d attempts)", id, maxAttempts) - return nil -} - -// This checks that the table-wide IP limit is applied correctly. -func TestTable_IPLimit(t *testing.T) { - transport := newPingRecorder() - tab, db := newTestTable(transport, Config{}) - defer db.Close() - defer tab.close() - - for i := 0; i < tableIPLimit+1; i++ { - n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)}) - tab.addFoundNode(n, false) - } - if tab.len() > tableIPLimit { - t.Errorf("too many nodes in table") - } - checkIPLimitInvariant(t, tab) -} - -// This checks that the per-bucket IP limit is applied correctly. 
-func TestTable_BucketIPLimit(t *testing.T) { - transport := newPingRecorder() - tab, db := newTestTable(transport, Config{}) - defer db.Close() - defer tab.close() - - d := 3 - for i := 0; i < bucketIPLimit+1; i++ { - n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)}) - tab.addFoundNode(n, false) - } - if tab.len() > bucketIPLimit { - t.Errorf("too many nodes in table") - } - checkIPLimitInvariant(t, tab) -} - -// checkIPLimitInvariant checks that ip limit sets contain an entry for every -// node in the table and no extra entries. -func checkIPLimitInvariant(t *testing.T, tab *Table) { - t.Helper() - - tabset := netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit} - for _, b := range tab.buckets { - for _, n := range b.entries { - tabset.AddAddr(n.IPAddr()) - } - } - if tabset.String() != tab.ips.String() { - t.Errorf("table IP set is incorrect:\nhave: %v\nwant: %v", tab.ips, tabset) - } -} - -func TestTable_findnodeByID(t *testing.T) { - t.Parallel() - - test := func(test *closeTest) bool { - // for any node table, Target and N - transport := newPingRecorder() - tab, db := newTestTable(transport, Config{}) - defer db.Close() - defer tab.close() - fillTable(tab, test.All, true) - - // check that closest(Target, N) returns nodes - result := tab.findnodeByID(test.Target, test.N, false).entries - if hasDuplicates(result) { - t.Errorf("result contains duplicates") - return false - } - if !sortedByDistanceTo(test.Target, result) { - t.Errorf("result is not sorted by distance to target") - return false - } - - // check that the number of results is min(N, tablen) - wantN := test.N - if tlen := tab.len(); tlen < test.N { - wantN = tlen - } - if len(result) != wantN { - t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN) - return false - } else if len(result) == 0 { - return true // no need to check distance - } - - // check that the result nodes have minimum distance to target. 
- for _, b := range tab.buckets { - for _, n := range b.entries { - if containsID(result, n.ID()) { - continue // don't run the check below for nodes in result - } - farthestResult := result[len(result)-1].ID() - if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 { - t.Errorf("table contains node that is closer to target but it's not in result") - t.Logf(" Target: %v", test.Target) - t.Logf(" Farthest Result: %v", farthestResult) - t.Logf(" ID: %v", n.ID()) - return false - } - } - } - return true - } - if err := quick.Check(test, quickcfg()); err != nil { - t.Error(err) - } -} - -type closeTest struct { - Self enode.ID - Target enode.ID - All []*enode.Node - N int -} - -func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value { - t := &closeTest{ - Self: gen(enode.ID{}, rand).(enode.ID), - Target: gen(enode.ID{}, rand).(enode.ID), - N: rand.Intn(bucketSize), - } - for _, id := range gen([]enode.ID{}, rand).([]enode.ID) { - r := new(enr.Record) - r.Set(enr.IPv4Addr(netutil.RandomAddr(rand, true))) - n := enode.SignNull(r, id) - t.All = append(t.All, n) - } - return reflect.ValueOf(t) -} - -func TestTable_addInboundNode(t *testing.T) { - tab, db := newTestTable(newPingRecorder(), Config{}) - <-tab.initDone - defer db.Close() - defer tab.close() - - // Insert two nodes. - n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1}) - n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2}) - tab.addFoundNode(n1, false) - tab.addFoundNode(n2, false) - checkBucketContent(t, tab, []*enode.Node{n1, n2}) - - // Add a changed version of n2. The bucket should be updated. - newrec := n2.Record() - newrec.Set(enr.IP{99, 99, 99, 99}) - n2v2 := enode.SignNull(newrec, n2.ID()) - tab.addInboundNode(n2v2) - checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) - - // Try updating n2 without sequence number change. The update is accepted - // because it's inbound. 
- newrec = n2.Record() - newrec.Set(enr.IP{100, 100, 100, 100}) - newrec.SetSeq(n2.Seq()) - n2v3 := enode.SignNull(newrec, n2.ID()) - tab.addInboundNode(n2v3) - checkBucketContent(t, tab, []*enode.Node{n1, n2v3}) -} - -func TestTable_addFoundNode(t *testing.T) { - tab, db := newTestTable(newPingRecorder(), Config{}) - <-tab.initDone - defer db.Close() - defer tab.close() - - // Insert two nodes. - n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1}) - n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2}) - tab.addFoundNode(n1, false) - tab.addFoundNode(n2, false) - checkBucketContent(t, tab, []*enode.Node{n1, n2}) - - // Add a changed version of n2. The bucket should be updated. - newrec := n2.Record() - newrec.Set(enr.IP{99, 99, 99, 99}) - n2v2 := enode.SignNull(newrec, n2.ID()) - tab.addFoundNode(n2v2, false) - checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) - - // Try updating n2 without a sequence number change. - // The update should not be accepted. - newrec = n2.Record() - newrec.Set(enr.IP{100, 100, 100, 100}) - newrec.SetSeq(n2.Seq()) - n2v3 := enode.SignNull(newrec, n2.ID()) - tab.addFoundNode(n2v3, false) - checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) -} - -// This test checks that discv4 nodes can update their own endpoint via PING. -func TestTable_addInboundNodeUpdateV4Accept(t *testing.T) { - tab, db := newTestTable(newPingRecorder(), Config{}) - <-tab.initDone - defer db.Close() - defer tab.close() - - // Add a v4 node. - key, _ := crypto.HexToECDSA("dd3757a8075e88d0f2b1431e7d3c5b1562e1c0aab9643707e8cbfcc8dae5cfe3") - n1 := enode.NewV4(&key.PublicKey, net.IP{88, 77, 66, 1}, 9000, 9000) - tab.addInboundNode(n1) - checkBucketContent(t, tab, []*enode.Node{n1}) - - // Add an updated version with changed IP. - // The update will be accepted because it is inbound. 
- n1v2 := enode.NewV4(&key.PublicKey, net.IP{99, 99, 99, 99}, 9000, 9000) - tab.addInboundNode(n1v2) - checkBucketContent(t, tab, []*enode.Node{n1v2}) -} - -// This test checks that discv4 node entries will NOT be updated when a -// changed record is found. -func TestTable_addFoundNodeV4UpdateReject(t *testing.T) { - tab, db := newTestTable(newPingRecorder(), Config{}) - <-tab.initDone - defer db.Close() - defer tab.close() - - // Add a v4 node. - key, _ := crypto.HexToECDSA("dd3757a8075e88d0f2b1431e7d3c5b1562e1c0aab9643707e8cbfcc8dae5cfe3") - n1 := enode.NewV4(&key.PublicKey, net.IP{88, 77, 66, 1}, 9000, 9000) - tab.addFoundNode(n1, false) - checkBucketContent(t, tab, []*enode.Node{n1}) - - // Add an updated version with changed IP. - // The update won't be accepted because it isn't inbound. - n1v2 := enode.NewV4(&key.PublicKey, net.IP{99, 99, 99, 99}, 9000, 9000) - tab.addFoundNode(n1v2, false) - checkBucketContent(t, tab, []*enode.Node{n1}) -} - -func checkBucketContent(t *testing.T, tab *Table, nodes []*enode.Node) { - t.Helper() - - b := tab.bucket(nodes[0].ID()) - if reflect.DeepEqual(unwrapNodes(b.entries), nodes) { - return - } - t.Log("wrong bucket content. have nodes:") - for _, n := range b.entries { - t.Logf(" %v (seq=%v, ip=%v)", n.ID(), n.Seq(), n.IPAddr()) - } - t.Log("want nodes:") - for _, n := range nodes { - t.Logf(" %v (seq=%v, ip=%v)", n.ID(), n.Seq(), n.IPAddr()) - } - t.FailNow() - - // Also check IP limits. - checkIPLimitInvariant(t, tab) -} - -// This test checks that ENR updates happen during revalidation. If a node in the table -// announces a new sequence number, the new record should be pulled. -func TestTable_revalidateSyncRecord(t *testing.T) { - transport := newPingRecorder() - tab, db := newTestTable(transport, Config{ - Clock: new(mclock.Simulated), - Log: testlog.Logger(t, log.LevelTrace), - }) - <-tab.initDone - defer db.Close() - defer tab.close() - - // Insert a node. 
- var r enr.Record - r.Set(enr.IP(net.IP{127, 0, 0, 1})) - id := enode.ID{1} - n1 := enode.SignNull(&r, id) - tab.addFoundNode(n1, false) - - // Update the node record. - r.Set(enr.WithEntry("foo", "bar")) - n2 := enode.SignNull(&r, id) - transport.updateRecord(n2) - - // Wait for revalidation. We wait for the node to be revalidated two times - // in order to synchronize with the update in the table. - waitForRevalidationPing(t, transport, tab, n2.ID()) - waitForRevalidationPing(t, transport, tab, n2.ID()) - - intable := tab.getNode(id) - if !reflect.DeepEqual(intable, n2) { - t.Fatalf("table contains old record with seq %d, want seq %d", intable.Seq(), n2.Seq()) - } -} - -func TestNodesPush(t *testing.T) { - var target enode.ID - n1 := nodeAtDistance(target, 255, intIP(1)) - n2 := nodeAtDistance(target, 254, intIP(2)) - n3 := nodeAtDistance(target, 253, intIP(3)) - perm := [][]*enode.Node{ - {n3, n2, n1}, - {n3, n1, n2}, - {n2, n3, n1}, - {n2, n1, n3}, - {n1, n3, n2}, - {n1, n2, n3}, - } - - // Insert all permutations into lists with size limit 3. - for _, nodes := range perm { - list := nodesByDistance{target: target} - for _, n := range nodes { - list.push(n, 3) - } - if !slices.EqualFunc(list.entries, perm[0], nodeIDEqual) { - t.Fatal("not equal") - } - } - - // Insert all permutations into lists with size limit 2. - for _, nodes := range perm { - list := nodesByDistance{target: target} - for _, n := range nodes { - list.push(n, 2) - } - if !slices.EqualFunc(list.entries, perm[0][:2], nodeIDEqual) { - t.Fatal("not equal") - } - } -} - -func nodeIDEqual[N nodeType](n1, n2 N) bool { - return n1.ID() == n2.ID() -} - -// gen wraps quick.Value so it's easier to use. -// it generates a random value of the given value's type. 
-func gen(typ interface{}, rand *rand.Rand) interface{} { - v, ok := quick.Value(reflect.TypeOf(typ), rand) - if !ok { - panic(fmt.Sprintf("couldn't generate random value of type %T", typ)) - } - return v.Interface() -} - -func quickcfg() *quick.Config { - return &quick.Config{ - MaxCount: 5000, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - } -} - -func newkey() *ecdsa.PrivateKey { - key, err := crypto.GenerateKey() - if err != nil { - panic("couldn't generate key: " + err.Error()) - } - return key -} diff --git a/discvx/table_util_test.go b/discvx/table_util_test.go deleted file mode 100644 index 75e7aa89..00000000 --- a/discvx/table_util_test.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discvx - -import ( - "bytes" - "crypto/ecdsa" - "encoding/hex" - "errors" - "fmt" - "math/rand" - "net" - "slices" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/p2p/discover/v4wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" -) - -var nullNode *enode.Node - -func init() { - var r enr.Record - r.Set(enr.IP{0, 0, 0, 0}) - nullNode = enode.SignNull(&r, enode.ID{}) -} - -func newTestTable(t transport, cfg Config) (*Table, *enode.DB) { - tab, db := newInactiveTestTable(t, cfg) - go tab.loop() - return tab, db -} - -// newInactiveTestTable creates a Table without running the main loop. -func newInactiveTestTable(t transport, cfg Config) (*Table, *enode.DB) { - db, _ := enode.OpenDB("") - tab, _ := newTable(t, db, cfg) - return tab, db -} - -// nodeAtDistance creates a node for which enode.LogDist(base, n.id) == ld. -func nodeAtDistance(base enode.ID, ld int, ip net.IP) *enode.Node { - var r enr.Record - r.Set(enr.IP(ip)) - r.Set(enr.UDP(30303)) - return enode.SignNull(&r, idAtDistance(base, ld)) -} - -// nodesAtDistance creates n nodes for which enode.LogDist(base, node.ID()) == ld. 
-func nodesAtDistance(base enode.ID, ld int, n int) []*enode.Node { - results := make([]*enode.Node, n) - for i := range results { - results[i] = nodeAtDistance(base, ld, intIP(i)) - } - return results -} - -func nodesToRecords(nodes []*enode.Node) []*enr.Record { - records := make([]*enr.Record, len(nodes)) - for i := range nodes { - records[i] = nodes[i].Record() - } - return records -} - -// idAtDistance returns a random hash such that enode.LogDist(a, b) == n -func idAtDistance(a enode.ID, n int) (b enode.ID) { - if n == 0 { - return a - } - // flip bit at position n, fill the rest with random bits - b = a - pos := len(a) - n/8 - 1 - bit := byte(0x01) << (byte(n%8) - 1) - if bit == 0 { - pos++ - bit = 0x80 - } - b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits - for i := pos + 1; i < len(a); i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} - -// intIP returns a LAN IP address based on i. -func intIP(i int) net.IP { - return net.IP{10, 0, byte(i >> 8), byte(i & 0xFF)} -} - -// fillBucket inserts nodes into the given bucket until it is full. -func fillBucket(tab *Table, id enode.ID) (last *tableNode) { - ld := enode.LogDist(tab.self().ID(), id) - b := tab.bucket(id) - for len(b.entries) < bucketSize { - node := nodeAtDistance(tab.self().ID(), ld, intIP(ld)) - if !tab.addFoundNode(node, false) { - panic("node not added") - } - } - return b.entries[bucketSize-1] -} - -// fillTable adds nodes the table to the end of their corresponding bucket -// if the bucket is not full. The caller must not hold tab.mutex. 
-func fillTable(tab *Table, nodes []*enode.Node, setLive bool) { - for _, n := range nodes { - tab.addFoundNode(n, setLive) - } -} - -type pingRecorder struct { - mu sync.Mutex - cond *sync.Cond - dead map[enode.ID]bool - records map[enode.ID]*enode.Node - pinged []*enode.Node - n *enode.Node -} - -func newPingRecorder() *pingRecorder { - var r enr.Record - r.Set(enr.IP{0, 0, 0, 0}) - n := enode.SignNull(&r, enode.ID{}) - - t := &pingRecorder{ - dead: make(map[enode.ID]bool), - records: make(map[enode.ID]*enode.Node), - n: n, - } - t.cond = sync.NewCond(&t.mu) - return t -} - -// updateRecord updates a node record. Future calls to ping and -// RequestENR will return this record. -func (t *pingRecorder) updateRecord(n *enode.Node) { - t.mu.Lock() - defer t.mu.Unlock() - t.records[n.ID()] = n -} - -// Stubs to satisfy the transport interface. -func (t *pingRecorder) Self() *enode.Node { return nullNode } -func (t *pingRecorder) lookupSelf() []*enode.Node { return nil } -func (t *pingRecorder) lookupRandom() []*enode.Node { return nil } - -func (t *pingRecorder) waitPing(timeout time.Duration) *enode.Node { - t.mu.Lock() - defer t.mu.Unlock() - - // Wake up the loop on timeout. - var timedout atomic.Bool - timer := time.AfterFunc(timeout, func() { - timedout.Store(true) - t.cond.Broadcast() - }) - defer timer.Stop() - - // Wait for a ping. - for { - if timedout.Load() { - return nil - } - if len(t.pinged) > 0 { - n := t.pinged[0] - t.pinged = append(t.pinged[:0], t.pinged[1:]...) - return n - } - t.cond.Wait() - } -} - -// ping simulates a ping request. -func (t *pingRecorder) ping(n *enode.Node) (seq uint64, err error) { - t.mu.Lock() - defer t.mu.Unlock() - - t.pinged = append(t.pinged, n) - t.cond.Broadcast() - - if t.dead[n.ID()] { - return 0, ErrTimeout - } - if t.records[n.ID()] != nil { - seq = t.records[n.ID()].Seq() - } - return seq, nil -} - -// RequestENR simulates an ENR request. 
-func (t *pingRecorder) RequestENR(n *enode.Node) (*enode.Node, error) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.dead[n.ID()] || t.records[n.ID()] == nil { - return nil, ErrTimeout - } - return t.records[n.ID()], nil -} - -func hasDuplicates(slice []*enode.Node) bool { - seen := make(map[enode.ID]bool, len(slice)) - for i, e := range slice { - if e == nil { - panic(fmt.Sprintf("nil *Node at %d", i)) - } - if seen[e.ID()] { - return true - } - seen[e.ID()] = true - } - return false -} - -// checkNodesEqual checks whether the two given node lists contain the same nodes. -func checkNodesEqual(got, want []*enode.Node) error { - if len(got) == len(want) { - for i := range got { - if !nodeEqual(got[i], want[i]) { - goto NotEqual - } - } - } - return nil - -NotEqual: - output := new(bytes.Buffer) - fmt.Fprintf(output, "got %d nodes:\n", len(got)) - for _, n := range got { - fmt.Fprintf(output, " %v %v\n", n.ID(), n) - } - fmt.Fprintf(output, "want %d:\n", len(want)) - for _, n := range want { - fmt.Fprintf(output, " %v %v\n", n.ID(), n) - } - return errors.New(output.String()) -} - -func nodeEqual(n1 *enode.Node, n2 *enode.Node) bool { - return n1.ID() == n2.ID() && n1.IPAddr() == n2.IPAddr() -} - -func sortByID[N nodeType](nodes []N) { - slices.SortFunc(nodes, func(a, b N) int { - return bytes.Compare(a.ID().Bytes(), b.ID().Bytes()) - }) -} - -func sortedByDistanceTo(distbase enode.ID, slice []*enode.Node) bool { - return slices.IsSortedFunc(slice, func(a, b *enode.Node) int { - return enode.DistCmp(distbase, a.ID(), b.ID()) - }) -} - -// hexEncPrivkey decodes h as a private key. -func hexEncPrivkey(h string) *ecdsa.PrivateKey { - b, err := hex.DecodeString(h) - if err != nil { - panic(err) - } - key, err := crypto.ToECDSA(b) - if err != nil { - panic(err) - } - return key -} - -// hexEncPubkey decodes h as a public key. 
-func hexEncPubkey(h string) (ret v4wire.Pubkey) { - b, err := hex.DecodeString(h) - if err != nil { - panic(err) - } - if len(b) != len(ret) { - panic("invalid length") - } - copy(ret[:], b) - return ret -} - -type nodeEventRecorder struct { - evc chan recordedNodeEvent -} - -type recordedNodeEvent struct { - node *tableNode - added bool -} - -func newNodeEventRecorder(buffer int) *nodeEventRecorder { - return &nodeEventRecorder{ - evc: make(chan recordedNodeEvent, buffer), - } -} - -func (set *nodeEventRecorder) nodeAdded(b *bucket, n *tableNode) { - select { - case set.evc <- recordedNodeEvent{n, true}: - default: - panic("no space in event buffer") - } -} - -func (set *nodeEventRecorder) nodeRemoved(b *bucket, n *tableNode) { - select { - case set.evc <- recordedNodeEvent{n, false}: - default: - panic("no space in event buffer") - } -} - -func (set *nodeEventRecorder) waitNodePresent(id enode.ID, timeout time.Duration) bool { - return set.waitNodeEvent(id, timeout, true) -} - -func (set *nodeEventRecorder) waitNodeAbsent(id enode.ID, timeout time.Duration) bool { - return set.waitNodeEvent(id, timeout, false) -} - -func (set *nodeEventRecorder) waitNodeEvent(id enode.ID, timeout time.Duration, added bool) bool { - timer := time.NewTimer(timeout) - defer timer.Stop() - for { - select { - case ev := <-set.evc: - if ev.node.ID() == id && ev.added == added { - return true - } - case <-timer.C: - return false - } - } -} diff --git a/discvx/v4_lookup_test.go b/discvx/v4_lookup_test.go deleted file mode 100644 index ac263093..00000000 --- a/discvx/v4_lookup_test.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "crypto/ecdsa" - "fmt" - "net/netip" - "slices" - "sync" - "testing" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/p2p/discover/v4wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" -) - -func TestUDPv4_Lookup(t *testing.T) { - t.Parallel() - test := newUDPTest(t) - - // Lookup on empty table returns no nodes. - targetKey, _ := v4wire.DecodePubkey(crypto.S256(), lookupTestnet.target) - if results := test.udp.LookupPubkey(targetKey); len(results) > 0 { - t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results) - } - - // Seed table with initial node. - fillTable(test.table, []*enode.Node{lookupTestnet.node(256, 0)}, true) - - // Start the lookup. - resultC := make(chan []*enode.Node, 1) - go func() { - resultC <- test.udp.LookupPubkey(targetKey) - test.close() - }() - - // Answer lookup packets. - serveTestnet(test, lookupTestnet) - - // Verify result nodes. - results := <-resultC - t.Logf("results:") - for _, e := range results { - t.Logf(" ld=%d, %x", enode.LogDist(lookupTestnet.target.ID(), e.ID()), e.ID().Bytes()) - } - if len(results) != bucketSize { - t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize) - } - checkLookupResults(t, lookupTestnet, results) -} - -func TestUDPv4_LookupIterator(t *testing.T) { - t.Parallel() - test := newUDPTest(t) - var wg sync.WaitGroup - defer func() { - test.close() - wg.Wait() - }() - - // Seed table with initial nodes. 
- bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256])) - for i := range lookupTestnet.dists[256] { - bootnodes[i] = lookupTestnet.node(256, i) - } - fillTable(test.table, bootnodes, true) - wg.Add(1) - go func() { - serveTestnet(test, lookupTestnet) - wg.Done() - }() - - // Create the iterator and collect the nodes it yields. - iter := test.udp.RandomNodes() - seen := make(map[enode.ID]*enode.Node) - for limit := lookupTestnet.len(); iter.Next() && len(seen) < limit; { - seen[iter.Node().ID()] = iter.Node() - } - iter.Close() - - // Check that all nodes in lookupTestnet were seen by the iterator. - results := make([]*enode.Node, 0, len(seen)) - for _, n := range seen { - results = append(results, n) - } - sortByID(results) - want := lookupTestnet.nodes() - if err := checkNodesEqual(results, want); err != nil { - t.Fatal(err) - } -} - -// TestUDPv4_LookupIteratorClose checks that lookupIterator ends when its Close -// method is called. -func TestUDPv4_LookupIteratorClose(t *testing.T) { - t.Parallel() - test := newUDPTest(t) - var wg sync.WaitGroup - defer func() { - test.close() - wg.Wait() - }() - - // Seed table with initial nodes. 
- bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256])) - for i := range lookupTestnet.dists[256] { - bootnodes[i] = lookupTestnet.node(256, i) - } - fillTable(test.table, bootnodes, true) - - wg.Add(1) - go func() { - serveTestnet(test, lookupTestnet) - wg.Done() - }() - - it := test.udp.RandomNodes() - if ok := it.Next(); !ok || it.Node() == nil { - t.Fatalf("iterator didn't return any node") - } - - it.Close() - - ncalls := 0 - for ; ncalls < 100 && it.Next(); ncalls++ { - if it.Node() == nil { - t.Error("iterator returned Node() == nil node after Next() == true") - } - } - t.Logf("iterator returned %d nodes after close", ncalls) - if it.Next() { - t.Errorf("Next() == true after close and %d more calls", ncalls) - } - if n := it.Node(); n != nil { - t.Errorf("iterator returned non-nil node after close and %d more calls", ncalls) - } -} - -func serveTestnet(test *udpTest, testnet *preminedTestnet) { - for done := false; !done; { - done = test.waitPacketOut(func(p v4wire.Packet, to netip.AddrPort, hash []byte) { - n, key := testnet.nodeByAddr(to) - switch p.(type) { - case *v4wire.Ping: - test.packetInFrom(nil, key, to, &v4wire.Pong{Expiration: futureExp, ReplyTok: hash}) - case *v4wire.Findnode: - dist := enode.LogDist(n.ID(), testnet.target.ID()) - nodes := testnet.nodesAtDistance(dist - 1) - test.packetInFrom(nil, key, to, &v4wire.Neighbors{Expiration: futureExp, Nodes: nodes}) - } - }) - } -} - -// checkLookupResults verifies that the results of a lookup are the closest nodes to -// the testnet's target. 
-func checkLookupResults(t *testing.T, tn *preminedTestnet, results []*enode.Node) { - t.Helper() - t.Logf("results:") - for _, e := range results { - t.Logf(" ld=%d, %x", enode.LogDist(tn.target.ID(), e.ID()), e.ID().Bytes()) - } - if hasDuplicates(results) { - t.Errorf("result set contains duplicate entries") - } - if !sortedByDistanceTo(tn.target.ID(), results) { - t.Errorf("result set not sorted by distance to target") - } - wantNodes := tn.closest(len(results)) - if err := checkNodesEqual(results, wantNodes); err != nil { - t.Error(err) - } -} - -// This is the test network for the Lookup test. -// The nodes were obtained by running lookupTestnet.mine with a random NodeID as target. -var lookupTestnet = &preminedTestnet{ - target: hexEncPubkey("5d485bdcbe9bc89314a10ae9231e429d33853e3a8fa2af39f5f827370a2e4185e344ace5d16237491dad41f278f1d3785210d29ace76cd627b9147ee340b1125"), - dists: [257][]*ecdsa.PrivateKey{ - 251: { - hexEncPrivkey("29738ba0c1a4397d6a65f292eee07f02df8e58d41594ba2be3cf84ce0fc58169"), - hexEncPrivkey("511b1686e4e58a917f7f848e9bf5539d206a68f5ad6b54b552c2399fe7d174ae"), - hexEncPrivkey("d09e5eaeec0fd596236faed210e55ef45112409a5aa7f3276d26646080dcfaeb"), - hexEncPrivkey("c1e20dbbf0d530e50573bd0a260b32ec15eb9190032b4633d44834afc8afe578"), - hexEncPrivkey("ed5f38f5702d92d306143e5d9154fb21819777da39af325ea359f453d179e80b"), - }, - 252: { - hexEncPrivkey("1c9b1cafbec00848d2c174b858219914b42a7d5c9359b1ca03fd650e8239ae94"), - hexEncPrivkey("e0e1e8db4a6f13c1ffdd3e96b72fa7012293ced187c9dcdcb9ba2af37a46fa10"), - hexEncPrivkey("3d53823e0a0295cb09f3e11d16c1b44d07dd37cec6f739b8df3a590189fe9fb9"), - }, - 253: { - hexEncPrivkey("2d0511ae9bf590166597eeab86b6f27b1ab761761eaea8965487b162f8703847"), - hexEncPrivkey("6cfbd7b8503073fc3dbdb746a7c672571648d3bd15197ccf7f7fef3d904f53a2"), - hexEncPrivkey("a30599b12827b69120633f15b98a7f6bc9fc2e9a0fd6ae2ebb767c0e64d743ab"), - hexEncPrivkey("14a98db9b46a831d67eff29f3b85b1b485bb12ae9796aea98d91be3dc78d8a91"), - 
hexEncPrivkey("2369ff1fc1ff8ca7d20b17e2673adc3365c3674377f21c5d9dafaff21fe12e24"), - hexEncPrivkey("9ae91101d6b5048607f41ec0f690ef5d09507928aded2410aabd9237aa2727d7"), - hexEncPrivkey("05e3c59090a3fd1ae697c09c574a36fcf9bedd0afa8fe3946f21117319ca4973"), - hexEncPrivkey("06f31c5ea632658f718a91a1b1b9ae4b7549d7b3bc61cbc2be5f4a439039f3ad"), - }, - 254: { - hexEncPrivkey("dec742079ec00ff4ec1284d7905bc3de2366f67a0769431fd16f80fd68c58a7c"), - hexEncPrivkey("ff02c8861fa12fbd129d2a95ea663492ef9c1e51de19dcfbbfe1c59894a28d2b"), - hexEncPrivkey("4dded9e4eefcbce4262be4fd9e8a773670ab0b5f448f286ec97dfc8cf681444a"), - hexEncPrivkey("750d931e2a8baa2c9268cb46b7cd851f4198018bed22f4dceb09dd334a2395f6"), - hexEncPrivkey("ce1435a956a98ffec484cd11489c4f165cf1606819ab6b521cee440f0c677e9e"), - hexEncPrivkey("996e7f8d1638be92d7328b4770f47e5420fc4bafecb4324fd33b1f5d9f403a75"), - hexEncPrivkey("ebdc44e77a6cc0eb622e58cf3bb903c3da4c91ca75b447b0168505d8fc308b9c"), - hexEncPrivkey("46bd1eddcf6431bea66fc19ebc45df191c1c7d6ed552dcdc7392885009c322f0"), - }, - 255: { - hexEncPrivkey("da8645f90826e57228d9ea72aff84500060ad111a5d62e4af831ed8e4b5acfb8"), - hexEncPrivkey("3c944c5d9af51d4c1d43f5d0f3a1a7ef65d5e82744d669b58b5fed242941a566"), - hexEncPrivkey("5ebcde76f1d579eebf6e43b0ffe9157e65ffaa391175d5b9aa988f47df3e33da"), - hexEncPrivkey("97f78253a7d1d796e4eaabce721febcc4550dd68fb11cc818378ba807a2cb7de"), - hexEncPrivkey("a38cd7dc9b4079d1c0406afd0fdb1165c285f2c44f946eca96fc67772c988c7d"), - hexEncPrivkey("d64cbb3ffdf712c372b7a22a176308ef8f91861398d5dbaf326fd89c6eaeef1c"), - hexEncPrivkey("d269609743ef29d6446e3355ec647e38d919c82a4eb5837e442efd7f4218944f"), - hexEncPrivkey("d8f7bcc4a530efde1d143717007179e0d9ace405ddaaf151c4d863753b7fd64c"), - }, - 256: { - hexEncPrivkey("8c5b422155d33ea8e9d46f71d1ad3e7b24cb40051413ffa1a81cff613d243ba9"), - hexEncPrivkey("937b1af801def4e8f5a3a8bd225a8bcff1db764e41d3e177f2e9376e8dd87233"), - hexEncPrivkey("120260dce739b6f71f171da6f65bc361b5fad51db74cf02d3e973347819a6518"), - 
hexEncPrivkey("1fa56cf25d4b46c2bf94e82355aa631717b63190785ac6bae545a88aadc304a9"), - hexEncPrivkey("3c38c503c0376f9b4adcbe935d5f4b890391741c764f61b03cd4d0d42deae002"), - hexEncPrivkey("3a54af3e9fa162bc8623cdf3e5d9b70bf30ade1d54cc3abea8659aba6cff471f"), - hexEncPrivkey("6799a02ea1999aefdcbcc4d3ff9544478be7365a328d0d0f37c26bd95ade0cda"), - hexEncPrivkey("e24a7bc9051058f918646b0f6e3d16884b2a55a15553b89bab910d55ebc36116"), - }, - }, -} - -type preminedTestnet struct { - target v4wire.Pubkey - dists [HashBits + 1][]*ecdsa.PrivateKey -} - -func (tn *preminedTestnet) len() int { - n := 0 - for _, keys := range tn.dists { - n += len(keys) - } - return n -} - -func (tn *preminedTestnet) nodes() []*enode.Node { - result := make([]*enode.Node, 0, tn.len()) - for dist, keys := range tn.dists { - for index := range keys { - result = append(result, tn.node(dist, index)) - } - } - sortByID(result) - return result -} - -func (tn *preminedTestnet) node(dist, index int) *enode.Node { - key := tn.dists[dist][index] - rec := new(enr.Record) - rec.Set(enr.IP{127, byte(dist >> 8), byte(dist), byte(index)}) - rec.Set(enr.UDP(5000)) - enode.SignV4(rec, key) - n, _ := enode.New(enode.ValidSchemes, rec) - return n -} - -func (tn *preminedTestnet) nodeByAddr(addr netip.AddrPort) (*enode.Node, *ecdsa.PrivateKey) { - ip := addr.Addr().As4() - dist := int(ip[1])<<8 + int(ip[2]) - index := int(ip[3]) - key := tn.dists[dist][index] - return tn.node(dist, index), key -} - -func (tn *preminedTestnet) nodesAtDistance(dist int) []v4wire.Node { - result := make([]v4wire.Node, len(tn.dists[dist])) - for i := range result { - result[i] = nodeToRPC(tn.node(dist, i)) - } - return result -} - -func (tn *preminedTestnet) neighborsAtDistances(base *enode.Node, distances []uint, elems int) []*enode.Node { - var result []*enode.Node - for d := range lookupTestnet.dists { - for i := range lookupTestnet.dists[d] { - n := lookupTestnet.node(d, i) - d := enode.LogDist(base.ID(), n.ID()) - if 
slices.Contains(distances, uint(d)) { - result = append(result, n) - if len(result) >= elems { - return result - } - } - } - } - return result -} - -func (tn *preminedTestnet) closest(n int) (nodes []*enode.Node) { - for d := range tn.dists { - for i := range tn.dists[d] { - nodes = append(nodes, tn.node(d, i)) - } - } - slices.SortFunc(nodes, func(a, b *enode.Node) int { - return enode.DistCmp(tn.target.ID(), a.ID(), b.ID()) - }) - return nodes[:n] -} - -var _ = (*preminedTestnet).mine // avoid linter warning about mine being dead code. - -// mine generates a testnet struct literal with nodes at -// various distances to the network's target. -func (tn *preminedTestnet) mine() { - // Clear existing slices first (useful when re-mining). - for i := range tn.dists { - tn.dists[i] = nil - } - - targetSha := tn.target.ID() - found, need := 0, 40 - for found < need { - k := newkey() - ld := enode.LogDist(targetSha, v4wire.EncodePubkey(&k.PublicKey).ID()) - if len(tn.dists[ld]) < 8 { - tn.dists[ld] = append(tn.dists[ld], k) - found++ - fmt.Printf("found ID with ld %d (%d/%d)\n", ld, found, need) - } - } - fmt.Printf("&preminedTestnet{\n") - fmt.Printf(" target: hexEncPubkey(\"%x\"),\n", tn.target[:]) - fmt.Printf(" dists: [%d][]*ecdsa.PrivateKey{\n", len(tn.dists)) - for ld, ns := range tn.dists { - if len(ns) == 0 { - continue - } - fmt.Printf(" %d: {\n", ld) - for _, key := range ns { - fmt.Printf(" hexEncPrivkey(\"%x\"),\n", crypto.FromECDSA(key)) - } - fmt.Printf(" },\n") - } - fmt.Printf(" },\n") - fmt.Printf("}\n") -} diff --git a/discvx/v4_udp.go b/discvx/v4_udp.go deleted file mode 100644 index 299d44fd..00000000 --- a/discvx/v4_udp.go +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "bytes" - "container/list" - "context" - "crypto/ecdsa" - crand "crypto/rand" - "errors" - "fmt" - "io" - "net/netip" - "sync" - "time" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover/v4wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/netutil" -) - -// Errors -var ( - errExpired = errors.New("expired") - errUnsolicitedReply = errors.New("unsolicited reply") - errUnknownNode = errors.New("unknown node") - ErrTimeout = errors.New("RPC timeout") - errClockWarp = errors.New("reply deadline too far in the future") - errClosed = errors.New("socket closed") - errLowPort = errors.New("low port") - errNoUDPEndpoint = errors.New("node has no UDP endpoint") -) - -const ( - respTimeout = 500 * time.Millisecond - expiration = 20 * time.Second - bondExpiration = 24 * time.Hour - - maxFindnodeFailures = 5 // nodes exceeding this limit are dropped - ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP - ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning - driftThreshold = 10 * time.Second // Allowed clock drift before warning user - - // Discovery packets are defined to be no larger 
than 1280 bytes. - // Packets larger than this size will be cut at the end and treated - // as invalid because their hash won't match. - maxPacketSize = 1280 -) - -// UDPv4 implements the v4 wire protocol. -type UDPv4 struct { - conn UDPConn - log log.Logger - netrestrict *netutil.Netlist - priv *ecdsa.PrivateKey - localNode *enode.LocalNode - db *enode.DB - tab *Table - closeOnce sync.Once - wg sync.WaitGroup - - addReplyMatcher chan *replyMatcher - gotreply chan reply - closeCtx context.Context - cancelCloseCtx context.CancelFunc -} - -// replyMatcher represents a pending reply. -// -// Some implementations of the protocol wish to send more than one -// reply packet to findnode. In general, any neighbors packet cannot -// be matched up with a specific findnode packet. -// -// Our implementation handles this by storing a callback function for -// each pending reply. Incoming packets from a node are dispatched -// to all callback functions for that node. -type replyMatcher struct { - // these fields must match in the reply. - from enode.ID - ip netip.Addr - ptype byte - - // time when the request must complete - deadline time.Time - - // callback is called when a matching reply arrives. If it returns matched == true, the - // reply was acceptable. The second return value indicates whether the callback should - // be removed from the pending reply queue. If it returns false, the reply is considered - // incomplete and the callback will be invoked again for the next matching reply. - callback replyMatchFunc - - // errc receives nil when the callback indicates completion or an - // error if no further reply is received within the timeout. - errc chan error - - // reply contains the most recent reply. This field is safe for reading after errc has - // received a value. - reply v4wire.Packet -} - -type replyMatchFunc func(v4wire.Packet) (matched bool, requestDone bool) - -// reply is a reply packet from a certain node. 
-type reply struct { - from enode.ID - ip netip.Addr - data v4wire.Packet - // loop indicates whether there was - // a matching request by sending on this channel. - matched chan<- bool -} - -func ListenV4(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { - cfg = cfg.withDefaults() - closeCtx, cancel := context.WithCancel(context.Background()) - t := &UDPv4{ - conn: newMeteredConn(c), - priv: cfg.PrivateKey, - netrestrict: cfg.NetRestrict, - localNode: ln, - db: ln.Database(), - gotreply: make(chan reply), - addReplyMatcher: make(chan *replyMatcher), - closeCtx: closeCtx, - cancelCloseCtx: cancel, - log: cfg.Log, - } - - tab, err := newTable(t, ln.Database(), cfg) - if err != nil { - return nil, err - } - t.tab = tab - go tab.loop() - - t.wg.Add(2) - go t.loop() - go t.readLoop(cfg.Unhandled) - return t, nil -} - -// Self returns the local node. -func (t *UDPv4) Self() *enode.Node { - return t.localNode.Node() -} - -// Close shuts down the socket and aborts any running queries. -func (t *UDPv4) Close() { - t.closeOnce.Do(func() { - t.cancelCloseCtx() - t.conn.Close() - t.wg.Wait() - t.tab.close() - }) -} - -// Resolve searches for a specific node with the given ID and tries to get the most recent -// version of the node record for it. It returns n if the node could not be resolved. -func (t *UDPv4) Resolve(n *enode.Node) *enode.Node { - // Try asking directly. This works if the node is still responding on the endpoint we have. - if rn, err := t.RequestENR(n); err == nil { - return rn - } - // Check table for the ID, we might have a newer version there. - if intable := t.tab.getNode(n.ID()); intable != nil && intable.Seq() > n.Seq() { - n = intable - if rn, err := t.RequestENR(n); err == nil { - return rn - } - } - // Otherwise perform a network lookup. 
- var key enode.Secp256k1 - if n.Load(&key) != nil { - return n // no secp256k1 key - } - result := t.LookupPubkey((*ecdsa.PublicKey)(&key)) - for _, rn := range result { - if rn.ID() == n.ID() { - if rn, err := t.RequestENR(rn); err == nil { - return rn - } - } - } - return n -} - -func (t *UDPv4) ourEndpoint() v4wire.Endpoint { - node := t.Self() - addr, ok := node.UDPEndpoint() - if !ok { - return v4wire.Endpoint{} - } - return v4wire.NewEndpoint(addr, uint16(node.TCP())) -} - -// Ping sends a ping message to the given node. -func (t *UDPv4) Ping(n *enode.Node) error { - _, err := t.ping(n) - return err -} - -// ping sends a ping message to the given node and waits for a reply. -func (t *UDPv4) ping(n *enode.Node) (seq uint64, err error) { - addr, ok := n.UDPEndpoint() - if !ok { - return 0, errNoUDPEndpoint - } - rm := t.sendPing(n.ID(), addr, nil) - if err = <-rm.errc; err == nil { - seq = rm.reply.(*v4wire.Pong).ENRSeq - } - return seq, err -} - -// sendPing sends a ping message to the given node and invokes the callback -// when the reply arrives. -func (t *UDPv4) sendPing(toid enode.ID, toaddr netip.AddrPort, callback func()) *replyMatcher { - req := t.makePing(toaddr) - packet, hash, err := v4wire.Encode(t.priv, req) - if err != nil { - errc := make(chan error, 1) - errc <- err - return &replyMatcher{errc: errc} - } - // Add a matcher for the reply to the pending reply queue. Pongs are matched if they - // reference the ping we're about to send. - rm := t.pending(toid, toaddr.Addr(), v4wire.PongPacket, func(p v4wire.Packet) (matched bool, requestDone bool) { - matched = bytes.Equal(p.(*v4wire.Pong).ReplyTok, hash) - if matched && callback != nil { - callback() - } - return matched, matched - }) - // Send the packet. 
- t.localNode.UDPContact(toaddr) - t.write(toaddr, toid, req.Name(), packet) - return rm -} - -func (t *UDPv4) makePing(toaddr netip.AddrPort) *v4wire.Ping { - return &v4wire.Ping{ - Version: 4, - From: t.ourEndpoint(), - To: v4wire.NewEndpoint(toaddr, 0), - Expiration: uint64(time.Now().Add(expiration).Unix()), - ENRSeq: t.localNode.Node().Seq(), - } -} - -// LookupPubkey finds the closest nodes to the given public key. -func (t *UDPv4) LookupPubkey(key *ecdsa.PublicKey) []*enode.Node { - if t.tab.len() == 0 { - // All nodes were dropped, refresh. The very first query will hit this - // case and run the bootstrapping logic. - <-t.tab.refresh() - } - return t.newLookup(t.closeCtx, v4wire.EncodePubkey(key)).run() -} - -// RandomNodes is an iterator yielding nodes from a random walk of the DHT. -func (t *UDPv4) RandomNodes() enode.Iterator { - return newLookupIterator(t.closeCtx, t.newRandomLookup) -} - -// lookupRandom implements transport. -func (t *UDPv4) lookupRandom() []*enode.Node { - return t.newRandomLookup(t.closeCtx).run() -} - -// lookupSelf implements transport. -func (t *UDPv4) lookupSelf() []*enode.Node { - pubkey := v4wire.EncodePubkey(&t.priv.PublicKey) - return t.newLookup(t.closeCtx, pubkey).run() -} - -func (t *UDPv4) newRandomLookup(ctx context.Context) *lookup { - var target v4wire.Pubkey - crand.Read(target[:]) - return t.newLookup(ctx, target) -} - -func (t *UDPv4) newLookup(ctx context.Context, targetKey v4wire.Pubkey) *lookup { - target := enode.ID(crypto.Keccak256Hash(targetKey[:])) - it := newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) { - addr, ok := n.UDPEndpoint() - if !ok { - return nil, errNoUDPEndpoint - } - return t.FindNode(n.ID(), addr, targetKey) - }) - return it -} - -// FindNode sends a findnode request to the given node and waits until -// the node has sent up to k neighbors. 
-func (t *UDPv4) FindNode(toid enode.ID, toAddrPort netip.AddrPort, target v4wire.Pubkey) ([]*enode.Node, error) { - t.ensureBond(toid, toAddrPort) - - // Add a matcher for 'neighbours' replies to the pending reply queue. The matcher is - // active until enough nodes have been received. - nodes := make([]*enode.Node, 0, bucketSize) - nreceived := 0 - rm := t.pending(toid, toAddrPort.Addr(), v4wire.NeighborsPacket, func(r v4wire.Packet) (matched bool, requestDone bool) { - reply := r.(*v4wire.Neighbors) - for _, rn := range reply.Nodes { - nreceived++ - n, err := t.nodeFromRPC(toAddrPort, rn) - if err != nil { - t.log.Trace("Invalid neighbor node received", "ip", rn.IP, "addr", toAddrPort, "err", err) - continue - } - nodes = append(nodes, n) - } - return true, nreceived >= bucketSize - }) - t.send(toAddrPort, toid, &v4wire.Findnode{ - Target: target, - Expiration: uint64(time.Now().Add(expiration).Unix()), - }) - // Ensure that callers don't see a timeout if the node actually responded. Since - // findnode can receive more than one neighbors response, the reply matcher will be - // active until the remote node sends enough nodes. If the remote end doesn't have - // enough nodes the reply matcher will time out waiting for the second reply, but - // there's no need for an error in that case. - err := <-rm.errc - if errors.Is(err, ErrTimeout) && rm.reply != nil { - err = nil - } - return nodes, err -} - -// RequestENR sends ENRRequest to the given node and waits for a response. -func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { - addr, _ := n.UDPEndpoint() - t.ensureBond(n.ID(), addr) - - req := &v4wire.ENRRequest{ - Expiration: uint64(time.Now().Add(expiration).Unix()), - } - packet, hash, err := v4wire.Encode(t.priv, req) - if err != nil { - return nil, err - } - - // Add a matcher for the reply to the pending reply queue. Responses are matched if - // they reference the request we're about to send. 
- rm := t.pending(n.ID(), addr.Addr(), v4wire.ENRResponsePacket, func(r v4wire.Packet) (matched bool, requestDone bool) { - matched = bytes.Equal(r.(*v4wire.ENRResponse).ReplyTok, hash) - return matched, matched - }) - // Send the packet and wait for the reply. - t.write(addr, n.ID(), req.Name(), packet) - if err := <-rm.errc; err != nil { - return nil, err - } - // Verify the response record. - respN, err := enode.New(enode.ValidSchemes, &rm.reply.(*v4wire.ENRResponse).Record) - if err != nil { - return nil, err - } - if respN.ID() != n.ID() { - return nil, errors.New("invalid ID in response record") - } - if respN.Seq() < n.Seq() { - return n, nil // response record is older - } - if err := netutil.CheckRelayAddr(addr.Addr(), respN.IPAddr()); err != nil { - return nil, fmt.Errorf("invalid IP in response record: %v", err) - } - return respN, nil -} - -func (t *UDPv4) TableBuckets() [][]BucketNode { - return t.tab.Nodes() -} - -// pending adds a reply matcher to the pending reply queue. -// see the documentation of type replyMatcher for a detailed explanation. -func (t *UDPv4) pending(id enode.ID, ip netip.Addr, ptype byte, callback replyMatchFunc) *replyMatcher { - ch := make(chan error, 1) - p := &replyMatcher{from: id, ip: ip, ptype: ptype, callback: callback, errc: ch} - select { - case t.addReplyMatcher <- p: - // loop will handle it - case <-t.closeCtx.Done(): - ch <- errClosed - } - return p -} - -// handleReply dispatches a reply packet, invoking reply matchers. It returns -// whether any matcher considered the packet acceptable. -func (t *UDPv4) handleReply(from enode.ID, fromIP netip.Addr, req v4wire.Packet) bool { - matched := make(chan bool, 1) - select { - case t.gotreply <- reply{from, fromIP, req, matched}: - // loop will handle it - return <-matched - case <-t.closeCtx.Done(): - return false - } -} - -// loop runs in its own goroutine. it keeps track of -// the refresh timer and the pending reply queue. 
-func (t *UDPv4) loop() { - defer t.wg.Done() - - var ( - plist = list.New() - timeout = time.NewTimer(0) - nextTimeout *replyMatcher // head of plist when timeout was last reset - contTimeouts = 0 // number of continuous timeouts to do NTP checks - ntpWarnTime = time.Unix(0, 0) - ) - <-timeout.C // ignore first timeout - defer timeout.Stop() - - resetTimeout := func() { - if plist.Front() == nil || nextTimeout == plist.Front().Value { - return - } - // Start the timer so it fires when the next pending reply has expired. - now := time.Now() - for el := plist.Front(); el != nil; el = el.Next() { - nextTimeout = el.Value.(*replyMatcher) - if dist := nextTimeout.deadline.Sub(now); dist < 2*respTimeout { - timeout.Reset(dist) - return - } - // Remove pending replies whose deadline is too far in the - // future. These can occur if the system clock jumped - // backwards after the deadline was assigned. - nextTimeout.errc <- errClockWarp - plist.Remove(el) - } - nextTimeout = nil - timeout.Stop() - } - - for { - resetTimeout() - - select { - case <-t.closeCtx.Done(): - for el := plist.Front(); el != nil; el = el.Next() { - el.Value.(*replyMatcher).errc <- errClosed - } - return - - case p := <-t.addReplyMatcher: - p.deadline = time.Now().Add(respTimeout) - plist.PushBack(p) - - case r := <-t.gotreply: - var matched bool // whether any replyMatcher considered the reply acceptable. - for el := plist.Front(); el != nil; el = el.Next() { - p := el.Value.(*replyMatcher) - if p.from == r.from && p.ptype == r.data.Kind() && p.ip == r.ip { - ok, requestDone := p.callback(r.data) - matched = matched || ok - p.reply = r.data - // Remove the matcher if callback indicates that all replies have been received. 
- if requestDone { - p.errc <- nil - plist.Remove(el) - } - // Reset the continuous timeout counter (time drift detection) - contTimeouts = 0 - } - } - r.matched <- matched - - case now := <-timeout.C: - nextTimeout = nil - - // Notify and remove callbacks whose deadline is in the past. - for el := plist.Front(); el != nil; el = el.Next() { - p := el.Value.(*replyMatcher) - if now.After(p.deadline) || now.Equal(p.deadline) { - p.errc <- ErrTimeout - plist.Remove(el) - contTimeouts++ - } - } - // If we've accumulated too many timeouts, do an NTP time sync check - if contTimeouts > ntpFailureThreshold { - if time.Since(ntpWarnTime) >= ntpWarningCooldown { - ntpWarnTime = time.Now() - go checkClockDrift() - } - contTimeouts = 0 - } - } - } -} - -func (t *UDPv4) send(toaddr netip.AddrPort, toid enode.ID, req v4wire.Packet) ([]byte, error) { - packet, hash, err := v4wire.Encode(t.priv, req) - if err != nil { - return hash, err - } - return hash, t.write(toaddr, toid, req.Name(), packet) -} - -func (t *UDPv4) write(toaddr netip.AddrPort, toid enode.ID, what string, packet []byte) error { - _, err := t.conn.WriteToUDPAddrPort(packet, toaddr) - t.log.Trace(">> "+what, "id", toid, "addr", toaddr, "err", err) - return err -} - -// readLoop runs in its own goroutine. it handles incoming UDP packets. -func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) { - defer t.wg.Done() - if unhandled != nil { - defer close(unhandled) - } - - buf := make([]byte, maxPacketSize) - for { - nbytes, from, err := t.conn.ReadFromUDPAddrPort(buf) - if netutil.IsTemporaryError(err) { - // Ignore temporary read errors. - t.log.Debug("Temporary UDP read error", "err", err) - continue - } else if err != nil { - // Shut down the loop for permanent errors. 
- if !errors.Is(err, io.EOF) { - t.log.Debug("UDP read error", "err", err) - } - return - } - if err := t.handlePacket(from, buf[:nbytes]); err != nil && unhandled == nil { - t.log.Debug("Bad discv4 packet", "addr", from, "err", err) - } else if err != nil && unhandled != nil { - select { - case unhandled <- ReadPacket{buf[:nbytes], from}: - default: - } - } - } -} - -func (t *UDPv4) handlePacket(from netip.AddrPort, buf []byte) error { - // Unwrap IPv4-in-6 source address. - if from.Addr().Is4In6() { - from = netip.AddrPortFrom(netip.AddrFrom4(from.Addr().As4()), from.Port()) - } - - rawpacket, fromKey, hash, err := v4wire.Decode(buf) - if err != nil { - return err - } - packet := t.wrapPacket(rawpacket) - fromID := fromKey.ID() - if packet.preverify != nil { - err = packet.preverify(packet, from, fromID, fromKey) - } - t.log.Trace("<< "+packet.Name(), "id", fromID, "addr", from, "err", err) - if err == nil && packet.handle != nil { - packet.handle(packet, from, fromID, hash) - } - return err -} - -// checkBond checks if the given node has a recent enough endpoint proof. -func (t *UDPv4) checkBond(id enode.ID, ip netip.AddrPort) bool { - return time.Since(t.db.LastPongReceived(id, ip.Addr())) < bondExpiration -} - -// ensureBond solicits a ping from a node if we haven't seen a ping from it for a while. -// This ensures there is a valid endpoint proof on the remote end. -func (t *UDPv4) ensureBond(toid enode.ID, toaddr netip.AddrPort) { - tooOld := time.Since(t.db.LastPingReceived(toid, toaddr.Addr())) > bondExpiration - if tooOld || t.db.FindFails(toid, toaddr.Addr()) > maxFindnodeFailures { - rm := t.sendPing(toid, toaddr, nil) - <-rm.errc - // Wait for them to ping back and process our pong. 
- time.Sleep(respTimeout) - } -} - -func (t *UDPv4) nodeFromRPC(sender netip.AddrPort, rn v4wire.Node) (*enode.Node, error) { - if rn.UDP <= 1024 { - return nil, errLowPort - } - if err := netutil.CheckRelayIP(sender.Addr().AsSlice(), rn.IP); err != nil { - return nil, err - } - if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) { - return nil, errors.New("not contained in netrestrict list") - } - key, err := v4wire.DecodePubkey(crypto.S256(), rn.ID) - if err != nil { - return nil, err - } - n := enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP)) - err = n.ValidateComplete() - return n, err -} - -func nodeToRPC(n *enode.Node) v4wire.Node { - var key ecdsa.PublicKey - var ekey v4wire.Pubkey - if err := n.Load((*enode.Secp256k1)(&key)); err == nil { - ekey = v4wire.EncodePubkey(&key) - } - return v4wire.Node{ID: ekey, IP: n.IP(), UDP: uint16(n.UDP()), TCP: uint16(n.TCP())} -} - -// wrapPacket returns the handler functions applicable to a packet. -func (t *UDPv4) wrapPacket(p v4wire.Packet) *packetHandlerV4 { - var h packetHandlerV4 - h.Packet = p - switch p.(type) { - case *v4wire.Ping: - h.preverify = t.verifyPing - h.handle = t.handlePing - case *v4wire.Pong: - h.preverify = t.verifyPong - case *v4wire.Findnode: - h.preverify = t.verifyFindnode - h.handle = t.handleFindnode - case *v4wire.Neighbors: - h.preverify = t.verifyNeighbors - case *v4wire.ENRRequest: - h.preverify = t.verifyENRRequest - h.handle = t.handleENRRequest - case *v4wire.ENRResponse: - h.preverify = t.verifyENRResponse - } - return &h -} - -// packetHandlerV4 wraps a packet with handler functions. -type packetHandlerV4 struct { - v4wire.Packet - senderKey *ecdsa.PublicKey // used for ping - - // preverify checks whether the packet is valid and should be handled at all. - preverify func(p *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error - // handle handles the packet. 
- handle func(req *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) -} - -// PING/v4 - -func (t *UDPv4) verifyPing(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { - req := h.Packet.(*v4wire.Ping) - - if v4wire.Expired(req.Expiration) { - return errExpired - } - senderKey, err := v4wire.DecodePubkey(crypto.S256(), fromKey) - if err != nil { - return err - } - h.senderKey = senderKey - return nil -} - -func (t *UDPv4) handlePing(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { - req := h.Packet.(*v4wire.Ping) - - // Reply. - t.send(from, fromID, &v4wire.Pong{ - To: v4wire.NewEndpoint(from, req.From.TCP), - ReplyTok: mac, - Expiration: uint64(time.Now().Add(expiration).Unix()), - ENRSeq: t.localNode.Node().Seq(), - }) - - // Ping back if our last pong on file is too far in the past. - fromIP := from.Addr().AsSlice() - n := enode.NewV4(h.senderKey, fromIP, int(req.From.TCP), int(from.Port())) - if time.Since(t.db.LastPongReceived(n.ID(), from.Addr())) > bondExpiration { - t.sendPing(fromID, from, func() { - t.tab.addInboundNode(n) - }) - } else { - t.tab.addInboundNode(n) - } - - // Update node database and endpoint predictor. 
- t.db.UpdateLastPingReceived(n.ID(), from.Addr(), time.Now()) - toaddr := netip.AddrPortFrom(netutil.IPToAddr(req.To.IP), req.To.UDP) - t.localNode.UDPEndpointStatement(from, toaddr) -} - -// PONG/v4 - -func (t *UDPv4) verifyPong(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { - req := h.Packet.(*v4wire.Pong) - - if v4wire.Expired(req.Expiration) { - return errExpired - } - if !t.handleReply(fromID, from.Addr(), req) { - return errUnsolicitedReply - } - toaddr := netip.AddrPortFrom(netutil.IPToAddr(req.To.IP), req.To.UDP) - t.localNode.UDPEndpointStatement(from, toaddr) - t.db.UpdateLastPongReceived(fromID, from.Addr(), time.Now()) - return nil -} - -// FINDNODE/v4 - -func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { - req := h.Packet.(*v4wire.Findnode) - - if v4wire.Expired(req.Expiration) { - return errExpired - } - if !t.checkBond(fromID, from) { - // No endpoint proof pong exists, we don't process the packet. This prevents an - // attack vector where the discovery protocol could be used to amplify traffic in a - // DDOS attack. A malicious actor would send a findnode request with the IP address - // and UDP port of the target as the source address. The recipient of the findnode - // packet would then send a neighbors packet (which is a much bigger packet than - // findnode) to the victim. - return errUnknownNode - } - return nil -} - -func (t *UDPv4) handleFindnode(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { - req := h.Packet.(*v4wire.Findnode) - - // Determine closest nodes. - target := enode.ID(crypto.Keccak256Hash(req.Target[:])) - preferLive := !t.tab.cfg.NoFindnodeLivenessCheck - closest := t.tab.findnodeByID(target, bucketSize, preferLive).entries - - // Send neighbors in chunks with at most maxNeighbors per packet - // to stay below the packet size limit. 
- p := v4wire.Neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())} - var sent bool - for _, n := range closest { - if netutil.CheckRelayAddr(from.Addr(), n.IPAddr()) == nil { - p.Nodes = append(p.Nodes, nodeToRPC(n)) - } - if len(p.Nodes) == v4wire.MaxNeighbors { - t.send(from, fromID, &p) - p.Nodes = p.Nodes[:0] - sent = true - } - } - if len(p.Nodes) > 0 || !sent { - t.send(from, fromID, &p) - } -} - -// NEIGHBORS/v4 - -func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { - req := h.Packet.(*v4wire.Neighbors) - - if v4wire.Expired(req.Expiration) { - return errExpired - } - if !t.handleReply(fromID, from.Addr(), h.Packet) { - return errUnsolicitedReply - } - return nil -} - -// ENRREQUEST/v4 - -func (t *UDPv4) verifyENRRequest(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { - req := h.Packet.(*v4wire.ENRRequest) - - if v4wire.Expired(req.Expiration) { - return errExpired - } - if !t.checkBond(fromID, from) { - return errUnknownNode - } - return nil -} - -func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { - t.send(from, fromID, &v4wire.ENRResponse{ - ReplyTok: mac, - Record: *t.localNode.Node().Record(), - }) -} - -// ENRRESPONSE/v4 - -func (t *UDPv4) verifyENRResponse(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { - if !t.handleReply(fromID, from.Addr(), h.Packet) { - return errUnsolicitedReply - } - return nil -} diff --git a/discvx/v4_udp_test.go b/discvx/v4_udp_test.go deleted file mode 100644 index cae17c64..00000000 --- a/discvx/v4_udp_test.go +++ /dev/null @@ -1,656 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discvx - -import ( - "bytes" - "crypto/ecdsa" - crand "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "io" - "math/rand" - "net" - "net/netip" - "reflect" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/internal/testlog" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover/v4wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" -) - -// shared test variables -var ( - futureExp = uint64(time.Now().Add(10 * time.Hour).Unix()) - testTarget = v4wire.Pubkey{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1} - testRemote = v4wire.Endpoint{IP: net.ParseIP("1.1.1.1").To4(), UDP: 1, TCP: 2} - testLocalAnnounced = v4wire.Endpoint{IP: net.ParseIP("2.2.2.2").To4(), UDP: 3, TCP: 4} - testLocal = v4wire.Endpoint{IP: net.ParseIP("3.3.3.3").To4(), UDP: 5, TCP: 6} -) - -type udpTest struct { - t *testing.T - pipe *dgramPipe - table *Table - db *enode.DB - udp *UDPv4 - sent [][]byte - localkey, remotekey *ecdsa.PrivateKey - remoteaddr netip.AddrPort -} - -func newUDPTest(t *testing.T) *udpTest { - test := &udpTest{ - t: t, - pipe: newpipe(), - localkey: newkey(), - remotekey: newkey(), - remoteaddr: netip.MustParseAddrPort("10.0.1.99:30303"), - } - - test.db, _ = enode.OpenDB("") - ln := 
enode.NewLocalNode(test.db, test.localkey) - test.udp, _ = ListenV4(test.pipe, ln, Config{ - PrivateKey: test.localkey, - Log: testlog.Logger(t, log.LvlTrace), - }) - test.table = test.udp.tab - // Wait for initial refresh so the table doesn't send unexpected findnode. - <-test.table.initDone - return test -} - -func (test *udpTest) close() { - test.udp.Close() - test.db.Close() -} - -// handles a packet as if it had been sent to the transport. -func (test *udpTest) packetIn(wantError error, data v4wire.Packet) { - test.t.Helper() - - test.packetInFrom(wantError, test.remotekey, test.remoteaddr, data) -} - -// handles a packet as if it had been sent to the transport by the key/endpoint. -func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr netip.AddrPort, data v4wire.Packet) { - test.t.Helper() - - enc, _, err := v4wire.Encode(key, data) - if err != nil { - test.t.Errorf("%s encode error: %v", data.Name(), err) - } - test.sent = append(test.sent, enc) - if err = test.udp.handlePacket(addr, enc); err != wantError { - test.t.Errorf("error mismatch: got %q, want %q", err, wantError) - } -} - -// waits for a packet to be sent by the transport. -// validate should have type func(X, netip.AddrPort, []byte), where X is a packet type. 
-func (test *udpTest) waitPacketOut(validate interface{}) (closed bool) { - test.t.Helper() - - dgram, err := test.pipe.receive() - if err == errClosed { - return true - } else if err != nil { - test.t.Error("packet receive error:", err) - return false - } - p, _, hash, err := v4wire.Decode(dgram.data) - if err != nil { - test.t.Errorf("sent packet decode error: %v", err) - return false - } - fn := reflect.ValueOf(validate) - exptype := fn.Type().In(0) - if !reflect.TypeOf(p).AssignableTo(exptype) { - test.t.Errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype) - return false - } - fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(dgram.to), reflect.ValueOf(hash)}) - return false -} - -func TestUDPv4_packetErrors(t *testing.T) { - test := newUDPTest(t) - defer test.close() - - test.packetIn(errExpired, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4}) - test.packetIn(errUnsolicitedReply, &v4wire.Pong{ReplyTok: []byte{}, Expiration: futureExp}) - test.packetIn(errUnknownNode, &v4wire.Findnode{Expiration: futureExp}) - test.packetIn(errUnsolicitedReply, &v4wire.Neighbors{Expiration: futureExp}) -} - -func TestUDPv4_pingTimeout(t *testing.T) { - t.Parallel() - test := newUDPTest(t) - defer test.close() - - key := newkey() - toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222} - node := enode.NewV4(&key.PublicKey, toaddr.IP, 0, toaddr.Port) - if _, err := test.udp.ping(node); err != ErrTimeout { - t.Error("expected timeout error, got", err) - } -} - -type testPacket byte - -func (req testPacket) Kind() byte { return byte(req) } -func (req testPacket) Name() string { return "" } - -func TestUDPv4_responseTimeouts(t *testing.T) { - t.Parallel() - test := newUDPTest(t) - defer test.close() - - randomDuration := func(max time.Duration) time.Duration { - return time.Duration(rand.Int63n(int64(max))) - } - - var ( - nReqs = 200 - nTimeouts = 0 // number of requests with ptype > 128 - nilErr = make(chan error, 
nReqs) // for requests that get a reply - timeoutErr = make(chan error, nReqs) // for requests that time out - ) - for i := 0; i < nReqs; i++ { - // Create a matcher for a random request in udp.loop. Requests - // with ptype <= 128 will not get a reply and should time out. - // For all other requests, a reply is scheduled to arrive - // within the timeout window. - p := &replyMatcher{ - ptype: byte(rand.Intn(255)), - callback: func(v4wire.Packet) (bool, bool) { return true, true }, - } - binary.BigEndian.PutUint64(p.from[:], uint64(i)) - if p.ptype <= 128 { - p.errc = timeoutErr - test.udp.addReplyMatcher <- p - nTimeouts++ - } else { - p.errc = nilErr - test.udp.addReplyMatcher <- p - time.AfterFunc(randomDuration(60*time.Millisecond), func() { - if !test.udp.handleReply(p.from, p.ip, testPacket(p.ptype)) { - t.Logf("not matched: %v", p) - } - }) - } - time.Sleep(randomDuration(30 * time.Millisecond)) - } - - // Check that all timeouts were delivered and that the rest got nil errors. - // The replies must be delivered. 
- var ( - recvDeadline = time.After(20 * time.Second) - nTimeoutsRecv, nNil = 0, 0 - ) - for i := 0; i < nReqs; i++ { - select { - case err := <-timeoutErr: - if err != ErrTimeout { - t.Fatalf("got non-timeout error on timeoutErr %d: %v", i, err) - } - nTimeoutsRecv++ - case err := <-nilErr: - if err != nil { - t.Fatalf("got non-nil error on nilErr %d: %v", i, err) - } - nNil++ - case <-recvDeadline: - t.Fatalf("exceeded recv deadline") - } - } - if nTimeoutsRecv != nTimeouts { - t.Errorf("wrong number of timeout errors received: got %d, want %d", nTimeoutsRecv, nTimeouts) - } - if nNil != nReqs-nTimeouts { - t.Errorf("wrong number of successful replies: got %d, want %d", nNil, nReqs-nTimeouts) - } -} - -func TestUDPv4_findnodeTimeout(t *testing.T) { - t.Parallel() - test := newUDPTest(t) - defer test.close() - - toaddr := netip.AddrPortFrom(netip.MustParseAddr("1.2.3.4"), 2222) - toid := enode.ID{1, 2, 3, 4} - target := v4wire.Pubkey{4, 5, 6, 7} - result, err := test.udp.FindNode(toid, toaddr, target) - if err != ErrTimeout { - t.Error("expected timeout error, got", err) - } - if len(result) > 0 { - t.Error("expected empty result, got", result) - } -} - -func TestUDPv4_findnode(t *testing.T) { - test := newUDPTest(t) - defer test.close() - - // put a few nodes into the table. their exact - // distribution shouldn't matter much, although we need to - // take care not to overflow any bucket. - nodes := &nodesByDistance{target: testTarget.ID()} - live := make(map[enode.ID]bool) - numCandidates := 2 * bucketSize - for i := 0; i < numCandidates; i++ { - key := newkey() - ip := net.IP{10, 13, 0, byte(i)} - n := enode.NewV4(&key.PublicKey, ip, 0, 2000) - // Ensure half of table content isn't verified live yet. - if i > numCandidates/2 { - live[n.ID()] = true - } - test.table.addFoundNode(n, live[n.ID()]) - nodes.push(n, numCandidates) - } - - // ensure there's a bond with the test node, - // findnode won't be accepted otherwise. 
- remoteID := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID() - test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.Addr(), time.Now()) - - // check that closest neighbors are returned. - expected := test.table.findnodeByID(testTarget.ID(), bucketSize, true) - test.packetIn(nil, &v4wire.Findnode{Target: testTarget, Expiration: futureExp}) - waitNeighbors := func(want []*enode.Node) { - test.waitPacketOut(func(p *v4wire.Neighbors, to netip.AddrPort, hash []byte) { - if len(p.Nodes) != len(want) { - t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), len(want)) - return - } - for i, n := range p.Nodes { - if n.ID.ID() != want[i].ID() { - t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, n, expected.entries[i]) - } - if !live[n.ID.ID()] { - t.Errorf("result includes dead node %v", n.ID.ID()) - } - } - }) - } - // Receive replies. - want := expected.entries - if len(want) > v4wire.MaxNeighbors { - waitNeighbors(want[:v4wire.MaxNeighbors]) - want = want[v4wire.MaxNeighbors:] - } - waitNeighbors(want) -} - -func TestUDPv4_findnodeMultiReply(t *testing.T) { - test := newUDPTest(t) - defer test.close() - - rid := enode.PubkeyToIDV4(&test.remotekey.PublicKey) - test.table.db.UpdateLastPingReceived(rid, test.remoteaddr.Addr(), time.Now()) - - // queue a pending findnode request - resultc, errc := make(chan []*enode.Node, 1), make(chan error, 1) - go func() { - rid := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID() - ns, err := test.udp.FindNode(rid, test.remoteaddr, testTarget) - if err != nil && len(ns) == 0 { - errc <- err - } else { - resultc <- ns - } - }() - - // wait for the findnode to be sent. - // after it is sent, the transport is waiting for a reply - test.waitPacketOut(func(p *v4wire.Findnode, to netip.AddrPort, hash []byte) { - if p.Target != testTarget { - t.Errorf("wrong target: got %v, want %v", p.Target, testTarget) - } - }) - - // send the reply as two packets. 
- list := []*enode.Node{ - enode.MustParse("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"), - enode.MustParse("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303"), - enode.MustParse("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17"), - enode.MustParse("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303"), - } - rpclist := make([]v4wire.Node, len(list)) - for i := range list { - rpclist[i] = nodeToRPC(list[i]) - } - test.packetIn(nil, &v4wire.Neighbors{Expiration: futureExp, Nodes: rpclist[:2]}) - test.packetIn(nil, &v4wire.Neighbors{Expiration: futureExp, Nodes: rpclist[2:]}) - - // check that the sent neighbors are all returned by findnode - select { - case result := <-resultc: - want := append(list[:2], list[3:]...) - if !reflect.DeepEqual(result, want) { - t.Errorf("neighbors mismatch:\n got: %v\n want: %v", result, want) - } - case err := <-errc: - t.Errorf("findnode error: %v", err) - case <-time.After(5 * time.Second): - t.Error("findnode did not return within 5 seconds") - } -} - -// This test checks that reply matching of pong verifies the ping hash. 
-func TestUDPv4_pingMatch(t *testing.T) { - test := newUDPTest(t) - defer test.close() - - randToken := make([]byte, 32) - crand.Read(randToken) - - test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - test.waitPacketOut(func(*v4wire.Pong, netip.AddrPort, []byte) {}) - test.waitPacketOut(func(*v4wire.Ping, netip.AddrPort, []byte) {}) - test.packetIn(errUnsolicitedReply, &v4wire.Pong{ReplyTok: randToken, To: testLocalAnnounced, Expiration: futureExp}) -} - -// This test checks that reply matching of pong verifies the sender IP address. -func TestUDPv4_pingMatchIP(t *testing.T) { - test := newUDPTest(t) - defer test.close() - - test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - test.waitPacketOut(func(*v4wire.Pong, netip.AddrPort, []byte) {}) - - test.waitPacketOut(func(p *v4wire.Ping, to netip.AddrPort, hash []byte) { - wrongAddr := netip.MustParseAddrPort("33.44.1.2:30000") - test.packetInFrom(errUnsolicitedReply, test.remotekey, wrongAddr, &v4wire.Pong{ - ReplyTok: hash, - To: testLocalAnnounced, - Expiration: futureExp, - }) - }) -} - -func TestUDPv4_successfulPing(t *testing.T) { - test := newUDPTest(t) - added := make(chan *tableNode, 1) - test.table.nodeAddedHook = func(b *bucket, n *tableNode) { added <- n } - defer test.close() - - // The remote side sends a ping packet to initiate the exchange. - go test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - - // The ping is replied to. - test.waitPacketOut(func(p *v4wire.Pong, to netip.AddrPort, hash []byte) { - pinghash := test.sent[0][:32] - if !bytes.Equal(p.ReplyTok, pinghash) { - t.Errorf("got pong.ReplyTok %x, want %x", p.ReplyTok, pinghash) - } - // The mirrored UDP address is the UDP packet sender. - // The mirrored TCP port is the one from the ping packet. 
- wantTo := v4wire.NewEndpoint(test.remoteaddr, testRemote.TCP) - if !reflect.DeepEqual(p.To, wantTo) { - t.Errorf("got pong.To %v, want %v", p.To, wantTo) - } - }) - - // Remote is unknown, the table pings back. - test.waitPacketOut(func(p *v4wire.Ping, to netip.AddrPort, hash []byte) { - wantFrom := test.udp.ourEndpoint() - wantFrom.IP = net.IP{} - if !reflect.DeepEqual(p.From, wantFrom) { - t.Errorf("got ping.From %#v, want %#v", p.From, test.udp.ourEndpoint()) - } - // The mirrored UDP address is the UDP packet sender. - wantTo := v4wire.NewEndpoint(test.remoteaddr, 0) - if !reflect.DeepEqual(p.To, wantTo) { - t.Errorf("got ping.To %v, want %v", p.To, wantTo) - } - test.packetIn(nil, &v4wire.Pong{ReplyTok: hash, Expiration: futureExp}) - }) - - // The node should be added to the table shortly after getting the - // pong packet. - select { - case n := <-added: - rid := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID() - if n.ID() != rid { - t.Errorf("node has wrong ID: got %v, want %v", n.ID(), rid) - } - if n.IPAddr() != test.remoteaddr.Addr() { - t.Errorf("node has wrong IP: got %v, want: %v", n.IPAddr(), test.remoteaddr.Addr()) - } - if n.UDP() != int(test.remoteaddr.Port()) { - t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port()) - } - if n.TCP() != int(testRemote.TCP) { - t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP(), testRemote.TCP) - } - case <-time.After(2 * time.Second): - t.Errorf("node was not added within 2 seconds") - } -} - -// This test checks that EIP-868 requests work. -func TestUDPv4_EIP868(t *testing.T) { - test := newUDPTest(t) - defer test.close() - - test.udp.localNode.Set(enr.WithEntry("foo", "bar")) - wantNode := test.udp.localNode.Node() - - // ENR requests aren't allowed before endpoint proof. - test.packetIn(errUnknownNode, &v4wire.ENRRequest{Expiration: futureExp}) - - // Perform endpoint proof and check for sequence number in packet tail. 
- test.packetIn(nil, &v4wire.Ping{Expiration: futureExp}) - test.waitPacketOut(func(p *v4wire.Pong, addr netip.AddrPort, hash []byte) { - if p.ENRSeq != wantNode.Seq() { - t.Errorf("wrong sequence number in pong: %d, want %d", p.ENRSeq, wantNode.Seq()) - } - }) - test.waitPacketOut(func(p *v4wire.Ping, addr netip.AddrPort, hash []byte) { - if p.ENRSeq != wantNode.Seq() { - t.Errorf("wrong sequence number in ping: %d, want %d", p.ENRSeq, wantNode.Seq()) - } - test.packetIn(nil, &v4wire.Pong{Expiration: futureExp, ReplyTok: hash}) - }) - - // Request should work now. - test.packetIn(nil, &v4wire.ENRRequest{Expiration: futureExp}) - test.waitPacketOut(func(p *v4wire.ENRResponse, addr netip.AddrPort, hash []byte) { - n, err := enode.New(enode.ValidSchemes, &p.Record) - if err != nil { - t.Fatalf("invalid record: %v", err) - } - if !reflect.DeepEqual(n, wantNode) { - t.Fatalf("wrong node in ENRResponse: %v", n) - } - }) -} - -// This test verifies that a small network of nodes can boot up into a healthy state. -func TestUDPv4_smallNetConvergence(t *testing.T) { - t.Parallel() - - // Start the network. - nodes := make([]*UDPv4, 4) - for i := range nodes { - var cfg Config - if i > 0 { - bn := nodes[0].Self() - cfg.Bootnodes = []*enode.Node{bn} - } - nodes[i] = startLocalhostV4(t, cfg) - defer nodes[i].Close() - } - - // Run through the iterator on all nodes until - // they have all found each other. - status := make(chan error, len(nodes)) - for i := range nodes { - node := nodes[i] - go func() { - found := make(map[enode.ID]bool, len(nodes)) - it := node.RandomNodes() - for it.Next() { - found[it.Node().ID()] = true - if len(found) == len(nodes) { - status <- nil - return - } - } - status <- fmt.Errorf("node %s didn't find all nodes", node.Self().ID().TerminalString()) - }() - } - - // Wait for all status reports. 
- timeout := time.NewTimer(30 * time.Second) - defer timeout.Stop() - for received := 0; received < len(nodes); { - select { - case <-timeout.C: - for _, node := range nodes { - node.Close() - } - case err := <-status: - received++ - if err != nil { - t.Error("ERROR:", err) - return - } - } - } -} - -func startLocalhostV4(t *testing.T, cfg Config) *UDPv4 { - t.Helper() - - cfg.PrivateKey = newkey() - db, _ := enode.OpenDB("") - ln := enode.NewLocalNode(db, cfg.PrivateKey) - - // Prefix logs with node ID. - lprefix := fmt.Sprintf("(%s)", ln.ID().TerminalString()) - cfg.Log = testlog.Logger(t, log.LevelTrace).With("node-id", lprefix) - - // Listen. - socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{127, 0, 0, 1}}) - if err != nil { - t.Fatal(err) - } - realaddr := socket.LocalAddr().(*net.UDPAddr) - ln.SetStaticIP(realaddr.IP) - ln.SetFallbackUDP(realaddr.Port) - udp, err := ListenV4(socket, ln, cfg) - if err != nil { - t.Fatal(err) - } - return udp -} - -// dgramPipe is a fake UDP socket. It queues all sent datagrams. -type dgramPipe struct { - mu *sync.Mutex - cond *sync.Cond - closing chan struct{} - closed bool - queue []dgram -} - -type dgram struct { - to netip.AddrPort - data []byte -} - -func newpipe() *dgramPipe { - mu := new(sync.Mutex) - return &dgramPipe{ - closing: make(chan struct{}), - cond: &sync.Cond{L: mu}, - mu: mu, - } -} - -// WriteToUDPAddrPort queues a datagram. -func (c *dgramPipe) WriteToUDPAddrPort(b []byte, to netip.AddrPort) (n int, err error) { - msg := make([]byte, len(b)) - copy(msg, b) - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return 0, errors.New("closed") - } - c.queue = append(c.queue, dgram{to, b}) - c.cond.Signal() - return len(b), nil -} - -// ReadFromUDPAddrPort just hangs until the pipe is closed. 
-func (c *dgramPipe) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { - <-c.closing - return 0, netip.AddrPort{}, io.EOF -} - -func (c *dgramPipe) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - if !c.closed { - close(c.closing) - c.closed = true - } - c.cond.Broadcast() - return nil -} - -func (c *dgramPipe) LocalAddr() net.Addr { - return &net.UDPAddr{IP: testLocal.IP, Port: int(testLocal.UDP)} -} - -func (c *dgramPipe) receive() (dgram, error) { - c.mu.Lock() - defer c.mu.Unlock() - - var timedOut bool - timer := time.AfterFunc(3*time.Second, func() { - c.mu.Lock() - timedOut = true - c.mu.Unlock() - c.cond.Broadcast() - }) - defer timer.Stop() - - for len(c.queue) == 0 && !c.closed && !timedOut { - c.cond.Wait() - } - if c.closed { - return dgram{}, errClosed - } - if timedOut { - return dgram{}, ErrTimeout - } - p := c.queue[0] - copy(c.queue, c.queue[1:]) - c.queue = c.queue[:len(c.queue)-1] - return p, nil -} diff --git a/discvx/v5_talk.go b/discvx/v5_talk.go deleted file mode 100644 index f80ad081..00000000 --- a/discvx/v5_talk.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discvx - -import ( - "net" - "net/netip" - "sync" - "time" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover/v5wire" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -// This is a limit for the number of concurrent talk requests. -const maxActiveTalkRequests = 1024 - -// This is the timeout for acquiring a handler execution slot for a talk request. -// The timeout should be short enough to fit within the request timeout. -const talkHandlerLaunchTimeout = 400 * time.Millisecond - -// TalkRequestHandler callback processes a talk request and returns a response. -// -// Note that talk handlers are expected to come up with a response very quickly, within at -// most 200ms or so. If the handler takes longer than that, the remote end may time out -// and wont receive the response. -type TalkRequestHandler func(enode.ID, *net.UDPAddr, []byte) []byte - -type talkSystem struct { - transport *UDPv5 - - mutex sync.Mutex - handlers map[string]TalkRequestHandler - slots chan struct{} - lastLog time.Time - dropCount int -} - -func newTalkSystem(transport *UDPv5) *talkSystem { - t := &talkSystem{ - transport: transport, - handlers: make(map[string]TalkRequestHandler), - slots: make(chan struct{}, maxActiveTalkRequests), - } - for i := 0; i < cap(t.slots); i++ { - t.slots <- struct{}{} - } - return t -} - -// register adds a protocol handler. -func (t *talkSystem) register(protocol string, handler TalkRequestHandler) { - t.mutex.Lock() - t.handlers[protocol] = handler - t.mutex.Unlock() -} - -// handleRequest handles a talk request. -func (t *talkSystem) handleRequest(id enode.ID, addr netip.AddrPort, req *v5wire.TalkRequest) { - t.mutex.Lock() - handler, ok := t.handlers[req.Protocol] - t.mutex.Unlock() - - if !ok { - resp := &v5wire.TalkResponse{ReqID: req.ReqID} - t.transport.sendResponse(id, addr, resp) - return - } - - // Wait for a slot to become available, then run the handler. 
- timeout := time.NewTimer(talkHandlerLaunchTimeout) - defer timeout.Stop() - select { - case <-t.slots: - go func() { - defer func() { t.slots <- struct{}{} }() - udpAddr := &net.UDPAddr{IP: addr.Addr().AsSlice(), Port: int(addr.Port())} - respMessage := handler(id, udpAddr, req.Message) - resp := &v5wire.TalkResponse{ReqID: req.ReqID, Message: respMessage} - t.transport.sendFromAnotherThread(id, addr, resp) - }() - case <-timeout.C: - // Couldn't get it in time, drop the request. - if time.Since(t.lastLog) > 5*time.Second { - log.Warn("Dropping TALKREQ due to overload", "ndrop", t.dropCount) - t.lastLog = time.Now() - t.dropCount++ - } - case <-t.transport.closeCtx.Done(): - // Transport closed, drop the request. - } -} - -// wait blocks until all active requests have finished, and prevents new request -// handlers from being launched. -func (t *talkSystem) wait() { - for i := 0; i < cap(t.slots); i++ { - <-t.slots - } -} diff --git a/discvx/v5_udp.go b/discvx/v5_udp.go deleted file mode 100644 index 87ab91be..00000000 --- a/discvx/v5_udp.go +++ /dev/null @@ -1,911 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discvx - -import ( - "bytes" - "context" - "crypto/ecdsa" - crand "crypto/rand" - "errors" - "fmt" - "io" - "net" - "net/netip" - "slices" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover/v5wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/netutil" -) - -const ( - lookupRequestLimit = 3 // max requests against a single node during lookup - findnodeResultLimit = 16 // applies in FINDNODE handler - totalNodesResponseLimit = 5 // applies in waitForNodes - - respTimeoutV5 = 700 * time.Millisecond -) - -// codecV5 is implemented by v5wire.Codec (and testCodec). -// -// The UDPv5 transport is split into two objects: the codec object deals with -// encoding/decoding and with the handshake; the UDPv5 object handles higher-level concerns. -type codecV5 interface { - // Encode encodes a packet. - Encode(enode.ID, string, v5wire.Packet, *v5wire.Whoareyou) ([]byte, v5wire.Nonce, error) - - // Decode decodes a packet. It returns a *v5wire.Unknown packet if decryption fails. - // The *enode.Node return value is non-nil when the input contains a handshake response. - Decode([]byte, string) (enode.ID, *enode.Node, v5wire.Packet, error) -} - -// UDPv5 is the implementation of protocol version 5. 
-type UDPv5 struct { - // static fields - conn UDPConn - tab *Table - netrestrict *netutil.Netlist - priv *ecdsa.PrivateKey - localNode *enode.LocalNode - db *enode.DB - log log.Logger - clock mclock.Clock - validSchemes enr.IdentityScheme - - // misc buffers used during message handling - logcontext []interface{} - - // talkreq handler registry - talk *talkSystem - - // channels into dispatch - packetInCh chan ReadPacket - readNextCh chan struct{} - callCh chan *callV5 - callDoneCh chan *callV5 - respTimeoutCh chan *callTimeout - sendCh chan sendRequest - unhandled chan<- ReadPacket - - // state of dispatch - codec codecV5 - activeCallByNode map[enode.ID]*callV5 - activeCallByAuth map[v5wire.Nonce]*callV5 - callQueue map[enode.ID][]*callV5 - - // shutdown stuff - closeOnce sync.Once - closeCtx context.Context - cancelCloseCtx context.CancelFunc - wg sync.WaitGroup -} - -type sendRequest struct { - destID enode.ID - destAddr netip.AddrPort - msg v5wire.Packet -} - -// callV5 represents a remote procedure call against another node. -type callV5 struct { - id enode.ID - addr netip.AddrPort - node *enode.Node // This is required to perform handshakes. - - packet v5wire.Packet - responseType byte // expected packet type of response - reqid []byte - ch chan v5wire.Packet // responses sent here - err chan error // errors sent here - - // Valid for active calls only: - nonce v5wire.Nonce // nonce of request packet - handshakeCount int // # times we attempted handshake for this call - challenge *v5wire.Whoareyou // last sent handshake challenge - timeout mclock.Timer -} - -// callTimeout is the response timeout event of a call. -type callTimeout struct { - c *callV5 - timer mclock.Timer -} - -// ListenV5 listens on the given connection. 
-func ListenV5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { - t, err := newUDPv5(conn, ln, cfg) - if err != nil { - return nil, err - } - go t.tab.loop() - t.wg.Add(2) - go t.readLoop() - go t.dispatch() - return t, nil -} - -// newUDPv5 creates a UDPv5 transport, but doesn't start any goroutines. -func newUDPv5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { - closeCtx, cancelCloseCtx := context.WithCancel(context.Background()) - cfg = cfg.withDefaults() - t := &UDPv5{ - // static fields - conn: newMeteredConn(conn), - localNode: ln, - db: ln.Database(), - netrestrict: cfg.NetRestrict, - priv: cfg.PrivateKey, - log: cfg.Log, - validSchemes: cfg.ValidSchemes, - clock: cfg.Clock, - // channels into dispatch - packetInCh: make(chan ReadPacket, 1), - readNextCh: make(chan struct{}, 1), - callCh: make(chan *callV5), - callDoneCh: make(chan *callV5), - sendCh: make(chan sendRequest), - respTimeoutCh: make(chan *callTimeout), - unhandled: cfg.Unhandled, - // state of dispatch - codec: v5wire.NewCodec(ln, cfg.PrivateKey, cfg.Clock, cfg.V5ProtocolID), - activeCallByNode: make(map[enode.ID]*callV5), - activeCallByAuth: make(map[v5wire.Nonce]*callV5), - callQueue: make(map[enode.ID][]*callV5), - // shutdown - closeCtx: closeCtx, - cancelCloseCtx: cancelCloseCtx, - } - t.talk = newTalkSystem(t) - tab, err := newTable(t, t.db, cfg) - if err != nil { - return nil, err - } - t.tab = tab - return t, nil -} - -// Self returns the local node record. -func (t *UDPv5) Self() *enode.Node { - return t.localNode.Node() -} - -// Close shuts down packet processing. -func (t *UDPv5) Close() { - t.closeOnce.Do(func() { - t.cancelCloseCtx() - t.conn.Close() - t.talk.wait() - t.wg.Wait() - t.tab.close() - }) -} - -// Ping sends a ping message to the given node. 
-func (t *UDPv5) Ping(n *enode.Node) error { - _, err := t.ping(n) - return err -} - -// Resolve searches for a specific node with the given ID and tries to get the most recent -// version of the node record for it. It returns n if the node could not be resolved. -func (t *UDPv5) Resolve(n *enode.Node) *enode.Node { - if intable := t.tab.getNode(n.ID()); intable != nil && intable.Seq() > n.Seq() { - n = intable - } - // Try asking directly. This works if the node is still responding on the endpoint we have. - if resp, err := t.RequestENR(n); err == nil { - return resp - } - // Otherwise do a network lookup. - result := t.Lookup(n.ID()) - for _, rn := range result { - if rn.ID() == n.ID() && rn.Seq() > n.Seq() { - return rn - } - } - return n -} - -// AllNodes returns all the nodes stored in the local table. -func (t *UDPv5) AllNodes() []*enode.Node { - t.tab.mutex.Lock() - defer t.tab.mutex.Unlock() - nodes := make([]*enode.Node, 0) - - for _, b := range &t.tab.buckets { - for _, n := range b.entries { - nodes = append(nodes, n.Node) - } - } - return nodes -} - -// LocalNode returns the current local node running the -// protocol. -func (t *UDPv5) LocalNode() *enode.LocalNode { - return t.localNode -} - -// RegisterTalkHandler adds a handler for 'talk requests'. The handler function is called -// whenever a request for the given protocol is received and should return the response -// data or nil. -func (t *UDPv5) RegisterTalkHandler(protocol string, handler TalkRequestHandler) { - t.talk.register(protocol, handler) -} - -// TalkRequest sends a talk request to a node and waits for a response. 
-func (t *UDPv5) TalkRequest(n *enode.Node, protocol string, request []byte) ([]byte, error) { - req := &v5wire.TalkRequest{Protocol: protocol, Message: request} - resp := t.callToNode(n, v5wire.TalkResponseMsg, req) - defer t.callDone(resp) - select { - case respMsg := <-resp.ch: - return respMsg.(*v5wire.TalkResponse).Message, nil - case err := <-resp.err: - return nil, err - } -} - -// TalkRequestToID sends a talk request to a node and waits for a response. -func (t *UDPv5) TalkRequestToID(id enode.ID, addr netip.AddrPort, protocol string, request []byte) ([]byte, error) { - req := &v5wire.TalkRequest{Protocol: protocol, Message: request} - resp := t.callToID(id, addr, v5wire.TalkResponseMsg, req) - defer t.callDone(resp) - select { - case respMsg := <-resp.ch: - return respMsg.(*v5wire.TalkResponse).Message, nil - case err := <-resp.err: - return nil, err - } -} - -// RandomNodes returns an iterator that finds random nodes in the DHT. -func (t *UDPv5) RandomNodes() enode.Iterator { - if t.tab.len() == 0 { - // All nodes were dropped, refresh. The very first query will hit this - // case and run the bootstrapping logic. - <-t.tab.refresh() - } - - return newLookupIterator(t.closeCtx, t.newRandomLookup) -} - -// Lookup performs a recursive lookup for the given target. -// It returns the closest nodes to target. -func (t *UDPv5) Lookup(target enode.ID) []*enode.Node { - return t.newLookup(t.closeCtx, target).run() -} - -// lookupRandom looks up a random target. -// This is needed to satisfy the transport interface. -func (t *UDPv5) lookupRandom() []*enode.Node { - return t.newRandomLookup(t.closeCtx).run() -} - -// lookupSelf looks up our own node ID. -// This is needed to satisfy the transport interface. 
-func (t *UDPv5) lookupSelf() []*enode.Node { - return t.newLookup(t.closeCtx, t.Self().ID()).run() -} - -func (t *UDPv5) newRandomLookup(ctx context.Context) *lookup { - var target enode.ID - crand.Read(target[:]) - return t.newLookup(ctx, target) -} - -func (t *UDPv5) newLookup(ctx context.Context, target enode.ID) *lookup { - return newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) { - return t.lookupWorker(n, target) - }) -} - -// lookupWorker performs FINDNODE calls against a single node during lookup. -func (t *UDPv5) lookupWorker(destNode *enode.Node, target enode.ID) ([]*enode.Node, error) { - var ( - dists = lookupDistances(target, destNode.ID()) - nodes = nodesByDistance{target: target} - err error - ) - var r []*enode.Node - r, err = t.FindNode(destNode, dists) - if errors.Is(err, errClosed) { - return nil, err - } - for _, n := range r { - if n.ID() != t.Self().ID() { - nodes.push(n, findnodeResultLimit) - } - } - return nodes.entries, err -} - -// lookupDistances computes the distance parameter for FINDNODE calls to dest. -// It chooses distances adjacent to logdist(target, dest), e.g. for a target -// with logdist(target, dest) = 255 the result is [255, 256, 254]. -func lookupDistances(target, dest enode.ID) (dists []uint) { - td := enode.LogDist(target, dest) - dists = append(dists, uint(td)) - for i := 1; len(dists) < lookupRequestLimit; i++ { - if td+i <= 256 { - dists = append(dists, uint(td+i)) - } - if td-i > 0 { - dists = append(dists, uint(td-i)) - } - } - return dists -} - -// ping calls PING on a node and waits for a PONG response. -func (t *UDPv5) ping(n *enode.Node) (uint64, error) { - req := &v5wire.Ping{ENRSeq: t.localNode.Node().Seq()} - resp := t.callToNode(n, v5wire.PongMsg, req) - defer t.callDone(resp) - - select { - case pong := <-resp.ch: - return pong.(*v5wire.Pong).ENRSeq, nil - case err := <-resp.err: - return 0, err - } -} - -// RequestENR requests n's record. 
-func (t *UDPv5) RequestENR(n *enode.Node) (*enode.Node, error) { - nodes, err := t.FindNode(n, []uint{0}) - if err != nil { - return nil, err - } - if len(nodes) != 1 { - return nil, fmt.Errorf("%d nodes in response for distance zero", len(nodes)) - } - return nodes[0], nil -} - -// findnode calls FINDNODE on a node and waits for responses. -func (t *UDPv5) FindNode(n *enode.Node, distances []uint) ([]*enode.Node, error) { - resp := t.callToNode(n, v5wire.NodesMsg, &v5wire.Findnode{Distances: distances}) - return t.waitForNodes(resp, distances) -} - -// waitForNodes waits for NODES responses to the given call. -func (t *UDPv5) waitForNodes(c *callV5, distances []uint) ([]*enode.Node, error) { - defer t.callDone(c) - - var ( - nodes []*enode.Node - seen = make(map[enode.ID]struct{}) - received, total = 0, -1 - ) - for { - select { - case responseP := <-c.ch: - response := responseP.(*v5wire.Nodes) - for _, record := range response.Nodes { - node, err := t.verifyResponseNode(c, record, distances, seen) - if err != nil { - t.log.Debug("Invalid record in "+response.Name(), "id", c.node.ID(), "err", err) - continue - } - nodes = append(nodes, node) - } - if total == -1 { - total = min(int(response.RespCount), totalNodesResponseLimit) - } - if received++; received == total { - return nodes, nil - } - case err := <-c.err: - return nodes, err - } - } -} - -// verifyResponseNode checks validity of a record in a NODES response. 
-func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, seen map[enode.ID]struct{}) (*enode.Node, error) { - node, err := enode.New(t.validSchemes, r) - if err != nil { - return nil, err - } - if err := netutil.CheckRelayAddr(c.addr.Addr(), node.IPAddr()); err != nil { - return nil, err - } - if t.netrestrict != nil && !t.netrestrict.ContainsAddr(node.IPAddr()) { - return nil, errors.New("not contained in netrestrict list") - } - if node.UDP() <= 1024 { - return nil, errLowPort - } - if distances != nil { - nd := enode.LogDist(c.id, node.ID()) - if !slices.Contains(distances, uint(nd)) { - return nil, errors.New("does not match any requested distance") - } - } - if _, ok := seen[node.ID()]; ok { - return nil, errors.New("duplicate record") - } - seen[node.ID()] = struct{}{} - return node, nil -} - -// callToNode sends the given call and sets up a handler for response packets (of message -// type responseType). Responses are dispatched to the call's response channel. -func (t *UDPv5) callToNode(n *enode.Node, responseType byte, req v5wire.Packet) *callV5 { - addr, _ := n.UDPEndpoint() - c := &callV5{id: n.ID(), addr: addr, node: n} - t.initCall(c, responseType, req) - return c -} - -// callToID is like callToNode, but for cases where the node record is not available. -func (t *UDPv5) callToID(id enode.ID, addr netip.AddrPort, responseType byte, req v5wire.Packet) *callV5 { - c := &callV5{id: id, addr: addr} - t.initCall(c, responseType, req) - return c -} - -func (t *UDPv5) initCall(c *callV5, responseType byte, packet v5wire.Packet) { - c.packet = packet - c.responseType = responseType - c.reqid = make([]byte, 8) - c.ch = make(chan v5wire.Packet, 1) - c.err = make(chan error, 1) - // Assign request ID. - crand.Read(c.reqid) - packet.SetRequestID(c.reqid) - // Send call to dispatch. - select { - case t.callCh <- c: - case <-t.closeCtx.Done(): - c.err <- errClosed - } -} - -// callDone tells dispatch that the active call is done. 
-func (t *UDPv5) callDone(c *callV5) { - // This needs a loop because further responses may be incoming until the - // send to callDoneCh has completed. Such responses need to be discarded - // in order to avoid blocking the dispatch loop. - for { - select { - case <-c.ch: - // late response, discard. - case <-c.err: - // late error, discard. - case t.callDoneCh <- c: - return - case <-t.closeCtx.Done(): - return - } - } -} - -// dispatch runs in its own goroutine, handles incoming packets and deals with calls. -// -// For any destination node there is at most one 'active call', stored in the t.activeCall* -// maps. A call is made active when it is sent. The active call can be answered by a -// matching response, in which case c.ch receives the response; or by timing out, in which case -// c.err receives the error. When the function that created the call signals the active -// call is done through callDone, the next call from the call queue is started. -// -// Calls may also be answered by a WHOAREYOU packet referencing the call packet's authTag. -// When that happens the call is simply re-sent to complete the handshake. We allow one -// handshake attempt per call. -func (t *UDPv5) dispatch() { - defer t.wg.Done() - - // Arm first read. - t.readNextCh <- struct{}{} - - for { - select { - case c := <-t.callCh: - t.callQueue[c.id] = append(t.callQueue[c.id], c) - t.sendNextCall(c.id) - - case ct := <-t.respTimeoutCh: - active := t.activeCallByNode[ct.c.id] - if ct.c == active && ct.timer == active.timeout { - ct.c.err <- ErrTimeout - } - - case c := <-t.callDoneCh: - active := t.activeCallByNode[c.id] - if active != c { - panic("BUG: callDone for inactive call") - } - c.timeout.Stop() - delete(t.activeCallByAuth, c.nonce) - delete(t.activeCallByNode, c.id) - t.sendNextCall(c.id) - - case r := <-t.sendCh: - t.send(r.destID, r.destAddr, r.msg, nil) - - case p := <-t.packetInCh: - t.handlePacket(p.Data, p.Addr) - // Arm next read. 
- t.readNextCh <- struct{}{} - - case <-t.closeCtx.Done(): - close(t.readNextCh) - for id, queue := range t.callQueue { - for _, c := range queue { - c.err <- errClosed - } - delete(t.callQueue, id) - } - for id, c := range t.activeCallByNode { - c.err <- errClosed - delete(t.activeCallByNode, id) - delete(t.activeCallByAuth, c.nonce) - } - return - } - } -} - -// startResponseTimeout sets the response timer for a call. -func (t *UDPv5) startResponseTimeout(c *callV5) { - if c.timeout != nil { - c.timeout.Stop() - } - var ( - timer mclock.Timer - done = make(chan struct{}) - ) - timer = t.clock.AfterFunc(respTimeoutV5, func() { - <-done - select { - case t.respTimeoutCh <- &callTimeout{c, timer}: - case <-t.closeCtx.Done(): - } - }) - c.timeout = timer - close(done) -} - -// sendNextCall sends the next call in the call queue if there is no active call. -func (t *UDPv5) sendNextCall(id enode.ID) { - queue := t.callQueue[id] - if len(queue) == 0 || t.activeCallByNode[id] != nil { - return - } - t.activeCallByNode[id] = queue[0] - t.sendCall(t.activeCallByNode[id]) - if len(queue) == 1 { - delete(t.callQueue, id) - } else { - copy(queue, queue[1:]) - t.callQueue[id] = queue[:len(queue)-1] - } -} - -// sendCall encodes and sends a request packet to the call's recipient node. -// This performs a handshake if needed. -func (t *UDPv5) sendCall(c *callV5) { - // The call might have a nonce from a previous handshake attempt. Remove the entry for - // the old nonce because we're about to generate a new nonce for this call. - if c.nonce != (v5wire.Nonce{}) { - delete(t.activeCallByAuth, c.nonce) - } - - newNonce, _ := t.send(c.id, c.addr, c.packet, c.challenge) - c.nonce = newNonce - t.activeCallByAuth[newNonce] = c - t.startResponseTimeout(c) -} - -// sendResponse sends a response packet to the given node. -// This doesn't trigger a handshake even if no keys are available. 
-func (t *UDPv5) sendResponse(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet) error { - _, err := t.send(toID, toAddr, packet, nil) - return err -} - -func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet) { - select { - case t.sendCh <- sendRequest{toID, toAddr, packet}: - case <-t.closeCtx.Done(): - } -} - -// send sends a packet to the given node. -func (t *UDPv5) send(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet, c *v5wire.Whoareyou) (v5wire.Nonce, error) { - addr := toAddr.String() - t.logcontext = append(t.logcontext[:0], "id", toID, "addr", addr) - t.logcontext = packet.AppendLogInfo(t.logcontext) - - enc, nonce, err := t.codec.Encode(toID, addr, packet, c) - if err != nil { - t.logcontext = append(t.logcontext, "err", err) - t.log.Warn(">> "+packet.Name(), t.logcontext...) - return nonce, err - } - - _, err = t.conn.WriteToUDPAddrPort(enc, toAddr) - t.log.Trace(">> "+packet.Name(), t.logcontext...) - return nonce, err -} - -// readLoop runs in its own goroutine and reads packets from the network. -func (t *UDPv5) readLoop() { - defer t.wg.Done() - - buf := make([]byte, maxPacketSize) - for range t.readNextCh { - nbytes, from, err := t.conn.ReadFromUDPAddrPort(buf) - if netutil.IsTemporaryError(err) { - // Ignore temporary read errors. - t.log.Debug("Temporary UDP read error", "err", err) - continue - } else if err != nil { - // Shut down the loop for permanent errors. - if !errors.Is(err, io.EOF) { - t.log.Debug("UDP read error", "err", err) - } - return - } - t.dispatchReadPacket(from, buf[:nbytes]) - } -} - -// dispatchReadPacket sends a packet into the dispatch loop. -func (t *UDPv5) dispatchReadPacket(from netip.AddrPort, content []byte) bool { - // Unwrap IPv4-in-6 source address. 
- if from.Addr().Is4In6() { - from = netip.AddrPortFrom(netip.AddrFrom4(from.Addr().As4()), from.Port()) - } - select { - case t.packetInCh <- ReadPacket{content, from}: - return true - case <-t.closeCtx.Done(): - return false - } -} - -// handlePacket decodes and processes an incoming packet from the network. -func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr netip.AddrPort) error { - addr := fromAddr.String() - fromID, fromNode, packet, err := t.codec.Decode(rawpacket, addr) - if err != nil { - if t.unhandled != nil && v5wire.IsInvalidHeader(err) { - // The packet seems unrelated to discv5, send it to the next protocol. - // t.log.Trace("Unhandled discv5 packet", "id", fromID, "addr", addr, "err", err) - up := ReadPacket{Data: make([]byte, len(rawpacket)), Addr: fromAddr} - copy(up.Data, rawpacket) - t.unhandled <- up - return nil - } - t.log.Debug("Bad discv5 packet", "id", fromID, "addr", addr, "err", err) - return err - } - if fromNode != nil { - // Handshake succeeded, add to table. - t.tab.addInboundNode(fromNode) - } - if packet.Kind() != v5wire.WhoareyouPacket { - // WHOAREYOU logged separately to report errors. - t.logcontext = append(t.logcontext[:0], "id", fromID, "addr", addr) - t.logcontext = packet.AppendLogInfo(t.logcontext) - t.log.Trace("<< "+packet.Name(), t.logcontext...) - } - t.handle(packet, fromID, fromAddr) - return nil -} - -// handleCallResponse dispatches a response packet to the call waiting for it. 
-func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr netip.AddrPort, p v5wire.Packet) bool { - ac := t.activeCallByNode[fromID] - if ac == nil || !bytes.Equal(p.RequestID(), ac.reqid) { - t.log.Debug(fmt.Sprintf("Unsolicited/late %s response", p.Name()), "id", fromID, "addr", fromAddr) - return false - } - if fromAddr != ac.addr { - t.log.Debug(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr) - return false - } - if p.Kind() != ac.responseType { - t.log.Debug(fmt.Sprintf("Wrong discv5 response type %s", p.Name()), "id", fromID, "addr", fromAddr) - return false - } - t.startResponseTimeout(ac) - ac.ch <- p - return true -} - -// getNode looks for a node record in table and database. -func (t *UDPv5) getNode(id enode.ID) *enode.Node { - if n := t.tab.getNode(id); n != nil { - return n - } - if n := t.localNode.Database().Node(id); n != nil { - return n - } - return nil -} - -// handle processes incoming packets according to their message type. -func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr netip.AddrPort) { - switch p := p.(type) { - case *v5wire.Unknown: - t.handleUnknown(p, fromID, fromAddr) - case *v5wire.Whoareyou: - t.handleWhoareyou(p, fromID, fromAddr) - case *v5wire.Ping: - t.handlePing(p, fromID, fromAddr) - case *v5wire.Pong: - if t.handleCallResponse(fromID, fromAddr, p) { - toAddr := netip.AddrPortFrom(netutil.IPToAddr(p.ToIP), p.ToPort) - t.localNode.UDPEndpointStatement(fromAddr, toAddr) - } - case *v5wire.Findnode: - t.handleFindnode(p, fromID, fromAddr) - case *v5wire.Nodes: - t.handleCallResponse(fromID, fromAddr, p) - case *v5wire.TalkRequest: - t.talk.handleRequest(fromID, fromAddr, p) - case *v5wire.TalkResponse: - t.handleCallResponse(fromID, fromAddr, p) - } -} - -// handleUnknown initiates a handshake by responding with WHOAREYOU. 
-func (t *UDPv5) handleUnknown(p *v5wire.Unknown, fromID enode.ID, fromAddr netip.AddrPort) { - challenge := &v5wire.Whoareyou{Nonce: p.Nonce} - crand.Read(challenge.IDNonce[:]) - if n := t.getNode(fromID); n != nil { - challenge.Node = n - challenge.RecordSeq = n.Seq() - } - t.sendResponse(fromID, fromAddr, challenge) -} - -var ( - errChallengeNoCall = errors.New("no matching call") - errChallengeTwice = errors.New("second handshake") -) - -// handleWhoareyou resends the active call as a handshake packet. -func (t *UDPv5) handleWhoareyou(p *v5wire.Whoareyou, fromID enode.ID, fromAddr netip.AddrPort) { - c, err := t.matchWithCall(fromID, p.Nonce) - if err != nil { - t.log.Debug("Invalid "+p.Name(), "addr", fromAddr, "err", err) - return - } - - if c.node == nil { - // Can't perform handshake because we don't have the ENR. - t.log.Debug("Can't handle "+p.Name(), "addr", fromAddr, "err", "call has no ENR") - c.err <- errors.New("remote wants handshake, but call has no ENR") - return - } - // Resend the call that was answered by WHOAREYOU. - t.log.Trace("<< "+p.Name(), "id", c.node.ID(), "addr", fromAddr) - c.handshakeCount++ - c.challenge = p - p.Node = c.node - t.sendCall(c) -} - -// matchWithCall checks whether a handshake attempt matches the active call. -func (t *UDPv5) matchWithCall(fromID enode.ID, nonce v5wire.Nonce) (*callV5, error) { - c := t.activeCallByAuth[nonce] - if c == nil { - return nil, errChallengeNoCall - } - if c.handshakeCount > 0 { - return nil, errChallengeTwice - } - return c, nil -} - -// handlePing sends a PONG response. -func (t *UDPv5) handlePing(p *v5wire.Ping, fromID enode.ID, fromAddr netip.AddrPort) { - var remoteIP net.IP - // Handle IPv4 mapped IPv6 addresses in the event the local node is binded - // to an ipv6 interface. 
- if fromAddr.Addr().Is4() || fromAddr.Addr().Is4In6() { - ip4 := fromAddr.Addr().As4() - remoteIP = ip4[:] - } else { - remoteIP = fromAddr.Addr().AsSlice() - } - t.sendResponse(fromID, fromAddr, &v5wire.Pong{ - ReqID: p.ReqID, - ToIP: remoteIP, - ToPort: fromAddr.Port(), - ENRSeq: t.localNode.Node().Seq(), - }) -} - -// handleFindnode returns nodes to the requester. -func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr netip.AddrPort) { - nodes := t.collectTableNodes(fromAddr.Addr(), p.Distances, findnodeResultLimit) - for _, resp := range packNodes(p.ReqID, nodes) { - t.sendResponse(fromID, fromAddr, resp) - } -} - -// collectTableNodes creates a FINDNODE result set for the given distances. -func (t *UDPv5) collectTableNodes(rip netip.Addr, distances []uint, limit int) []*enode.Node { - var bn []*enode.Node - var nodes []*enode.Node - processed := make(map[uint]struct{}) - for _, dist := range distances { - // Reject duplicate / invalid distances. - _, seen := processed[dist] - if seen || dist > 256 { - continue - } - processed[dist] = struct{}{} - - checkLive := !t.tab.cfg.NoFindnodeLivenessCheck - for _, n := range t.tab.appendBucketNodes(dist, bn[:0], checkLive) { - // Apply some pre-checks to avoid sending invalid nodes. - // Note liveness is checked by appendLiveNodes. - if netutil.CheckRelayAddr(rip, n.IPAddr()) != nil { - continue - } - nodes = append(nodes, n) - if len(nodes) >= limit { - return nodes - } - } - } - return nodes -} - -// packNodes creates NODES response packets for the given node list. -func packNodes(reqid []byte, nodes []*enode.Node) []*v5wire.Nodes { - if len(nodes) == 0 { - return []*v5wire.Nodes{{ReqID: reqid, RespCount: 1}} - } - - // This limit represents the available space for nodes in output packets. Maximum - // packet size is 1280, and out of this ~80 bytes will be taken up by the packet - // frame. 
So limiting to 1000 bytes here leaves 200 bytes for other fields of the - // NODES message, which is a lot. - const sizeLimit = 1000 - - var resp []*v5wire.Nodes - for len(nodes) > 0 { - p := &v5wire.Nodes{ReqID: reqid} - size := uint64(0) - for len(nodes) > 0 { - r := nodes[0].Record() - if size += r.Size(); size > sizeLimit { - break - } - p.Nodes = append(p.Nodes, r) - nodes = nodes[1:] - } - resp = append(resp, p) - } - for _, msg := range resp { - msg.RespCount = uint8(len(resp)) - } - return resp -} diff --git a/discvx/v5_udp_test.go b/discvx/v5_udp_test.go deleted file mode 100644 index ad4d4aaf..00000000 --- a/discvx/v5_udp_test.go +++ /dev/null @@ -1,859 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discvx - -import ( - "bytes" - "crypto/ecdsa" - "encoding/binary" - "fmt" - "math/rand" - "net" - "net/netip" - "reflect" - "slices" - "testing" - "time" - - "github.com/ethereum/go-ethereum/internal/testlog" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/discover/v4wire" - "github.com/ethereum/go-ethereum/p2p/discover/v5wire" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/require" -) - -// Real sockets, real crypto: this test checks end-to-end connectivity for UDPv5. -func TestUDPv5_lookupE2E(t *testing.T) { - t.Parallel() - - const N = 5 - var nodes []*UDPv5 - for i := 0; i < N; i++ { - var cfg Config - if len(nodes) > 0 { - bn := nodes[0].Self() - cfg.Bootnodes = []*enode.Node{bn} - } - node := startLocalhostV5(t, cfg) - nodes = append(nodes, node) - defer node.Close() - } - last := nodes[N-1] - target := nodes[rand.Intn(N-2)].Self() - - // It is expected that all nodes can be found. - expectedResult := make([]*enode.Node, len(nodes)) - for i := range nodes { - expectedResult[i] = nodes[i].Self() - } - slices.SortFunc(expectedResult, func(a, b *enode.Node) int { - return enode.DistCmp(target.ID(), a.ID(), b.ID()) - }) - - // Do the lookup. - results := last.Lookup(target.ID()) - if err := checkNodesEqual(results, expectedResult); err != nil { - t.Fatalf("lookup returned wrong results: %v", err) - } -} - -func startLocalhostV5(t *testing.T, cfg Config) *UDPv5 { - cfg.PrivateKey = newkey() - db, _ := enode.OpenDB("") - ln := enode.NewLocalNode(db, cfg.PrivateKey) - - // Prefix logs with node ID. - lprefix := fmt.Sprintf("(%s)", ln.ID().TerminalString()) - cfg.Log = testlog.Logger(t, log.LevelTrace).With("node-id", lprefix) - - // Listen. 
- socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{127, 0, 0, 1}}) - if err != nil { - t.Fatal(err) - } - realaddr := socket.LocalAddr().(*net.UDPAddr) - ln.SetStaticIP(realaddr.IP) - ln.Set(enr.UDP(realaddr.Port)) - udp, err := ListenV5(socket, ln, cfg) - if err != nil { - t.Fatal(err) - } - return udp -} - -// This test checks that incoming PING calls are handled correctly. -func TestUDPv5_pingHandling(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - test.packetIn(&v5wire.Ping{ReqID: []byte("foo")}) - test.waitPacketOut(func(p *v5wire.Pong, addr netip.AddrPort, _ v5wire.Nonce) { - if !bytes.Equal(p.ReqID, []byte("foo")) { - t.Error("wrong request ID in response:", p.ReqID) - } - if p.ENRSeq != test.table.self().Seq() { - t.Error("wrong ENR sequence number in response:", p.ENRSeq) - } - }) -} - -// This test checks that incoming 'unknown' packets trigger the handshake. -func TestUDPv5_unknownPacket(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - nonce := v5wire.Nonce{1, 2, 3} - check := func(p *v5wire.Whoareyou, wantSeq uint64) { - t.Helper() - if p.Nonce != nonce { - t.Error("wrong nonce in WHOAREYOU:", p.Nonce, nonce) - } - if p.IDNonce == ([16]byte{}) { - t.Error("all zero ID nonce") - } - if p.RecordSeq != wantSeq { - t.Errorf("wrong record seq %d in WHOAREYOU, want %d", p.RecordSeq, wantSeq) - } - } - - // Unknown packet from unknown node. - test.packetIn(&v5wire.Unknown{Nonce: nonce}) - test.waitPacketOut(func(p *v5wire.Whoareyou, addr netip.AddrPort, _ v5wire.Nonce) { - check(p, 0) - }) - - // Make node known. - n := test.getNode(test.remotekey, test.remoteaddr).Node() - test.table.addFoundNode(n, false) - - test.packetIn(&v5wire.Unknown{Nonce: nonce}) - test.waitPacketOut(func(p *v5wire.Whoareyou, addr netip.AddrPort, _ v5wire.Nonce) { - check(p, n.Seq()) - }) -} - -// This test checks that incoming FINDNODE calls are handled correctly. 
-func TestUDPv5_findnodeHandling(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - // Create test nodes and insert them into the table. - nodes253 := nodesAtDistance(test.table.self().ID(), 253, 16) - nodes249 := nodesAtDistance(test.table.self().ID(), 249, 4) - nodes248 := nodesAtDistance(test.table.self().ID(), 248, 10) - fillTable(test.table, nodes253, true) - fillTable(test.table, nodes249, true) - fillTable(test.table, nodes248, true) - - // Requesting with distance zero should return the node's own record. - test.packetIn(&v5wire.Findnode{ReqID: []byte{0}, Distances: []uint{0}}) - test.expectNodes([]byte{0}, 1, []*enode.Node{test.udp.Self()}) - - // Requesting with distance > 256 shouldn't crash. - test.packetIn(&v5wire.Findnode{ReqID: []byte{1}, Distances: []uint{4234098}}) - test.expectNodes([]byte{1}, 1, nil) - - // Requesting with empty distance list shouldn't crash either. - test.packetIn(&v5wire.Findnode{ReqID: []byte{2}, Distances: []uint{}}) - test.expectNodes([]byte{2}, 1, nil) - - // This request gets no nodes because the corresponding bucket is empty. - test.packetIn(&v5wire.Findnode{ReqID: []byte{3}, Distances: []uint{254}}) - test.expectNodes([]byte{3}, 1, nil) - - // This request gets all the distance-253 nodes. - test.packetIn(&v5wire.Findnode{ReqID: []byte{4}, Distances: []uint{253}}) - test.expectNodes([]byte{4}, 2, nodes253) - - // This request gets all the distance-249 nodes and some more at 248 because - // the bucket at 249 is not full. - test.packetIn(&v5wire.Findnode{ReqID: []byte{5}, Distances: []uint{249, 248}}) - var nodes []*enode.Node - nodes = append(nodes, nodes249...) - nodes = append(nodes, nodes248[:10]...) 
- test.expectNodes([]byte{5}, 1, nodes) -} - -func (test *udpV5Test) expectNodes(wantReqID []byte, wantTotal uint8, wantNodes []*enode.Node) { - nodeSet := make(map[enode.ID]*enr.Record, len(wantNodes)) - for _, n := range wantNodes { - nodeSet[n.ID()] = n.Record() - } - - for { - test.waitPacketOut(func(p *v5wire.Nodes, addr netip.AddrPort, _ v5wire.Nonce) { - if !bytes.Equal(p.ReqID, wantReqID) { - test.t.Fatalf("wrong request ID %v in response, want %v", p.ReqID, wantReqID) - } - if p.RespCount != wantTotal { - test.t.Fatalf("wrong total response count %d, want %d", p.RespCount, wantTotal) - } - for _, record := range p.Nodes { - n, _ := enode.New(enode.ValidSchemesForTesting, record) - want := nodeSet[n.ID()] - if want == nil { - test.t.Fatalf("unexpected node in response: %v", n) - } - if !reflect.DeepEqual(record, want) { - test.t.Fatalf("wrong record in response: %v", n) - } - delete(nodeSet, n.ID()) - } - }) - if len(nodeSet) == 0 { - return - } - } -} - -// This test checks that outgoing PING calls work. -func TestUDPv5_pingCall(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - remote := test.getNode(test.remotekey, test.remoteaddr).Node() - done := make(chan error, 1) - - // This ping times out. - go func() { - _, err := test.udp.ping(remote) - done <- err - }() - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) {}) - if err := <-done; err != ErrTimeout { - t.Fatalf("want ErrTimeout, got %q", err) - } - - // This ping works. - go func() { - _, err := test.udp.ping(remote) - done <- err - }() - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { - test.packetInFrom(test.remotekey, test.remoteaddr, &v5wire.Pong{ReqID: p.ReqID}) - }) - if err := <-done; err != nil { - t.Fatal(err) - } - - // This ping gets a reply from the wrong endpoint. 
- go func() { - _, err := test.udp.ping(remote) - done <- err - }() - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { - wrongAddr := netip.MustParseAddrPort("33.44.55.22:10101") - test.packetInFrom(test.remotekey, wrongAddr, &v5wire.Pong{ReqID: p.ReqID}) - }) - if err := <-done; err != ErrTimeout { - t.Fatalf("want ErrTimeout for reply from wrong IP, got %q", err) - } -} - -// This test checks that outgoing FINDNODE calls work and multiple NODES -// replies are aggregated. -func TestUDPv5_findnodeCall(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - // Launch the request: - var ( - distances = []uint{230} - remote = test.getNode(test.remotekey, test.remoteaddr).Node() - nodes = nodesAtDistance(remote.ID(), int(distances[0]), 8) - done = make(chan error, 1) - response []*enode.Node - ) - go func() { - var err error - response, err = test.udp.FindNode(remote, distances) - done <- err - }() - - // Serve the responses: - test.waitPacketOut(func(p *v5wire.Findnode, addr netip.AddrPort, _ v5wire.Nonce) { - if !reflect.DeepEqual(p.Distances, distances) { - t.Fatalf("wrong distances in request: %v", p.Distances) - } - test.packetIn(&v5wire.Nodes{ - ReqID: p.ReqID, - RespCount: 2, - Nodes: nodesToRecords(nodes[:4]), - }) - test.packetIn(&v5wire.Nodes{ - ReqID: p.ReqID, - RespCount: 2, - Nodes: nodesToRecords(nodes[4:]), - }) - }) - - // Check results: - if err := <-done; err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !reflect.DeepEqual(response, nodes) { - t.Fatalf("wrong nodes in response") - } - - // TODO: check invalid IPs - // TODO: check invalid/unsigned record -} - -// This test checks that pending calls are re-sent when a handshake happens. 
-func TestUDPv5_callResend(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - remote := test.getNode(test.remotekey, test.remoteaddr).Node() - done := make(chan error, 2) - go func() { - _, err := test.udp.ping(remote) - done <- err - }() - go func() { - _, err := test.udp.ping(remote) - done <- err - }() - - // Ping answered by WHOAREYOU. - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { - test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) - }) - // Ping should be re-sent. - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { - test.packetIn(&v5wire.Pong{ReqID: p.ReqID}) - }) - // Answer the other ping. - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { - test.packetIn(&v5wire.Pong{ReqID: p.ReqID}) - }) - if err := <-done; err != nil { - t.Fatalf("unexpected ping error: %v", err) - } - if err := <-done; err != nil { - t.Fatalf("unexpected ping error: %v", err) - } -} - -// This test ensures we don't allow multiple rounds of WHOAREYOU for a single call. -func TestUDPv5_multipleHandshakeRounds(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - remote := test.getNode(test.remotekey, test.remoteaddr).Node() - done := make(chan error, 1) - go func() { - _, err := test.udp.ping(remote) - done <- err - }() - - // Ping answered by WHOAREYOU. - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { - test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) - }) - // Ping answered by WHOAREYOU again. - test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { - test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) - }) - if err := <-done; err != ErrTimeout { - t.Fatalf("unexpected ping error: %q", err) - } -} - -// This test checks that calls with n replies may take up to n * respTimeout. 
-func TestUDPv5_callTimeoutReset(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - // Launch the request: - var ( - distance = uint(230) - remote = test.getNode(test.remotekey, test.remoteaddr).Node() - nodes = nodesAtDistance(remote.ID(), int(distance), 8) - done = make(chan error, 1) - ) - go func() { - _, err := test.udp.FindNode(remote, []uint{distance}) - done <- err - }() - - // Serve two responses, slowly. - test.waitPacketOut(func(p *v5wire.Findnode, addr netip.AddrPort, _ v5wire.Nonce) { - time.Sleep(respTimeout - 50*time.Millisecond) - test.packetIn(&v5wire.Nodes{ - ReqID: p.ReqID, - RespCount: 2, - Nodes: nodesToRecords(nodes[:4]), - }) - - time.Sleep(respTimeout - 50*time.Millisecond) - test.packetIn(&v5wire.Nodes{ - ReqID: p.ReqID, - RespCount: 2, - Nodes: nodesToRecords(nodes[4:]), - }) - }) - if err := <-done; err != nil { - t.Fatalf("unexpected error: %q", err) - } -} - -// This test checks that TALKREQ calls the registered handler function. -func TestUDPv5_talkHandling(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - var recvMessage []byte - test.udp.RegisterTalkHandler("test", func(id enode.ID, addr *net.UDPAddr, message []byte) []byte { - recvMessage = message - return []byte("test response") - }) - - // Successful case: - test.packetIn(&v5wire.TalkRequest{ - ReqID: []byte("foo"), - Protocol: "test", - Message: []byte("test request"), - }) - test.waitPacketOut(func(p *v5wire.TalkResponse, addr netip.AddrPort, _ v5wire.Nonce) { - if !bytes.Equal(p.ReqID, []byte("foo")) { - t.Error("wrong request ID in response:", p.ReqID) - } - if string(p.Message) != "test response" { - t.Errorf("wrong talk response message: %q", p.Message) - } - if string(recvMessage) != "test request" { - t.Errorf("wrong message received in handler: %q", recvMessage) - } - }) - - // Check that empty response is returned for unregistered protocols. 
- recvMessage = nil - test.packetIn(&v5wire.TalkRequest{ - ReqID: []byte("2"), - Protocol: "wrong", - Message: []byte("test request"), - }) - test.waitPacketOut(func(p *v5wire.TalkResponse, addr netip.AddrPort, _ v5wire.Nonce) { - if !bytes.Equal(p.ReqID, []byte("2")) { - t.Error("wrong request ID in response:", p.ReqID) - } - if string(p.Message) != "" { - t.Errorf("wrong talk response message: %q", p.Message) - } - if recvMessage != nil { - t.Errorf("handler was called for wrong protocol: %q", recvMessage) - } - }) -} - -// This test checks that outgoing TALKREQ calls work. -func TestUDPv5_talkRequest(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - remote := test.getNode(test.remotekey, test.remoteaddr).Node() - done := make(chan error, 1) - - // This request times out. - go func() { - _, err := test.udp.TalkRequest(remote, "test", []byte("test request")) - done <- err - }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) {}) - if err := <-done; err != ErrTimeout { - t.Fatalf("want ErrTimeout, got %q", err) - } - - // This request works. - go func() { - _, err := test.udp.TalkRequest(remote, "test", []byte("test request")) - done <- err - }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) { - if p.Protocol != "test" { - t.Errorf("wrong protocol ID in talk request: %q", p.Protocol) - } - if string(p.Message) != "test request" { - t.Errorf("wrong message talk request: %q", p.Message) - } - test.packetInFrom(test.remotekey, test.remoteaddr, &v5wire.TalkResponse{ - ReqID: p.ReqID, - Message: []byte("test response"), - }) - }) - if err := <-done; err != nil { - t.Fatal(err) - } - - // Also check requesting without ENR. 
- go func() { - _, err := test.udp.TalkRequestToID(remote.ID(), test.remoteaddr, "test", []byte("test request 2")) - done <- err - }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) { - if p.Protocol != "test" { - t.Errorf("wrong protocol ID in talk request: %q", p.Protocol) - } - if string(p.Message) != "test request 2" { - t.Errorf("wrong message talk request: %q", p.Message) - } - test.packetInFrom(test.remotekey, test.remoteaddr, &v5wire.TalkResponse{ - ReqID: p.ReqID, - Message: []byte("test response 2"), - }) - }) - if err := <-done; err != nil { - t.Fatal(err) - } -} - -// This test checks that lookupDistances works. -func TestUDPv5_lookupDistances(t *testing.T) { - test := newUDPV5Test(t) - lnID := test.table.self().ID() - - t.Run("target distance of 1", func(t *testing.T) { - node := nodeAtDistance(lnID, 1, intIP(0)) - dists := lookupDistances(lnID, node.ID()) - require.Equal(t, []uint{1, 2, 3}, dists) - }) - - t.Run("target distance of 2", func(t *testing.T) { - node := nodeAtDistance(lnID, 2, intIP(0)) - dists := lookupDistances(lnID, node.ID()) - require.Equal(t, []uint{2, 3, 1}, dists) - }) - - t.Run("target distance of 128", func(t *testing.T) { - node := nodeAtDistance(lnID, 128, intIP(0)) - dists := lookupDistances(lnID, node.ID()) - require.Equal(t, []uint{128, 129, 127}, dists) - }) - - t.Run("target distance of 255", func(t *testing.T) { - node := nodeAtDistance(lnID, 255, intIP(0)) - dists := lookupDistances(lnID, node.ID()) - require.Equal(t, []uint{255, 256, 254}, dists) - }) - - t.Run("target distance of 256", func(t *testing.T) { - node := nodeAtDistance(lnID, 256, intIP(0)) - dists := lookupDistances(lnID, node.ID()) - require.Equal(t, []uint{256, 255, 254}, dists) - }) -} - -// This test checks that lookup works. -func TestUDPv5_lookup(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - - // Lookup on empty table returns no nodes. 
- if results := test.udp.Lookup(lookupTestnet.target.ID()); len(results) > 0 { - t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results) - } - - // Ensure the tester knows all nodes in lookupTestnet by IP. - for d, nn := range lookupTestnet.dists { - for i, key := range nn { - n := lookupTestnet.node(d, i) - addr, _ := n.UDPEndpoint() - test.getNode(key, addr) - } - } - - // Seed table with initial node. - initialNode := lookupTestnet.node(256, 0) - fillTable(test.table, []*enode.Node{initialNode}, true) - - // Start the lookup. - resultC := make(chan []*enode.Node, 1) - go func() { - resultC <- test.udp.Lookup(lookupTestnet.target.ID()) - test.close() - }() - - // Answer lookup packets. - asked := make(map[enode.ID]bool) - for done := false; !done; { - done = test.waitPacketOut(func(p v5wire.Packet, to netip.AddrPort, _ v5wire.Nonce) { - recipient, key := lookupTestnet.nodeByAddr(to) - switch p := p.(type) { - case *v5wire.Ping: - test.packetInFrom(key, to, &v5wire.Pong{ReqID: p.ReqID}) - case *v5wire.Findnode: - if asked[recipient.ID()] { - t.Error("Asked node", recipient.ID(), "twice") - } - asked[recipient.ID()] = true - nodes := lookupTestnet.neighborsAtDistances(recipient, p.Distances, 16) - t.Logf("Got FINDNODE for %v, returning %d nodes", p.Distances, len(nodes)) - for _, resp := range packNodes(p.ReqID, nodes) { - test.packetInFrom(key, to, resp) - } - } - }) - } - - // Verify result nodes. - results := <-resultC - checkLookupResults(t, lookupTestnet, results) -} - -// This test checks the local node can be utilised to set key-values. -func TestUDPv5_LocalNode(t *testing.T) { - t.Parallel() - var cfg Config - node := startLocalhostV5(t, cfg) - defer node.Close() - localNd := node.LocalNode() - - // set value in node's local record - testVal := [4]byte{'A', 'B', 'C', 'D'} - localNd.Set(enr.WithEntry("testing", &testVal)) - - // retrieve the value from self to make sure it matches. 
- outputVal := [4]byte{} - if err := node.Self().Load(enr.WithEntry("testing", &outputVal)); err != nil { - t.Errorf("Could not load value from record: %v", err) - } - if testVal != outputVal { - t.Errorf("Wanted %#x to be retrieved from the record but instead got %#x", testVal, outputVal) - } -} - -func TestUDPv5_PingWithIPV4MappedAddress(t *testing.T) { - t.Parallel() - test := newUDPV5Test(t) - defer test.close() - - rawIP := netip.AddrFrom4([4]byte{0xFF, 0x12, 0x33, 0xE5}) - test.remoteaddr = netip.AddrPortFrom(netip.AddrFrom16(rawIP.As16()), 0) - remote := test.getNode(test.remotekey, test.remoteaddr).Node() - done := make(chan struct{}, 1) - - // This handler will truncate the ipv4-mapped in ipv6 address. - go func() { - test.udp.handlePing(&v5wire.Ping{ENRSeq: 1}, remote.ID(), test.remoteaddr) - done <- struct{}{} - }() - test.waitPacketOut(func(p *v5wire.Pong, addr netip.AddrPort, _ v5wire.Nonce) { - if len(p.ToIP) == net.IPv6len { - t.Error("Received untruncated ip address") - } - if len(p.ToIP) != net.IPv4len { - t.Errorf("Received ip address with incorrect length: %d", len(p.ToIP)) - } - if !p.ToIP.Equal(rawIP.AsSlice()) { - t.Errorf("Received incorrect ip address: wanted %s but received %s", rawIP.String(), p.ToIP.String()) - } - }) - <-done -} - -// udpV5Test is the framework for all tests above. -// It runs the UDPv5 transport on a virtual socket and allows testing outgoing packets. -type udpV5Test struct { - t *testing.T - pipe *dgramPipe - table *Table - db *enode.DB - udp *UDPv5 - localkey, remotekey *ecdsa.PrivateKey - remoteaddr netip.AddrPort - nodesByID map[enode.ID]*enode.LocalNode - nodesByIP map[netip.Addr]*enode.LocalNode -} - -// testCodec is the packet encoding used by protocol tests. This codec does not perform encryption. 
-type testCodec struct { - test *udpV5Test - id enode.ID - ctr uint64 -} - -type testCodecFrame struct { - NodeID enode.ID - AuthTag v5wire.Nonce - Ptype byte - Packet rlp.RawValue -} - -func (c *testCodec) Encode(toID enode.ID, addr string, p v5wire.Packet, _ *v5wire.Whoareyou) ([]byte, v5wire.Nonce, error) { - c.ctr++ - var authTag v5wire.Nonce - binary.BigEndian.PutUint64(authTag[:], c.ctr) - - penc, _ := rlp.EncodeToBytes(p) - frame, err := rlp.EncodeToBytes(testCodecFrame{c.id, authTag, p.Kind(), penc}) - return frame, authTag, err -} - -func (c *testCodec) Decode(input []byte, addr string) (enode.ID, *enode.Node, v5wire.Packet, error) { - frame, p, err := c.decodeFrame(input) - if err != nil { - return enode.ID{}, nil, nil, err - } - return frame.NodeID, nil, p, nil -} - -func (c *testCodec) decodeFrame(input []byte) (frame testCodecFrame, p v5wire.Packet, err error) { - if err = rlp.DecodeBytes(input, &frame); err != nil { - return frame, nil, fmt.Errorf("invalid frame: %v", err) - } - switch frame.Ptype { - case v5wire.UnknownPacket: - dec := new(v5wire.Unknown) - err = rlp.DecodeBytes(frame.Packet, &dec) - p = dec - case v5wire.WhoareyouPacket: - dec := new(v5wire.Whoareyou) - err = rlp.DecodeBytes(frame.Packet, &dec) - p = dec - default: - p, err = v5wire.DecodeMessage(frame.Ptype, frame.Packet) - } - return frame, p, err -} - -func newUDPV5Test(t *testing.T) *udpV5Test { - test := &udpV5Test{ - t: t, - pipe: newpipe(), - localkey: newkey(), - remotekey: newkey(), - remoteaddr: netip.MustParseAddrPort("10.0.1.99:30303"), - nodesByID: make(map[enode.ID]*enode.LocalNode), - nodesByIP: make(map[netip.Addr]*enode.LocalNode), - } - test.db, _ = enode.OpenDB("") - ln := enode.NewLocalNode(test.db, test.localkey) - ln.SetStaticIP(net.IP{10, 0, 0, 1}) - ln.Set(enr.UDP(30303)) - test.udp, _ = ListenV5(test.pipe, ln, Config{ - PrivateKey: test.localkey, - Log: testlog.Logger(t, log.LvlTrace), - ValidSchemes: enode.ValidSchemesForTesting, - }) - test.udp.codec = 
&testCodec{test: test, id: ln.ID()} - test.table = test.udp.tab - test.nodesByID[ln.ID()] = ln - // Wait for initial refresh so the table doesn't send unexpected findnode. - <-test.table.initDone - return test -} - -// handles a packet as if it had been sent to the transport. -func (test *udpV5Test) packetIn(packet v5wire.Packet) { - test.t.Helper() - test.packetInFrom(test.remotekey, test.remoteaddr, packet) -} - -// packetInFrom handles a packet as if it had been sent to the transport by the key/endpoint. -func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr netip.AddrPort, packet v5wire.Packet) { - test.t.Helper() - - ln := test.getNode(key, addr) - codec := &testCodec{test: test, id: ln.ID()} - enc, _, err := codec.Encode(test.udp.Self().ID(), addr.String(), packet, nil) - if err != nil { - test.t.Errorf("%s encode error: %v", packet.Name(), err) - } - if test.udp.dispatchReadPacket(addr, enc) { - <-test.udp.readNextCh // unblock UDPv5.dispatch - } -} - -// getNode ensures the test knows about a node at the given endpoint. -func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr netip.AddrPort) *enode.LocalNode { - id := v4wire.EncodePubkey(&key.PublicKey).ID() - ln := test.nodesByID[id] - if ln == nil { - db, _ := enode.OpenDB("") - ln = enode.NewLocalNode(db, key) - ln.SetStaticIP(addr.Addr().AsSlice()) - ln.Set(enr.UDP(addr.Port())) - test.nodesByID[id] = ln - } - test.nodesByIP[addr.Addr()] = ln - return ln -} - -// waitPacketOut waits for the next output packet and handles it using the given 'validate' -// function. The function must be of type func (X, netip.AddrPort, v5wire.Nonce) where X is -// assignable to packetV5. 
-func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { - test.t.Helper() - - fn := reflect.ValueOf(validate) - exptype := fn.Type().In(0) - - dgram, err := test.pipe.receive() - if err == errClosed { - return true - } - if err == ErrTimeout { - test.t.Fatalf("timed out waiting for %v", exptype) - return false - } - ln := test.nodesByIP[dgram.to.Addr()] - if ln == nil { - test.t.Fatalf("attempt to send to non-existing node %v", &dgram.to) - return false - } - codec := &testCodec{test: test, id: ln.ID()} - frame, p, err := codec.decodeFrame(dgram.data) - if err != nil { - test.t.Errorf("sent packet decode error: %v", err) - return false - } - if !reflect.TypeOf(p).AssignableTo(exptype) { - test.t.Errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype) - return false - } - fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(dgram.to), reflect.ValueOf(frame.AuthTag)}) - return false -} - -func (test *udpV5Test) close() { - test.t.Helper() - - test.udp.Close() - test.db.Close() - for id, n := range test.nodesByID { - if id != test.udp.Self().ID() { - n.Database().Close() - } - } - if len(test.pipe.queue) != 0 { - test.t.Fatalf("%d unmatched UDP packets in queue", len(test.pipe.queue)) - } -} diff --git a/go.mod b/go.mod index 2a2f95ec..bc808175 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/dennis-tra/nebula-crawler -go 1.23.1 +go 1.23 require ( github.com/benbjohnson/clock v1.3.5 github.com/cenkalti/backoff/v4 v4.3.0 + github.com/deckarep/golang-set/v2 v2.6.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/ethereum/go-ethereum v1.14.11 github.com/friendsofgo/errors v0.9.2 @@ -67,7 +68,6 @@ require ( github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect 
github.com/elastic/gosigar v0.14.3 // indirect github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731 // indirect @@ -84,6 +84,7 @@ require ( github.com/gofrs/flock v0.12.1 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect @@ -211,6 +212,13 @@ require ( rsc.io/tmplfunc v0.0.3 // indirect ) -// replace go-libp2p with fork (branch v0.28.3-nebula). Changes: -// - avoid running into dial backoffs even if forceDirectDial is set to false -replace github.com/libp2p/go-libp2p => github.com/plprobelab/go-libp2p v0.36.6-0.20241010102656-740d456bfc63 +replace ( + // replace go-ethereum with fork (branch nebula). Changes: + // - move everything inside the devp2p/internal package into devp2p to make it accessible + // - add Identify method + github.com/ethereum/go-ethereum => github.com/probe-lab/go-ethereum v0.0.0-20241016152650-2f1ae6611be1 + + // replace go-libp2p with fork (branch v0.28.3-nebula). 
Changes: + // - avoid running into dial backoffs even if forceDirectDial is set to false + github.com/libp2p/go-libp2p => github.com/probe-lab/go-libp2p v0.36.6-0.20241010102656-740d456bfc63 +) diff --git a/go.sum b/go.sum index 229d2f4b..06437e3a 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,6 @@ github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731 h1:R/ZjJpjQK github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731/go.mod h1:M9R1FoZ3y//hwwnJtO51ypFGwm8ZfpxPT/ZLtO1mcgQ= github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs= github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY= -github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A= github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -243,6 +241,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getsentry/sentry-go v0.29.0 h1:YtWluuCFg9OfcqnaujpY918N/AhCCwarIDWOYSBAjCA= github.com/getsentry/sentry-go v0.29.0/go.mod h1:jhPesDAL0Q0W2+2YEuVOvdWmVtdsr1+jtBrlDEVWwLY= 
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -289,6 +289,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -411,6 +413,8 @@ github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOj github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= @@ -440,6 +444,8 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= 
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= @@ -572,6 +578,8 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= @@ -612,7 +620,10 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 
h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= @@ -749,14 +760,16 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/plprobelab/go-libp2p v0.36.6-0.20241010102656-740d456bfc63 h1:OQcMVNKi6wBjDVGLoipZjAgp+vSO3xFdYi5ki3Cpr98= -github.com/plprobelab/go-libp2p v0.36.6-0.20241010102656-740d456bfc63/go.mod h1:CpszAtXxHYOcyvB7K8rSHgnNlh21eKjYbEfLoMerbEI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/probe-lab/go-ethereum v0.0.0-20241016152650-2f1ae6611be1 h1:y5CT7WqYJ5gszLHmlf8kaUrbTM0y4mbDM9Bst/UNeJ8= +github.com/probe-lab/go-ethereum v0.0.0-20241016152650-2f1ae6611be1/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= 
+github.com/probe-lab/go-libp2p v0.36.6-0.20241010102656-740d456bfc63 h1:Zn3+Q7EuwGCNEU7Xtk0hItXLwkStZP8zaPrDcUQSkmQ= +github.com/probe-lab/go-libp2p v0.36.6-0.20241010102656-740d456bfc63/go.mod h1:CpszAtXxHYOcyvB7K8rSHgnNlh21eKjYbEfLoMerbEI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -810,6 +823,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -871,6 +886,8 @@ github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -903,6 +920,8 @@ github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYN github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -1572,6 +1591,8 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/tele/tele.go b/tele/tele.go index 507b09c0..29a1bac4 100644 --- a/tele/tele.go +++ b/tele/tele.go @@ -5,8 +5,7 @@ import ( "fmt" 
"net/http" _ "net/http/pprof" - - "go.opentelemetry.io/otel/trace/noop" + "runtime" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" @@ -18,6 +17,7 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.19.0" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "go.uber.org/atomic" ) @@ -76,6 +76,9 @@ func ListenAndServe(host string, port int) { addr := fmt.Sprintf("%s:%d", host, port) log.WithField("addr", addr).Debugln("Starting telemetry endpoint") + // profile 1% of contention events + runtime.SetMutexProfileFraction(1) + http.Handle("/metrics", promhttp.Handler()) http.HandleFunc("/health", func(rw http.ResponseWriter, req *http.Request) { log.Debugln("Responding to health check")