diff --git a/.dockerignore b/.dockerignore
index df05904eab..f10af9ed76 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,12 +1,6 @@
.git
-/target/tmp
-/target/release*
-/target/debug
-/target/x86_64-unknown-linux-musl/release*/build
-/target/x86_64-unknown-linux-musl/release*/deps
-/target/x86_64-unknown-linux-musl/release*/incremental
-!/target/x86_64-unknown-linux-musl/release*/examples/
-!/target/release*/examples/
-!/target/release-lto/examples/
-!/target/release*/benchmark_client
-Dockerfile*
+target/*
+!target/release/examples/cdn-broker
+!target/release/examples/cdn-marshal
+!target/release/examples/coordinator
+!target/release/examples/single-validator
\ No newline at end of file
diff --git a/.github/workflows/build-and-push-examples.yml b/.github/workflows/build-and-push-examples.yml
new file mode 100644
index 0000000000..74aa0ffefa
--- /dev/null
+++ b/.github/workflows/build-and-push-examples.yml
@@ -0,0 +1,51 @@
+# Builds the example docker images and pushes them to ghcr.io on every push
+# to main. Requires `packages: write` plus a registry login (added below) —
+# without them the `docker push` steps fail with an authentication error.
+name: Build and Push Examples
+
+on:
+  push:
+    branches:
+      - "main"
+
+# GITHUB_TOKEN needs package write access to push to ghcr.io.
+permissions:
+  contents: read
+  packages: write
+
+jobs:
+  build-and-push-examples:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        name: Checkout Repository
+
+      - name: Install Rust
+        uses: mkroening/rust-toolchain-toml@main
+
+      - uses: Swatinem/rust-cache@v2
+        name: Enable Rust Caching
+        with:
+          shared-key: "build-release"
+          cache-on-failure: "true"
+          save-if: ${{ github.ref == 'refs/heads/main' }}
+
+      # Authenticate before the push steps; GITHUB_TOKEN is scoped by the
+      # `permissions` block above.
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build examples
+        run: cargo build --examples --release
+
+      - name: Build dockerfiles
+        run: |
+          docker build . -f crates/examples/Dockerfiles/cdn-marshal.Dockerfile -t ghcr.io/espressosystems/hotshot/cdn-marshal:main
+          docker build . -f crates/examples/Dockerfiles/cdn-broker.Dockerfile -t ghcr.io/espressosystems/hotshot/cdn-broker:main
+          docker build . -f crates/examples/Dockerfiles/coordinator.Dockerfile -t ghcr.io/espressosystems/hotshot/coordinator:main
+          docker build . -f crates/examples/Dockerfiles/single-validator.Dockerfile -t ghcr.io/espressosystems/hotshot/single-validator:main
+
+      - name: Push dockerfiles
+        run: |
+          docker push ghcr.io/espressosystems/hotshot/cdn-marshal:main
+          docker push ghcr.io/espressosystems/hotshot/cdn-broker:main
+          docker push ghcr.io/espressosystems/hotshot/coordinator:main
+          docker push ghcr.io/espressosystems/hotshot/single-validator:main
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index d8836d871d..cc8924233e 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -52,239 +52,4 @@ jobs:
env:
RUST_BACKTRACE: full
- test-examples:
- strategy:
- fail-fast: false
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- name: Checkout Repository
-
- - name: Install Rust
- uses: mkroening/rust-toolchain-toml@main
-
- - uses: Swatinem/rust-cache@v2
- name: Enable Rust Caching
- with:
- shared-key: "examples"
- cache-on-failure: "true"
- save-if: ${{ github.ref == 'refs/heads/main' }}
-
- - uses: taiki-e/install-action@just
-
- - name: Test examples
- run: |
- just example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml
- timeout-minutes: 20
-
- build-release:
- strategy:
- fail-fast: false
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- name: Checkout Repository
-
- - name: Install Rust
- uses: mkroening/rust-toolchain-toml@main
-
- - uses: Swatinem/rust-cache@v2
- name: Enable Rust Caching
- with:
- shared-key: "build-release"
- cache-on-failure: "true"
- save-if: ${{ github.ref == 'refs/heads/main' }}
-
- - uses: taiki-e/install-action@just
-
- - name: Build examples in release mode
- run: just build_release --examples --package hotshot-examples --no-default-features
-
- - name: Upload Binaries
- uses: actions/upload-artifact@v4
- with:
- name: binaries-amd64
- path: |
- target/release/examples/counter
- target/release/examples/multi-validator-libp2p
- target/release/examples/validator-libp2p
- target/release/examples/validator-combined
- target/release/examples/validator-push-cdn
- target/release/examples/orchestrator
- target/release/examples/cdn-broker
- target/release/examples/cdn-marshal
-
- build-arm-release:
- strategy:
- fail-fast: false
- runs-on: buildjet-4vcpu-ubuntu-2204-arm
- if: ${{ github.ref == 'refs/heads/main' }}
- container: ghcr.io/espressosystems/devops-rust:stable
- steps:
- - uses: actions/checkout@v4
- name: Checkout Repository
- - uses: Swatinem/rust-cache@v2
- name: Enable Rust Caching
- with:
- shared-key: "build-arm-release"
- cache-on-failure: "true"
- save-if: ${{ github.ref == 'refs/heads/main' }}
-
- - name: Build examples in release mode
- run: just build_release --examples --package hotshot-examples --no-default-features
-
- - name: Upload Binaries
- uses: actions/upload-artifact@v4
- with:
- name: binaries-aarch64
- path: |
- target/release/examples/counter
- target/release/examples/multi-validator-libp2p
- target/release/examples/validator-libp2p
- target/release/examples/validator-combined
- target/release/examples/validator-push-cdn
- target/release/examples/orchestrator
- target/release/examples/cdn-broker
- target/release/examples/cdn-marshal
-
- build-dockers:
- strategy:
- fail-fast: false
- runs-on: ubuntu-latest
- if: ${{ github.ref == 'refs/heads/main' }}
- needs: [build-release, build-arm-release, test]
- steps:
- - name: Checkout Repository
- uses: actions/checkout@v4
-
- - name: Setup Docker BuildKit (buildx)
- uses: docker/setup-buildx-action@v3
-
- - name: Login to Github Container Repo
- uses: docker/login-action@v3
- if: github.event_name != 'pull_request'
- with:
- registry: ghcr.io
- username: ${{ github.repository_owner }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Download AMD executables
- uses: actions/download-artifact@v4
- with:
- name: binaries-amd64
- path: target/amd64/release/examples
-
- - name: Download ARM executables
- uses: actions/download-artifact@v4
- with:
- name: binaries-aarch64
- path: target/arm64/release/examples
-
- - name: Generate validator-libp2p docker metadata
- uses: docker/metadata-action@v5
- id: validator-libp2p
- with:
- images: ghcr.io/espressosystems/hotshot/validator-libp2p
-
- - name: Generate validator-combined docker metadata
- uses: docker/metadata-action@v5
- id: validator-combined
- with:
- images: ghcr.io/espressosystems/hotshot/validator-combined
-
- - name: Generate validator-push-cdn docker metadata
- uses: docker/metadata-action@v5
- id: validator-push-cdn
- with:
- images: ghcr.io/espressosystems/hotshot/validator-push-cdn
-
- - name: Generate orchestrator docker metadata
- uses: docker/metadata-action@v5
- id: orchestrator
- with:
- images: ghcr.io/espressosystems/hotshot/orchestrator
-
- - name: Generate cdn-broker docker metadata
- uses: docker/metadata-action@v5
- id: cdn-broker
- with:
- images: ghcr.io/espressosystems/hotshot/cdn-broker
-
- - name: Generate cdn-marshal docker metadata
- uses: docker/metadata-action@v5
- id: cdn-marshal
- with:
- images: ghcr.io/espressosystems/hotshot/cdn-marshal
-
- - name: Build and push validator-libp2p docker
- uses: docker/build-push-action@v6
- with:
- context: ./
- file: ./docker/validator-libp2p.Dockerfile
- platforms: linux/amd64,linux/arm64
- push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.validator-libp2p.outputs.tags }}
- labels: ${{ steps.validator-libp2p.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
-
- - name: Build and push validator-combined docker
- uses: docker/build-push-action@v6
- with:
- context: ./
- file: ./docker/validator-combined.Dockerfile
- platforms: linux/amd64,linux/arm64
- push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.validator-combined.outputs.tags }}
- labels: ${{ steps.validator-combined.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
-
- - name: Build and push validator-push-cdn docker
- uses: docker/build-push-action@v6
- with:
- context: ./
- file: ./docker/validator-cdn.Dockerfile
- platforms: linux/amd64,linux/arm64
- push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.validator-push-cdn.outputs.tags }}
- labels: ${{ steps.validator-push-cdn.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
-
- - name: Build and push orchestrator docker
- uses: docker/build-push-action@v6
- with:
- context: ./
- file: ./docker/orchestrator.Dockerfile
- platforms: linux/amd64,linux/arm64
- push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.orchestrator.outputs.tags }}
- labels: ${{ steps.orchestrator.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
-
- - name: Build and push cdn-broker docker
- uses: docker/build-push-action@v6
- with:
- context: ./
- file: ./docker/cdn-broker.Dockerfile
- platforms: linux/amd64,linux/arm64
- push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.cdn-broker.outputs.tags }}
- labels: ${{ steps.cdn-broker.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
-
- - name: Build and push cdn-marshal docker
- uses: docker/build-push-action@v6
- with:
- context: ./
- file: ./docker/cdn-marshal.Dockerfile
- platforms: linux/amd64,linux/arm64
- push: ${{ github.event_name != 'pull_request' }}
- tags: ${{ steps.cdn-marshal.outputs.tags }}
- labels: ${{ steps.cdn-marshal.outputs.labels }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
diff --git a/.github/workflows/test-sequencer.yml b/.github/workflows/test-sequencer.yml
index 2850764bd8..53518911f2 100644
--- a/.github/workflows/test-sequencer.yml
+++ b/.github/workflows/test-sequencer.yml
@@ -53,7 +53,6 @@ jobs:
hotshot-signature-key = { path = "${GITHUB_WORKSPACE}/hotshot/crates/hotshot-signature-key" }
hotshot-stake-table = { path = "${GITHUB_WORKSPACE}/hotshot/crates/hotshot-stake-table" }
hotshot-state-prover = { path = "${GITHUB_WORKSPACE}/hotshot/crates/hotshot-state-prover" }
- hotshot-orchestrator = { path = "${GITHUB_WORKSPACE}/hotshot/crates/orchestrator" }
hotshot-web-server = { path = "${GITHUB_WORKSPACE}/hotshot/crates/web_server" }
hotshot-task-impls = { path = "${GITHUB_WORKSPACE}/hotshot/crates/task-impls" }
hotshot-testing = { path = "${GITHUB_WORKSPACE}/hotshot/crates/testing" }
diff --git a/CHANGELOG.md b/CHANGELOG.md
deleted file mode 100644
index 8bcdeb6baf..0000000000
--- a/CHANGELOG.md
+++ /dev/null
@@ -1,45 +0,0 @@
-
-## [0.0.4] - 2021-11-01
-### Features
-- Downgrade possibly-temporary network faults to warnings
-- Improve logging when an invalid transaction is submitted
-
-
-
-## [0.0.3] - 2021-10-27
-### Features
-- Implement janky catchup
-
-### BREAKING CHANGE
-
-Adds new type parameter, corresponding to the state type, to Message
-
-
-## [0.0.2] - 2021-10-19
-### Bug Fixes
-- Fix leaders not sending themselves commit votes
-- Fix state not getting stored properly
-
-### Features
-- StatefulHandler trait
-- Reexport traits from traits module
-- State Machine + Node Implementation
-- state machine mvp megasquash
-- Replace tokio broadcast queue with unbounded equivalent
-
-### BREAKING CHANGE
-
-Changes queue type in hotshot methods
-
-
-
-## [0.0.1] - 2021-08-20
-
-
-## 0.0.0 - 2021-07-07
-
-[Unreleased]: https://github.com/EspressoSystems/hotshot/compare/0.0.4...HEAD
-[0.0.4]: https://github.com/EspressoSystems/hotshot/compare/0.0.3...0.0.4
-[0.0.3]: https://github.com/EspressoSystems/hotshot/compare/0.0.2...0.0.3
-[0.0.2]: https://github.com/EspressoSystems/hotshot/compare/0.0.1...0.0.2
-[0.0.1]: https://github.com/EspressoSystems/hotshot/compare/0.0.0...0.0.1
diff --git a/Cargo.lock b/Cargo.lock
index f95a2780c4..208f143dbd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -547,10 +547,10 @@ version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435a87a52755b8f27fcf321ac4f04b2802e337c8c4872923137471ec39c37532"
dependencies = [
- "event-listener 5.3.1",
+ "event-listener 5.4.0",
"event-listener-strategy",
"futures-core",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -573,7 +573,7 @@ dependencies = [
"concurrent-queue",
"event-listener-strategy",
"futures-core",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -646,7 +646,7 @@ dependencies = [
"log",
"parking",
"polling 2.8.0",
- "rustix 0.37.27",
+ "rustix 0.37.28",
"slab",
"socket2 0.4.10",
"waker-fn",
@@ -665,7 +665,7 @@ dependencies = [
"futures-lite 2.5.0",
"parking",
"polling 3.7.4",
- "rustix 0.38.42",
+ "rustix 0.38.43",
"slab",
"tracing",
"windows-sys 0.59.0",
@@ -686,9 +686,9 @@ version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18"
dependencies = [
- "event-listener 5.3.1",
+ "event-listener 5.4.0",
"event-listener-strategy",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -716,9 +716,9 @@ dependencies = [
"async-task",
"blocking",
"cfg-if",
- "event-listener 5.3.1",
+ "event-listener 5.4.0",
"futures-lite 2.5.0",
- "rustix 0.38.42",
+ "rustix 0.38.43",
"tracing",
]
@@ -734,7 +734,7 @@ dependencies = [
"cfg-if",
"futures-core",
"futures-io",
- "rustix 0.38.42",
+ "rustix 0.38.43",
"signal-hook-registry",
"slab",
"windows-sys 0.59.0",
@@ -776,7 +776,7 @@ dependencies = [
"log",
"memchr",
"once_cell",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"pin-utils",
"slab",
"wasm-bindgen-futures",
@@ -790,7 +790,7 @@ checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -846,7 +846,7 @@ dependencies = [
"futures-io",
"futures-util",
"log",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"tungstenite",
]
@@ -860,7 +860,7 @@ dependencies = [
"futures-sink",
"futures-util",
"memchr",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -919,13 +919,13 @@ dependencies = [
"futures-util",
"http 0.2.12",
"http-body 0.4.6",
- "hyper 0.14.31",
+ "hyper 0.14.32",
"itoa",
"matchit",
"memchr",
"mime",
"percent-encoding",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"rustversion",
"serde",
"sync_wrapper 0.1.2",
@@ -1005,12 +1005,6 @@ version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
-[[package]]
-name = "bimap"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7"
-
[[package]]
name = "bincode"
version = "1.3.3"
@@ -1191,6 +1185,12 @@ dependencies = [
"serde",
]
+[[package]]
+name = "bytesize"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc"
+
[[package]]
name = "capnp"
version = "0.20.3"
@@ -1220,9 +1220,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.2.3"
+version = "1.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d"
+checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7"
dependencies = [
"shlex",
]
@@ -1300,10 +1300,10 @@ dependencies = [
"prometheus",
"quinn",
"rand 0.8.5",
- "rcgen 0.13.1",
+ "rcgen 0.13.2",
"redis",
"rkyv",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"rustls-pki-types",
"sqlx",
"thiserror 1.0.69",
@@ -1432,7 +1432,7 @@ dependencies = [
"bytes",
"futures-core",
"memchr",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"tokio",
"tokio-util",
]
@@ -1549,9 +1549,9 @@ dependencies = [
[[package]]
name = "const_fn"
-version = "0.4.10"
+version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "373e9fafaa20882876db20562275ff58d50e0caa2590077fe7ce7bef90211d0d"
+checksum = "2f8a2ca5ac02d09563609681103aada9e1777d54fc57a5acd7a41404f9c93b6e"
[[package]]
name = "constant_time_eq"
@@ -1663,18 +1663,18 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.13"
+version = "0.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
+checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
-version = "0.8.5"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
+checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
@@ -1691,18 +1691,18 @@ dependencies = [
[[package]]
name = "crossbeam-queue"
-version = "0.3.11"
+version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
+checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
-version = "0.8.20"
+version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "crunchy"
@@ -1741,27 +1741,6 @@ dependencies = [
"subtle",
]
-[[package]]
-name = "csv"
-version = "1.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
-dependencies = [
- "csv-core",
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "csv-core"
-version = "0.1.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
-dependencies = [
- "memchr",
-]
-
[[package]]
name = "ctr"
version = "0.6.0"
@@ -2197,7 +2176,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [
"libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -2219,13 +2198,13 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
[[package]]
name = "event-listener"
-version = "5.3.1"
+version = "5.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba"
+checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae"
dependencies = [
"concurrent-queue",
"parking",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -2234,8 +2213,8 @@ version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2"
dependencies = [
- "event-listener 5.3.1",
- "pin-project-lite 0.2.15",
+ "event-listener 5.4.0",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -2300,9 +2279,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "foldhash"
-version = "0.1.3"
+version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2"
+checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f"
[[package]]
name = "foreign-types"
@@ -2415,7 +2394,7 @@ dependencies = [
"futures-io",
"memchr",
"parking",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"waker-fn",
]
@@ -2429,7 +2408,7 @@ dependencies = [
"futures-core",
"futures-io",
"parking",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -2450,7 +2429,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb"
dependencies = [
"futures-io",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"rustls-pki-types",
]
@@ -2496,7 +2475,7 @@ dependencies = [
"futures-sink",
"futures-task",
"memchr",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"pin-utils",
"slab",
]
@@ -2554,9 +2533,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "glob"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
[[package]]
name = "gloo-timers"
@@ -2658,11 +2637,11 @@ dependencies = [
[[package]]
name = "hashlink"
-version = "0.9.1"
+version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af"
+checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
dependencies = [
- "hashbrown 0.14.5",
+ "hashbrown 0.15.2",
]
[[package]]
@@ -2845,11 +2824,11 @@ dependencies = [
[[package]]
name = "home"
-version = "0.5.9"
+version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
+checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -2871,7 +2850,6 @@ dependencies = [
"async-broadcast",
"async-lock 3.4.0",
"async-trait",
- "bimap",
"bincode",
"blake3",
"cdn-broker",
@@ -2895,7 +2873,6 @@ dependencies = [
"primitive-types",
"rand 0.8.5",
"serde",
- "sha2 0.10.8",
"time 0.3.37",
"tokio",
"tracing",
@@ -2917,7 +2894,7 @@ dependencies = [
"hotshot-types",
"serde",
"tagged-base64",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"tide-disco",
"toml",
"vbs",
@@ -2940,7 +2917,7 @@ dependencies = [
"serde",
"sha2 0.10.8",
"sha3",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"time 0.3.37",
"tokio",
"url",
@@ -2953,30 +2930,30 @@ version = "0.5.79"
dependencies = [
"anyhow",
"async-lock 3.4.0",
- "async-trait",
+ "bytes",
+ "bytesize",
"cdn-broker",
"cdn-marshal",
- "chrono",
"clap",
"futures",
"hotshot",
"hotshot-example-types",
- "hotshot-orchestrator",
"hotshot-testing",
"hotshot-types",
+ "libp2p",
"libp2p-networking",
"local-ip-address",
+ "lru 0.12.5",
+ "parking_lot",
"portpicker",
"rand 0.8.5",
- "serde",
+ "reqwest",
"sha2 0.10.8",
- "surf-disco",
- "time 0.3.37",
+ "simple_moving_average",
"tokio",
- "toml",
"tracing",
- "tracing-subscriber 0.3.19",
"url",
+ "warp",
]
[[package]]
@@ -3006,28 +2983,6 @@ dependencies = [
"syn 2.0.95",
]
-[[package]]
-name = "hotshot-orchestrator"
-version = "0.5.79"
-dependencies = [
- "anyhow",
- "async-lock 3.4.0",
- "blake3",
- "clap",
- "csv",
- "futures",
- "hotshot-types",
- "libp2p-identity",
- "multiaddr",
- "serde",
- "surf-disco",
- "tide-disco",
- "tokio",
- "toml",
- "tracing",
- "vbs",
-]
-
[[package]]
name = "hotshot-stake-table"
version = "0.5.79"
@@ -3084,7 +3039,7 @@ dependencies = [
"sha2 0.10.8",
"surf-disco",
"tagged-base64",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"time 0.3.37",
"tokio",
"tracing",
@@ -3125,7 +3080,7 @@ dependencies = [
"serde",
"sha2 0.10.8",
"tagged-base64",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"tide-disco",
"tokio",
"tracing",
@@ -3150,7 +3105,6 @@ dependencies = [
"bincode",
"bitvec",
"blake3",
- "clap",
"committable",
"derive_more 1.0.0",
"digest 0.10.7",
@@ -3163,20 +3117,16 @@ dependencies = [
"jf-utils",
"jf-vid",
"lazy_static",
- "libp2p-identity",
"memoize",
"mnemonic",
- "multiaddr",
"primitive-types",
"rand 0.8.5",
"rand_chacha 0.3.1",
"serde",
- "serde-inline-default",
"serde_bytes",
- "serde_json",
"sha2 0.10.8",
"tagged-base64",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"time 0.3.37",
"tokio",
"toml",
@@ -3218,7 +3168,7 @@ checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [
"bytes",
"http 0.2.12",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -3241,7 +3191,7 @@ dependencies = [
"futures-util",
"http 1.2.0",
"http-body 1.0.1",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
]
[[package]]
@@ -3269,7 +3219,7 @@ dependencies = [
"cookie",
"futures-lite 1.13.0",
"infer",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"rand 0.7.3",
"serde",
"serde_json",
@@ -3298,9 +3248,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
-version = "0.14.31"
+version = "0.14.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85"
+checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7"
dependencies = [
"bytes",
"futures-channel",
@@ -3312,7 +3262,7 @@ dependencies = [
"httparse",
"httpdate",
"itoa",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"socket2 0.5.8",
"tokio",
"tower-service",
@@ -3322,9 +3272,9 @@ dependencies = [
[[package]]
name = "hyper"
-version = "1.5.1"
+version = "1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f"
+checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0"
dependencies = [
"bytes",
"futures-channel",
@@ -3334,7 +3284,7 @@ dependencies = [
"http-body 1.0.1",
"httparse",
"itoa",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"smallvec",
"tokio",
"want",
@@ -3342,15 +3292,15 @@ dependencies = [
[[package]]
name = "hyper-rustls"
-version = "0.27.3"
+version = "0.27.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333"
+checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2"
dependencies = [
"futures-util",
"http 1.2.0",
- "hyper 1.5.1",
+ "hyper 1.5.2",
"hyper-util",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"rustls-pki-types",
"tokio",
"tokio-rustls",
@@ -3363,8 +3313,8 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
dependencies = [
- "hyper 0.14.31",
- "pin-project-lite 0.2.15",
+ "hyper 0.14.32",
+ "pin-project-lite 0.2.16",
"tokio",
"tokio-io-timeout",
]
@@ -3377,7 +3327,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
dependencies = [
"bytes",
"http-body-util",
- "hyper 1.5.1",
+ "hyper 1.5.2",
"hyper-util",
"native-tls",
"tokio",
@@ -3396,8 +3346,8 @@ dependencies = [
"futures-util",
"http 1.2.0",
"http-body 1.0.1",
- "hyper 1.5.1",
- "pin-project-lite 0.2.15",
+ "hyper 1.5.2",
+ "pin-project-lite 0.2.16",
"socket2 0.5.8",
"tokio",
"tower-service",
@@ -3649,7 +3599,7 @@ dependencies = [
"bytes",
"futures",
"http 0.2.12",
- "hyper 0.14.31",
+ "hyper 0.14.32",
"log",
"rand 0.8.5",
"tokio",
@@ -4096,9 +4046,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.168"
+version = "0.2.169"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d"
+checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
[[package]]
name = "libloading"
@@ -4107,7 +4057,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
- "windows-targets 0.48.5",
+ "windows-targets 0.52.6",
]
[[package]]
@@ -4401,8 +4351,6 @@ dependencies = [
"blake3",
"cbor4ii",
"delegate",
- "derive_builder",
- "derive_more 1.0.0",
"futures",
"hotshot-example-types",
"hotshot-types",
@@ -4435,7 +4383,7 @@ dependencies = [
"quinn",
"rand 0.8.5",
"ring 0.17.8",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"socket2 0.5.8",
"thiserror 1.0.69",
"tokio",
@@ -4529,7 +4477,7 @@ dependencies = [
"libp2p-identity",
"rcgen 0.11.3",
"ring 0.17.8",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"rustls-webpki 0.101.7",
"thiserror 1.0.69",
"x509-parser",
@@ -4635,9 +4583,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "linux-raw-sys"
-version = "0.4.14"
+version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
[[package]]
name = "litemap"
@@ -4833,9 +4781,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
-version = "0.8.0"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
+checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394"
dependencies = [
"adler2",
]
@@ -5146,9 +5094,9 @@ dependencies = [
[[package]]
name = "object"
-version = "0.36.5"
+version = "0.36.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
+checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
"memchr",
]
@@ -5339,7 +5287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc"
dependencies = [
"memchr",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"ucd-trie",
]
@@ -5405,9 +5353,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
[[package]]
name = "pin-project-lite"
-version = "0.2.15"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
+checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "pin-utils"
@@ -5471,7 +5419,7 @@ dependencies = [
"concurrent-queue",
"libc",
"log",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"windows-sys 0.48.0",
]
@@ -5484,8 +5432,8 @@ dependencies = [
"cfg-if",
"concurrent-queue",
"hermit-abi 0.4.0",
- "pin-project-lite 0.2.15",
- "rustix 0.38.42",
+ "pin-project-lite 0.2.16",
+ "rustix 0.38.43",
"tracing",
"windows-sys 0.59.0",
]
@@ -5527,9 +5475,9 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.25"
+version = "0.2.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
+checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705"
dependencies = [
"proc-macro2",
"syn 2.0.95",
@@ -5726,13 +5674,13 @@ checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef"
dependencies = [
"bytes",
"futures-io",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"quinn-proto",
"quinn-udp",
"rustc-hash",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"socket2 0.5.8",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"tokio",
"tracing",
]
@@ -5748,10 +5696,10 @@ dependencies = [
"rand 0.8.5",
"ring 0.17.8",
"rustc-hash",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"rustls-pki-types",
"slab",
- "thiserror 2.0.9",
+ "thiserror 2.0.10",
"tinyvec",
"tracing",
"web-time",
@@ -5759,16 +5707,16 @@ dependencies = [
[[package]]
name = "quinn-udp"
-version = "0.5.8"
+version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527"
+checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904"
dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.5.8",
"tracing",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -5891,9 +5839,9 @@ dependencies = [
[[package]]
name = "rcgen"
-version = "0.13.1"
+version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779"
+checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2"
dependencies = [
"pem",
"ring 0.17.8",
@@ -5920,7 +5868,7 @@ dependencies = [
"itoa",
"num-bigint",
"percent-encoding",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"ryu",
"tokio",
"tokio-util",
@@ -5929,9 +5877,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
-version = "0.5.7"
+version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
+checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834"
dependencies = [
"bitflags 2.6.0",
]
@@ -6015,7 +5963,7 @@ dependencies = [
"http 1.2.0",
"http-body 1.0.1",
"http-body-util",
- "hyper 1.5.1",
+ "hyper 1.5.2",
"hyper-rustls",
"hyper-tls",
"hyper-util",
@@ -6026,7 +5974,7 @@ dependencies = [
"native-tls",
"once_cell",
"percent-encoding",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"rustls-pemfile",
"serde",
"serde_json",
@@ -6222,7 +6170,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
dependencies = [
- "semver 1.0.23",
+ "semver 1.0.24",
]
[[package]]
@@ -6236,9 +6184,9 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.37.27"
+version = "0.37.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2"
+checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6"
dependencies = [
"bitflags 1.3.2",
"errno",
@@ -6250,15 +6198,15 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.42"
+version = "0.38.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85"
+checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6"
dependencies = [
"bitflags 2.6.0",
"errno",
"libc",
- "linux-raw-sys 0.4.14",
- "windows-sys 0.52.0",
+ "linux-raw-sys 0.4.15",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -6276,9 +6224,9 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.23.19"
+version = "0.23.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1"
+checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b"
dependencies = [
"log",
"once_cell",
@@ -6300,9 +6248,9 @@ dependencies = [
[[package]]
name = "rustls-pki-types"
-version = "1.10.0"
+version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b"
+checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37"
dependencies = [
"web-time",
]
@@ -6330,9 +6278,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.18"
+version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248"
+checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"
[[package]]
name = "rw-stream-sink"
@@ -6403,9 +6351,9 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.12.1"
+version = "2.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2"
+checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
dependencies = [
"core-foundation-sys",
"libc",
@@ -6422,9 +6370,9 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.23"
+version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
+checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba"
[[package]]
name = "semver-parser"
@@ -6441,17 +6389,6 @@ dependencies = [
"serde_derive",
]
-[[package]]
-name = "serde-inline-default"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59fb1bedd774187d304179493b0d3c41fbe97b04b14305363f68d2bdf5e47cb9"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.95",
-]
-
[[package]]
name = "serde_bytes"
version = "0.11.15"
@@ -6527,9 +6464,9 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "3.11.0"
+version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817"
+checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa"
dependencies = [
"base64 0.22.1",
"chrono",
@@ -6545,9 +6482,9 @@ dependencies = [
[[package]]
name = "serde_with_macros"
-version = "3.11.0"
+version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d"
+checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e"
dependencies = [
"darling",
"proc-macro2",
@@ -6699,6 +6636,15 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
+[[package]]
+name = "simple_moving_average"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a4b144ad185430cd033299e2c93e465d5a7e65fbb858593dc57181fa13cd310"
+dependencies = [
+ "num-traits",
+]
+
[[package]]
name = "slab"
version = "0.4.9"
@@ -6803,21 +6749,11 @@ dependencies = [
"der",
]
-[[package]]
-name = "sqlformat"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790"
-dependencies = [
- "nom",
- "unicode_categories",
-]
-
[[package]]
name = "sqlx"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e"
+checksum = "4410e73b3c0d8442c5f99b425d7a435b5ee0ae4167b3196771dd3f7a01be745f"
dependencies = [
"sqlx-core",
"sqlx-macros",
@@ -6828,37 +6764,31 @@ dependencies = [
[[package]]
name = "sqlx-core"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e"
+checksum = "6a007b6936676aa9ab40207cde35daab0a04b823be8ae004368c0793b96a61e0"
dependencies = [
- "atoi",
- "byteorder",
"bytes",
"crc",
"crossbeam-queue",
"either",
- "event-listener 5.3.1",
- "futures-channel",
+ "event-listener 5.4.0",
"futures-core",
"futures-intrusive",
"futures-io",
"futures-util",
- "hashbrown 0.14.5",
- "hashlink 0.9.1",
- "hex",
+ "hashbrown 0.15.2",
+ "hashlink 0.10.0",
"indexmap 2.7.0",
"log",
"memchr",
"once_cell",
- "paste",
"percent-encoding",
"serde",
"serde_json",
"sha2 0.10.8",
"smallvec",
- "sqlformat",
- "thiserror 1.0.69",
+ "thiserror 2.0.10",
"time 0.3.37",
"tokio",
"tokio-stream",
@@ -6868,9 +6798,9 @@ dependencies = [
[[package]]
name = "sqlx-macros"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657"
+checksum = "3112e2ad78643fef903618d78cf0aec1cb3134b019730edb039b69eaf531f310"
dependencies = [
"proc-macro2",
"quote",
@@ -6881,9 +6811,9 @@ dependencies = [
[[package]]
name = "sqlx-macros-core"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5"
+checksum = "4e9f90acc5ab146a99bf5061a7eb4976b573f560bc898ef3bf8435448dd5e7ad"
dependencies = [
"dotenvy",
"either",
@@ -6907,9 +6837,9 @@ dependencies = [
[[package]]
name = "sqlx-mysql"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a"
+checksum = "4560278f0e00ce64938540546f59f590d60beee33fffbd3b9cd47851e5fff233"
dependencies = [
"atoi",
"base64 0.22.1",
@@ -6942,7 +6872,7 @@ dependencies = [
"smallvec",
"sqlx-core",
"stringprep",
- "thiserror 1.0.69",
+ "thiserror 2.0.10",
"time 0.3.37",
"tracing",
"whoami",
@@ -6950,9 +6880,9 @@ dependencies = [
[[package]]
name = "sqlx-postgres"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8"
+checksum = "c5b98a57f363ed6764d5b3a12bfedf62f07aa16e1856a7ddc2a0bb190a959613"
dependencies = [
"atoi",
"base64 0.22.1",
@@ -6963,7 +6893,6 @@ dependencies = [
"etcetera",
"futures-channel",
"futures-core",
- "futures-io",
"futures-util",
"hex",
"hkdf 0.12.4",
@@ -6981,7 +6910,7 @@ dependencies = [
"smallvec",
"sqlx-core",
"stringprep",
- "thiserror 1.0.69",
+ "thiserror 2.0.10",
"time 0.3.37",
"tracing",
"whoami",
@@ -6989,9 +6918,9 @@ dependencies = [
[[package]]
name = "sqlx-sqlite"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680"
+checksum = "f85ca71d3a5b24e64e1d08dd8fe36c6c95c339a896cc33068148906784620540"
dependencies = [
"atoi",
"flume",
@@ -7323,15 +7252,16 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tempfile"
-version = "3.14.0"
+version = "3.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c"
+checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704"
dependencies = [
"cfg-if",
"fastrand 2.3.0",
+ "getrandom 0.2.15",
"once_cell",
- "rustix 0.38.42",
- "windows-sys 0.52.0",
+ "rustix 0.38.43",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -7345,11 +7275,11 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "2.0.9"
+version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc"
+checksum = "a3ac7f54ca534db81081ef1c1e7f6ea8a3ef428d2fc069097c079443d24124d3"
dependencies = [
- "thiserror-impl 2.0.9",
+ "thiserror-impl 2.0.10",
]
[[package]]
@@ -7365,9 +7295,9 @@ dependencies = [
[[package]]
name = "thiserror-impl"
-version = "2.0.9"
+version = "2.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4"
+checksum = "9e9465d30713b56a37ede7185763c3492a91be2f5fa68d958c44e41ab9248beb"
dependencies = [
"proc-macro2",
"quote",
@@ -7408,7 +7338,7 @@ dependencies = [
"http-types",
"kv-log-macro",
"log",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"route-recognizer",
"serde",
"serde_json",
@@ -7446,7 +7376,7 @@ dependencies = [
"rand 0.8.5",
"reqwest",
"routefinder",
- "semver 1.0.23",
+ "semver 1.0.24",
"serde",
"serde_json",
"serde_with",
@@ -7577,9 +7507,9 @@ dependencies = [
[[package]]
name = "tinyvec"
-version = "1.8.0"
+version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938"
+checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8"
dependencies = [
"tinyvec_macros",
]
@@ -7592,16 +7522,16 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.42.0"
+version = "1.43.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
+checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e"
dependencies = [
"backtrace",
"bytes",
"libc",
"mio",
"parking_lot",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"socket2 0.5.8",
"tokio-macros",
"tracing",
@@ -7614,15 +7544,15 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf"
dependencies = [
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"tokio",
]
[[package]]
name = "tokio-macros"
-version = "2.4.0"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
+checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
@@ -7645,7 +7575,7 @@ version = "0.26.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37"
dependencies = [
- "rustls 0.23.19",
+ "rustls 0.23.20",
"tokio",
]
@@ -7656,7 +7586,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
dependencies = [
"futures-core",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"tokio",
]
@@ -7669,7 +7599,7 @@ dependencies = [
"bytes",
"futures-core",
"futures-sink",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"tokio",
]
@@ -7721,7 +7651,7 @@ dependencies = [
"h2 0.3.26",
"http 0.2.12",
"http-body 0.4.6",
- "hyper 0.14.31",
+ "hyper 0.14.32",
"hyper-timeout",
"percent-encoding",
"pin-project",
@@ -7744,7 +7674,7 @@ dependencies = [
"futures-util",
"indexmap 1.9.3",
"pin-project",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"rand 0.8.5",
"slab",
"tokio",
@@ -7762,7 +7692,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
dependencies = [
"futures-core",
"futures-util",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"sync_wrapper 1.0.2",
"tokio",
"tower-layer",
@@ -7788,7 +7718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"log",
- "pin-project-lite 0.2.15",
+ "pin-project-lite 0.2.16",
"tracing-attributes",
"tracing-core",
]
@@ -7946,15 +7876,15 @@ dependencies = [
[[package]]
name = "unicase"
-version = "2.8.0"
+version = "2.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df"
+checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
[[package]]
name = "unicode-bidi"
-version = "0.3.17"
+version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893"
+checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5"
[[package]]
name = "unicode-ident"
@@ -7989,12 +7919,6 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
-[[package]]
-name = "unicode_categories"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
-
[[package]]
name = "universal-hash"
version = "0.4.0"
@@ -8039,7 +7963,7 @@ dependencies = [
"flate2",
"log",
"once_cell",
- "rustls 0.23.19",
+ "rustls 0.23.20",
"rustls-pki-types",
"url",
"webpki-roots 0.26.7",
@@ -8201,7 +8125,7 @@ dependencies = [
"futures-util",
"headers",
"http 0.2.12",
- "hyper 0.14.31",
+ "hyper 0.14.32",
"log",
"mime",
"mime_guess",
@@ -8608,9 +8532,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winnow"
-version = "0.6.20"
+version = "0.6.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b"
+checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980"
dependencies = [
"memchr",
]
@@ -8666,9 +8590,9 @@ dependencies = [
[[package]]
name = "xml-rs"
-version = "0.8.24"
+version = "0.8.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea8b391c9a790b496184c29f7f93b9ed5b16abb306c05415b68bcc16e4d06432"
+checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4"
[[package]]
name = "xmltree"
diff --git a/Cargo.toml b/Cargo.toml
index 2ea7c703f1..53780e32fc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,7 +20,6 @@ members = [
"crates/hotshot-stake-table",
"crates/libp2p-networking",
"crates/macros",
- "crates/orchestrator",
"crates/task",
"crates/task-impls",
"crates/testing",
@@ -90,6 +89,7 @@ clap = { version = "4", features = ["derive", "env"] }
url = { version = "2", features = ["serde"] }
vec1 = { version = "1", features = ["serde"] }
reqwest = { version = "0.12", features = ["json"] }
+parking_lot = "0.12"
libp2p = { package = "libp2p", version = "0.54", default-features = false, features = [
"macros",
@@ -120,7 +120,6 @@ cdn-client = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.6
cdn-broker = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.6" }
cdn-marshal = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.6" }
cdn-proto = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.6" }
-
### Profiles
###
### Note: these only apply to example executables or tests built from within this crate. They have
diff --git a/README.md b/README.md
index c5e37298af..7094739d3a 100644
--- a/README.md
+++ b/README.md
@@ -1,229 +1,31 @@
![GitHub Release](https://img.shields.io/github/v/release/EspressoSystems/HotShot)
+# HotShot
-# License
-## Copyright
-**(c) 2021-2024 Espresso Systems**.
-`HotShot` was developed by Espresso Systems.
+HotShot is a Byzantine Fault Tolerant (BFT) consensus protocol that builds upon HotStuff 2. It is modified for proof-of-stake settings and features a linear view-synchronization protocol and a data-availability layer.
-# HotShot Consensus Module
+## Paper
+The HotShot protocol is described in the [Espresso Sequencing Network paper](https://eprint.iacr.org/2024/1189.pdf).
-HotShot is a BFT consensus protocol based off of HotStuff, with the addition of proof-of-stake and
-VRF committee elections.
+## Usage and Examples
-## Disclaimer
-
-**DISCLAIMER:** This software is provided "as is" and its security has not been externally audited. Use at your own risk.
-
-# Usage
-
-Please see the rustdoc for API documentation, and the examples directory for usage.
-
-## Dependencies
-
-### Unix-like
-
-#### Nix (macos and linux)
-
-```
-nix develop
-```
-
-#### Brew (macos)
-
-```
-brew install cmake protobuf
-```
-
-#### Apt-get (linux)
-
-```
-apt-get install cmake protobuf
-```
-
-### Windows
-
-#### Chocolatey
-
-```
-choco install cmake protoc
-```
-
-#### Scoop
-
-```
-scoop bucket add extras
-scoop install protobuf cmake
-```
-
-## Building
-
-Once dependencies have been installed, to build everything:
-
-```sh
-just build
-```
-
-
-
-# Static linking
-
-HotShot supports static linking for its examples:
-
-```sh
-# Nix-shell is optional but recommended
-nix develop .#staticShell
-
-just build
-```
-
-# Testing
-
-To test:
-
-```sh
-RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just test
-```
-
-- `RUST_LOG=$ERROR_LOG_LEVEL`: The basic levels of logging include `warn`, `error`, `info`.
-- `RUST_LOG_FORMAT=$ERROR_LOG_FORMAT`: The types of logging include `full`, `json`, and `compact`.
-- Internally, the inclusion of the `--nocapture` flag indicates whether or not to output logs.
-- Internally, we run at `--test-threads=1` because the tests spawn up a lot of file handles, and unix based systems consistently run out of handles.
-
-To stress test, run the ignored tests prefixed with `test_stress`:
-```sh
-RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just run_test test_stress
-```
-
-## Careful
-
-To double-check for UB:
-
-```bash
-nix develop .#correctnessShell
-just careful
-```
-
-## Testing on CI
-
-To test as if running on CI, one must limit the number of cores and ram to match github runners (2 core, 7 gig ram). To limit the ram, spin up a virtual machine or container with 7 gigs ram. To limit the core count when running tests:
-
-```
-ASYNC_STD_THREAD_COUNT=1 RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just tokio test
-```
-
-# Tokio-console
-
-To use tokio-console, drop into the console shell:
-
-```
-nix develop .#consoleShell
-```
-
-Then, run an example.
-
-On a separate terminal, also drop into the console shell and start tokio-console:
-```
-nix develop .#consoleShell -c tokio-console
-```
-
-This second window should now display task usage.
-
-# Open Telemetry + Jaeger Integration
-
-To view distributed logs with just the centralized server and one client, first edit the `centralized_server/orchestrator` file to include have a threshold and num_nodes of 1.
-
-Then open three separate terminals.
+Usage examples are provided in the [examples directory](./crates/examples).
+### Running the examples
+To run a full example network, use the command
```bash
-# Terminal 1
-# Start the jaeger instance to view spans
-docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 jaegertracing/all-in-one:latest
-
-# Terminal 2
-# Start the CDN
-
-# Terminal 3
-# Start the client
+RUST_LOG=info cargo run --example all
```
-# Resource Usage Statistics
-
-To generate usage stats:
-- build the test suite
-- find the executable containing the test of interest
-- run profiling tools
-
-The executable `cargo` uses is shown in the output of `cargo test`.
-
-For example, to profile `test_stress_dht_many_round`:
-
+You can see the list of supported command-line arguments by running
```bash
-# bring profiling tooling like flamegraph and heaptrack into scope
-nix develop .#perfShell
-
-# show the executable we need run
-# and build all test executables (required for subsequent steps)
-cargo test --verbose --release --lib --bins --tests --benches --workspace -- --test-threads=1
-# the output cargo test contains the tests path:
-# Running `/home/jrestivo/work/crosscross/target/release/deps/counter-880b1ff53ee21dea test_stress --test-threads=1 --ignored`
-# running 7 tests
-# test test_stress_dht_many_rounds ... ok
-# ...
-
-# a more detailed alternative to flamegraph
-# NOTE: only works on linux
-heaptrack $(fd -I "counter*" -t x | rg release) --ignored -- test_stress_dht_many_round --nocapture
-# palette provides memory statistics, omission will provide cpu cycle stats as colors
-# NOTE: must be run as root on macos
-flamegraph --palette=mem $(fd -I "counter*" -t x | rg release) --ignored -- test_stress_dht_one_round
-# code coveragte statistics
-cargo-llvm-cov llvm-cov --test=test_stress_dht_many_round --workspace --all-targets --release --html --output-path lcov.html
-```
-
-This will output:
-- `heaptrack.counter-$HASH` which is viewable by heaptrack. This provides a plethora of useful statistics about memory and cpu cycles.
-- `flamegraph.svg` which is a (moderately) less detailed version of heaptrack.
-- `lcov.html` generates a summary of code coverage.
-
-# Debugging
-
-A debugging config file is provided for vscode and vscodium in [`.vscode/launch.json`](https://github.com/EspressoSystems/HotShot/blob/main/.vscode/launch.json). This is intended to be used with [vadimcn/vscode-lldb](https://open-vsx.org/extension/vadimcn/vscode-lldb) but may work with other rust debuggers as well.
-
-To bring `lldb` into scope with nix, run `nix develop .#debugShell`.
-
-# Git Workflow
-
-For espresso developers we have written up a description of our workflow [here](./WORKFLOW.md).
-
-# Extra Editor Configuration
-
-Choose an async runtime to use before launching a text editor. This may be done by setting the environment RUSTFLAGS. For example:
-
-```
-nvim # launch text editor of choice. We choose neovim in this example
-unset RUSTFLAGS # Unset rustflags so we may continue to use the justfile. The justfile sets these particular config options
-```
-
-# Debugging
-
-We support the [CodeLLDB Debugger](https://github.com/vadimcn/vscode-lldb).
-
-## Neovim
-
-Install [`dap`](https://github.com/mfussenegger/nvim-dap) and [`rust-tools`](https://github.com/simrat39/rust-tools.nvim). Install the CodeLLDB debugger listed above.
-Follow the instructions [here](https://github.com/mfussenegger/nvim-dap/discussions/671#discussioncomment-4286738) to configure the adapter. To add our project-local configurations, run:
-
-```
-lua require('dap.ext.vscode').load_launchjs(nil, { ["codelldb"] = {"rust"} })
+cargo run --example all -- --help
```
-Finally, place a breakpoint and run `:DapContinue` to begin debugging.
+## Audits
+The HotShot protocol has been internally audited. The report is available [here](./audits/internal-reviews/EspressoHotshot-2024internal.pdf).
-NOTE: Do NOT configure dap at all with rust-tools. Do it manually.
-
-[Example configuration](https://github.com/DieracDelta/vimconfig/blob/master/modules/lsp.nix#L280).
+## Disclaimer
-## Vscode
+**DISCLAIMER:** This software is provided "as is" and its security has not been externally audited. Use at your own risk.
-Install the extension and load the `launch.json` file. Then run the desired test target.
diff --git a/config/ValidatorConfigExample b/config/ValidatorConfigExample
deleted file mode 100644
index 3170b75f9f..0000000000
--- a/config/ValidatorConfigExample
+++ /dev/null
@@ -1,32 +0,0 @@
-ValidatorConfig {
- public_key: VerKey(
- (
- QuadExtField(2264797523581107490935262917175769123227923636811928330606075281145117212394 + 15807017392833049888165434456991157794698032464874424842715555348468160607934 * u),
- QuadExtField(7996517616082121122160563552650547601395271017260499735456299700133762512689 + 7504045709281061282278228438613345070383424761478787301859187055302953740948 * u),
- QuadExtField(1515973040548822760825076242090160370742046237881440422068330135941139244581 + 20251846261653098602911417004145145971080304248810966341160788194007704966108 * u)
- )
- ),
- private_key: SignKey(
- BigInt(
- [3505488234151006356, 6655477166151225138, 3291219027844407676, 2153641080015542578]
- )
- ),
- stake_value: 1,
- state_key_pair: StateKeyPair(
- KeyPair {
- sk: SignKey(
- BigInt(
- [2822822805887490846, 6664316196088353173, 4926510007447087464, 116097479308258694]
- )
- ),
- vk: VerKey(
- Projective {
- x: BigInt([11315198235793138814, 4744451806709910489, 6921831025042192557, 1125393823825936625]),
- y: BigInt([13035879815613524256, 18225673961538637854, 12006860967936477969, 1516668567229692859]),
- t: BigInt([13450777528397789701, 12242009376162249168, 12596256366242272750, 3368076418495976469]),
- z: BigInt([10465708325245823445, 13967918689717629445, 14943426723808572731, 621075342718756551])
- }
- )
- }
- )
- }
\ No newline at end of file
diff --git a/config/ValidatorConfigFile.toml b/config/ValidatorConfigFile.toml
deleted file mode 100644
index 1f96cf0eb2..0000000000
--- a/config/ValidatorConfigFile.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-is_da = true
-seed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-node_id = 0
diff --git a/crates/example-types/src/block_types.rs b/crates/example-types/src/block_types.rs
index 3d42344bc2..bf2358117b 100644
--- a/crates/example-types/src/block_types.rs
+++ b/crates/example-types/src/block_types.rs
@@ -6,10 +6,12 @@
use std::{
fmt::{Debug, Display},
+ io::{Cursor, Read},
mem::size_of,
sync::Arc,
};
+use anyhow::Context;
use async_trait::async_trait;
use committable::{Commitment, Committable, RawCommitmentBuilder};
use hotshot_types::{
@@ -106,6 +108,36 @@ impl TestTransaction {
encoded
}
+
+ /// Decode a list of individual transactions from the encoded payload
+ pub fn decode(encoded_transactions: &[u8]) -> anyhow::Result<Vec<Self>> {
+ // Create a cursor to read the encoded transactions
+ let mut cursor = Cursor::new(encoded_transactions);
+
+ // A collection of the transactions to return
+ let mut transactions = Vec::new();
+
+ // Process each transaction
+ let mut transaction_size_bytes = [0; size_of::<u32>()];
+ while cursor.position() < encoded_transactions.len() as u64 {
+ // Read the transaction size
+ cursor
+ .read_exact(&mut transaction_size_bytes)
+ .context("Failed to read transaction size")?;
+ let transaction_size = u32::from_le_bytes(transaction_size_bytes);
+
+ // Read the transaction
+ let mut transaction_bytes = vec![0; transaction_size as usize];
+ cursor
+ .read_exact(&mut transaction_bytes)
+ .context("Failed to read transaction")?;
+
+ // Add the transaction to the collection
+ transactions.push(Self(transaction_bytes));
+ }
+
+ Ok(transactions)
+ }
}
impl Committable for TestTransaction {
diff --git a/crates/examples/Cargo.toml b/crates/examples/Cargo.toml
index 3136acdae4..6cfe139e31 100644
--- a/crates/examples/Cargo.toml
+++ b/crates/examples/Cargo.toml
@@ -1,114 +1,60 @@
[package]
-authors = { workspace = true }
-description = "HotShot Examples and binaries"
-edition = { workspace = true }
name = "hotshot-examples"
-readme = "README.md"
-version = { workspace = true }
-rust-version = { workspace = true }
+version.workspace = true
+authors.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+homepage.workspace = true
+documentation.workspace = true
+repository.workspace = true
-[features]
-default = ["docs", "doc-images", "hotshot-testing"]
-gpu-vid = ["hotshot-example-types/gpu-vid"]
-
-# Build the extended documentation
-docs = []
-doc-images = []
-hotshot-testing = ["hotshot/hotshot-testing"]
-fixed-leader-election = []
-
-# Common
-[[example]]
-name = "orchestrator"
-path = "orchestrator.rs"
-
-# Libp2p
-[[example]]
-name = "validator-libp2p"
-path = "libp2p/validator.rs"
-
-[[example]]
-name = "multi-validator-libp2p"
-path = "libp2p/multi-validator.rs"
-
-[[example]]
-name = "all-libp2p"
-path = "libp2p/all.rs"
-
-# Combined
-[[example]]
-name = "all-combined"
-path = "combined/all.rs"
-
-[[example]]
-name = "multi-validator-combined"
-path = "combined/multi-validator.rs"
-
-[[example]]
-name = "validator-combined"
-path = "combined/validator.rs"
-
-[[example]]
-name = "orchestrator-combined"
-path = "combined/orchestrator.rs"
-
-# Push CDN
-[[example]]
-name = "all-push-cdn"
-path = "push-cdn/all.rs"
-
-[[example]]
-name = "validator-push-cdn"
-path = "push-cdn/validator.rs"
+[dependencies]
+async-lock.workspace = true
+tokio.workspace = true
+clap.workspace = true
+hotshot = { path = "../hotshot" }
+hotshot-types = { path = "../types" }
+hotshot-example-types.path = "../example-types"
+url.workspace = true
+libp2p.workspace = true
+portpicker.workspace = true
+libp2p-networking.workspace = true
+anyhow.workspace = true
+rand.workspace = true
+futures.workspace = true
+tracing.workspace = true
+hotshot-testing.path = "../testing"
+lru.workspace = true
+cdn-broker.workspace = true
+cdn-marshal.workspace = true
+sha2.workspace = true
+warp = { version = "0.3", default-features = false }
+reqwest.workspace = true
+bytes = "1"
+parking_lot.workspace = true
+local-ip-address = "0.6"
+simple_moving_average = "1"
+bytesize = "1"
[[example]]
-name = "multi-validator-push-cdn"
-path = "push-cdn/multi-validator.rs"
+name = "all"
+path = "all.rs"
[[example]]
name = "cdn-broker"
-path = "push-cdn/broker.rs"
+path = "cdn/broker.rs"
[[example]]
name = "cdn-marshal"
-path = "push-cdn/marshal.rs"
+path = "cdn/marshal.rs"
[[example]]
-name = "whitelist-push-cdn"
-path = "push-cdn/whitelist-adapter.rs"
+name = "coordinator"
+path = "coordinator.rs"
-[dependencies]
-async-lock = { workspace = true }
-async-trait = { workspace = true }
-
-cdn-broker = { workspace = true, features = ["global-permits"] }
-cdn-marshal = { workspace = true }
-chrono = { workspace = true }
-clap = { workspace = true, optional = true }
-futures = { workspace = true }
-hotshot = { path = "../hotshot" }
-hotshot-example-types = { path = "../example-types" }
-hotshot-orchestrator = { version = "0.5.36", path = "../orchestrator", default-features = false }
-hotshot-testing = { path = "../testing" }
-hotshot-types = { path = "../types" }
-libp2p-networking = { workspace = true }
-local-ip-address = "0.6"
-portpicker = { workspace = true }
-rand = { workspace = true }
-serde = { workspace = true, features = ["rc"] }
-sha2 = { workspace = true }
-surf-disco = { workspace = true }
-time = { workspace = true }
-tokio = { workspace = true }
-
-tracing = { workspace = true }
-url = { workspace = true }
-
-[dev-dependencies]
-anyhow = { workspace = true }
-clap = { workspace = true }
-toml = { workspace = true }
-tracing-subscriber = "0.3"
+[[example]]
+name = "single-validator"
+path = "single-validator.rs"
[lints]
workspace = true
diff --git a/crates/examples/Dockerfiles/cdn-broker.Dockerfile b/crates/examples/Dockerfiles/cdn-broker.Dockerfile
new file mode 100644
index 0000000000..a5d03571eb
--- /dev/null
+++ b/crates/examples/Dockerfiles/cdn-broker.Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine:3
+
+# Copy the source files
+COPY ./target/release/examples/cdn-broker /cdn-broker
+
+# Run the broker
+ENTRYPOINT ["/cdn-broker"]
diff --git a/crates/examples/Dockerfiles/cdn-marshal.Dockerfile b/crates/examples/Dockerfiles/cdn-marshal.Dockerfile
new file mode 100644
index 0000000000..639b1a9238
--- /dev/null
+++ b/crates/examples/Dockerfiles/cdn-marshal.Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine:3
+
+# Copy the source files
+COPY ./target/release/examples/cdn-marshal /cdn-marshal
+
+# Run the marshal
+ENTRYPOINT ["/cdn-marshal"]
diff --git a/crates/examples/Dockerfiles/coordinator.Dockerfile b/crates/examples/Dockerfiles/coordinator.Dockerfile
new file mode 100644
index 0000000000..65f7f8ed43
--- /dev/null
+++ b/crates/examples/Dockerfiles/coordinator.Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine:3
+
+# Copy the source files
+COPY ./target/release/examples/coordinator /coordinator
+
+# Run the coordinator
+ENTRYPOINT ["/coordinator"]
diff --git a/crates/examples/Dockerfiles/single-validator.Dockerfile b/crates/examples/Dockerfiles/single-validator.Dockerfile
new file mode 100644
index 0000000000..55c0d253f8
--- /dev/null
+++ b/crates/examples/Dockerfiles/single-validator.Dockerfile
@@ -0,0 +1,7 @@
+FROM alpine:3
+
+# Copy the source files
+COPY ./target/release/examples/single-validator /single-validator
+
+# Run the single validator
+ENTRYPOINT ["/single-validator"]
diff --git a/crates/examples/all.rs b/crates/examples/all.rs
new file mode 100644
index 0000000000..9a9f62b966
--- /dev/null
+++ b/crates/examples/all.rs
@@ -0,0 +1,387 @@
+//! This file contains an example of running a full HotShot network, comprised of
+//! a CDN, Libp2p, a builder, and multiple validators.
+use std::{collections::HashMap, num::NonZero, sync::Arc, time::Duration};
+
+use anyhow::{Context, Result};
+use cdn_broker::{reexports::def::hook::NoMessageHook, Broker, Config as BrokerConfig};
+use cdn_marshal::{Config as MarshalConfig, Marshal};
+use clap::Parser;
+use futures::StreamExt;
+use hotshot::{
+ helpers::initialize_logging,
+ traits::{
+ election::static_committee::StaticCommittee,
+ implementations::{
+ derive_libp2p_keypair, CdnMetricsValue, CdnTopic, KeyPair, Libp2pMetricsValue,
+ Libp2pNetwork, PushCdnNetwork, TestingDef, WrappedSignatureKey,
+ },
+ },
+ types::{BLSPrivKey, BLSPubKey, EventType, SignatureKey},
+ MarketplaceConfig, SystemContext,
+};
+use hotshot_example_types::{
+ auction_results_provider_types::TestAuctionResultsProvider,
+ block_types::TestTransaction,
+ node_types::{CombinedImpl, Libp2pImpl, PushCdnImpl, TestTypes, TestVersions},
+ state_types::TestInstanceState,
+ storage_types::TestStorage,
+ testable_delay::DelayConfig,
+};
+use hotshot_testing::block_builder::{SimpleBuilderImplementation, TestBuilderImplementation};
+use hotshot_types::{
+ consensus::ConsensusMetricsValue,
+ traits::{election::Membership, node_implementation::NodeType},
+ HotShotConfig, PeerConfig,
+};
+use libp2p::Multiaddr;
+use libp2p_networking::network::{
+ behaviours::dht::record::{Namespace, RecordKey, RecordValue},
+ node::config::{KademliaConfig, Libp2pConfig},
+ GossipConfig, RequestResponseConfig,
+};
+use lru::LruCache;
+use rand::Rng;
+use tokio::{spawn, sync::OnceCell};
+use tracing::info;
+use url::Url;
+
+// Include some common code
+include!("common.rs");
+
+/// This example runs all necessary HotShot components
+#[derive(Parser)]
+struct Args {
+ /// The number of nodes to start
+ #[arg(long, default_value_t = 5)]
+ total_num_nodes: usize,
+
+ /// The number of nodes which are DA nodes
+ #[arg(long, default_value_t = 3)]
+ num_da_nodes: usize,
+
+ /// The number of views to run for. If not specified, it will run indefinitely
+ #[arg(long)]
+ num_views: Option,
+
+ /// The number of transactions to submit to each nodes' builder per view
+ #[arg(long, default_value_t = 1)]
+ num_transactions_per_view: usize,
+
+ /// The size of the transactions submitted to each nodes' builder per view
+ #[arg(long, default_value_t = 1000)]
+ transaction_size: usize,
+
+ /// The type of network to use. Acceptable values are
+ /// "combined", "cdn", or "libp2p"
+ #[arg(long, default_value = "combined")]
+ network: String,
+}
+
+/// Generate a Libp2p multiaddress from a port
+fn libp2p_multiaddress_from_index(index: usize) -> Result {
+ // Generate the peer's private key and derive their libp2p keypair
+ let (_, peer_private_key) = BLSPubKey::generated_from_seed_indexed([0u8; 32], index as u64);
+ let peer_libp2p_keypair = derive_libp2p_keypair::(&peer_private_key)
+ .with_context(|| "Failed to derive libp2p keypair")?;
+
+ // Generate the multiaddress from the peer's port and libp2p keypair
+ format!(
+ "/ip4/127.0.0.1/udp/{}/quic-v1/p2p/{}",
+ portpicker::pick_unused_port().with_context(|| "Failed to find unused port")?,
+ peer_libp2p_keypair.public().to_peer_id()
+ )
+ .parse()
+ .with_context(|| "Failed to parse multiaddress")
+}
+
+/// A helper function to start the CDN
+/// (2 brokers + 1 marshal)
+///
+/// Returns the address of the marshal
+async fn start_cdn() -> Result {
+ // Figure out where we're going to spawn the marshal
+ let marshal_port =
+ portpicker::pick_unused_port().with_context(|| "Failed to find unused port")?;
+ let marshal_address = format!("127.0.0.1:{marshal_port}");
+
+ // Generate a random file path for the SQLite database
+ let db_path = format!("/tmp/marshal-{}.db", rand::random::());
+
+ // Configure the marshal
+ let marshal_config = MarshalConfig {
+ bind_endpoint: marshal_address.clone(),
+ discovery_endpoint: db_path.clone(),
+ ca_cert_path: None,
+ ca_key_path: None,
+ metrics_bind_endpoint: None,
+ global_memory_pool_size: Some(1024 * 1024 * 1024),
+ };
+
+ // Create and start the marshal
+ let marshal: Marshal> = Marshal::new(marshal_config)
+ .await
+ .with_context(|| "Failed to create marshal")?;
+ spawn(marshal.start());
+
+ // This keypair is shared between brokers
+ let (broker_public_key, broker_private_key) =
+ ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337);
+
+ for _ in 0..2 {
+ // Generate one random port for the "public" endpoint and one for the "private" endpoint
+ let public_port =
+ portpicker::pick_unused_port().with_context(|| "Failed to find unused port")?;
+ let private_port =
+ portpicker::pick_unused_port().with_context(|| "Failed to find unused port")?;
+
+ // Throw into address format
+ let private_address = format!("127.0.0.1:{private_port}");
+ let public_address = format!("127.0.0.1:{public_port}");
+
+ // Configure the broker
+ let broker_config: BrokerConfig::SignatureKey>> =
+ BrokerConfig {
+ public_advertise_endpoint: public_address.clone(),
+ public_bind_endpoint: public_address,
+ private_advertise_endpoint: private_address.clone(),
+ private_bind_endpoint: private_address,
+ discovery_endpoint: db_path.clone(),
+ keypair: KeyPair {
+ public_key: WrappedSignatureKey(broker_public_key),
+ private_key: broker_private_key.clone(),
+ },
+
+ user_message_hook: NoMessageHook, // Don't do any message processing
+ broker_message_hook: NoMessageHook,
+
+ metrics_bind_endpoint: None,
+ ca_cert_path: None,
+ ca_key_path: None,
+ global_memory_pool_size: Some(1024 * 1024 * 1024),
+ };
+
+ // Create and start it
+ let broker = Broker::new(broker_config)
+ .await
+ .with_context(|| "Failed to create broker")?;
+ spawn(broker.start());
+ }
+
+ Ok(marshal_address)
+}
+
+/// Start the CDN if it's not already running
+/// Returns the address of the marshal
+async fn try_start_cdn() -> Result {
+ /// Create a static cell to store the marshal's endpoint
+ static MARSHAL_ADDRESS: OnceCell = OnceCell::const_new();
+
+ // If the marshal endpoint isn't already set, start the CDN and set the endpoint
+ Ok(MARSHAL_ADDRESS
+ .get_or_init(|| async { start_cdn().await.expect("Failed to start CDN") })
+ .await
+ .clone())
+}
+
+#[tokio::main]
+#[allow(clippy::too_many_lines)]
+async fn main() -> Result<()> {
+ // Initialize logging
+ initialize_logging();
+
+ // Parse the command line arguments
+ let args = Args::parse();
+
+ // Match the network type
+ let network_type = match args.network.to_lowercase().as_str() {
+ "combined" => NetworkType::Combined,
+ "cdn" => NetworkType::Cdn,
+ "libp2p" => NetworkType::LibP2P,
+ _ => {
+ anyhow::bail!("Invalid network type. Please use one of 'combined', 'cdn', or 'libp2p'.")
+ }
+ };
+
+ // Generate the builder URL we plan to use
+ let builder_url = Url::parse(
+ format!(
+ "http://localhost:{}",
+ portpicker::pick_unused_port().with_context(|| "Failed to find unused port")?
+ )
+ .as_str(),
+ )
+ .with_context(|| "Failed to parse builder URL")?;
+
+ // Create the `known_nodes` and `known_da_nodes`
+ let known_nodes: Vec> = (0..args.total_num_nodes)
+ .map(peer_info_from_index)
+ .collect();
+ let known_da_nodes: Vec> = (0..args.num_da_nodes)
+ .filter(|index| is_da_node(*index, args.num_da_nodes))
+ .map(peer_info_from_index)
+ .collect();
+
+ // If the network type is "Libp2p" or "Combined", we need to also assign the list of
+ // Libp2p addresses to be used
+ let mut known_libp2p_nodes = Vec::new();
+ if network_type == NetworkType::LibP2P || network_type == NetworkType::Combined {
+ for index in 0..args.total_num_nodes {
+ // Generate a Libp2p multiaddress from a random, unused port
+ let addr = libp2p_multiaddress_from_index(index)
+ .with_context(|| "Failed to generate multiaddress")?;
+ known_libp2p_nodes.push(addr);
+ }
+ }
+
+ // Create the memberships from the known nodes and known da nodes
+ let memberships = StaticCommittee::new(known_nodes.clone(), known_da_nodes.clone());
+
+ // Create a set composed of all handles
+ let mut join_set = Vec::new();
+
+ // Spawn each node
+ for index in 0..args.total_num_nodes {
+ // Create a new instance state
+ let instance_state = TestInstanceState::new(DelayConfig::default());
+
+ // Initialize HotShot from genesis
+ let hotshot_initializer =
+ hotshot::HotShotInitializer::::from_genesis::(instance_state)
+ .await
+ .with_context(|| "Failed to initialize HotShot")?;
+
+ // Create our own keypair
+ let (public_key, private_key) =
+ BLSPubKey::generated_from_seed_indexed([0u8; 32], index as u64);
+
+ // Configure HotShot
+ let config = HotShotConfig:: {
+ known_nodes: known_nodes.clone(),
+ known_da_nodes: known_da_nodes.clone(),
+ next_view_timeout: 5000,
+ fixed_leader_for_gpuvid: 0, // This means that we don't have a fixed leader for testing GPU VID
+ view_sync_timeout: Duration::from_secs(5),
+ builder_timeout: Duration::from_secs(1),
+ data_request_delay: Duration::from_millis(200),
+ builder_urls: vec![builder_url.clone()],
+ start_proposing_view: u64::MAX, // These just mean the upgrade functionality is disabled
+ stop_proposing_view: u64::MAX,
+ start_voting_view: u64::MAX,
+ stop_voting_view: u64::MAX,
+ start_proposing_time: u64::MAX,
+ stop_proposing_time: u64::MAX,
+ start_voting_time: u64::MAX,
+ stop_voting_time: u64::MAX,
+ epoch_height: 0, // This just means epochs aren't enabled
+ };
+
+ // Create a network and start HotShot based on the network type
+ match network_type {
+ NetworkType::Combined => {
+ // Start the CDN if it's not already running
+ let marshal_address = try_start_cdn().await?;
+
+ // Create the combined network
+ let network = new_combined_network(
+ Some(marshal_address),
+ known_libp2p_nodes[index].clone(),
+ args.total_num_nodes,
+ &known_libp2p_nodes,
+ is_da_node(index, args.num_da_nodes),
+ &public_key,
+ &private_key,
+ )
+ .await
+ .with_context(|| "Failed to create Combined network")?;
+
+ // Start the node
+ join_set.push(
+ start_consensus::(
+ public_key,
+ private_key,
+ config,
+ memberships.clone(),
+ network,
+ hotshot_initializer,
+ args.total_num_nodes,
+ builder_url.clone(),
+ args.num_transactions_per_view,
+ args.transaction_size,
+ args.num_views,
+ )
+ .await?,
+ );
+ }
+ NetworkType::Cdn => {
+ // Start the CDN if it's not already running
+ let marshal_address = try_start_cdn().await?;
+
+ // Create the CDN network
+ let network = new_cdn_network(
+ Some(marshal_address),
+ is_da_node(index, args.num_da_nodes),
+ &public_key,
+ &private_key,
+ )
+ .with_context(|| "Failed to create CDN network")?;
+
+ // Start the node
+ join_set.push(
+ start_consensus::(
+ public_key,
+ private_key,
+ config,
+ memberships.clone(),
+ network,
+ hotshot_initializer,
+ args.total_num_nodes,
+ builder_url.clone(),
+ args.num_transactions_per_view,
+ args.transaction_size,
+ args.num_views,
+ )
+ .await?,
+ );
+ }
+
+ NetworkType::LibP2P => {
+ // Create the Libp2p network
+ let network = new_libp2p_network(
+ // Advertise == bind address here
+ known_libp2p_nodes[index].clone(),
+ args.total_num_nodes,
+ &public_key,
+ &private_key,
+ &known_libp2p_nodes,
+ )
+ .await
+ .with_context(|| "Failed to create Libp2p network")?;
+
+ // Start the node
+ join_set.push(
+ start_consensus::(
+ public_key,
+ private_key,
+ config,
+ memberships.clone(),
+ network,
+ hotshot_initializer,
+ args.total_num_nodes,
+ builder_url.clone(),
+ args.num_transactions_per_view,
+ args.transaction_size,
+ args.num_views,
+ )
+ .await?,
+ );
+ }
+ };
+ }
+
+ // Wait for all the tasks to finish
+ while let Some(res) = join_set.pop() {
+ res.await.expect("Failed to join task");
+ }
+
+ Ok(())
+}
diff --git a/crates/examples/push-cdn/broker.rs b/crates/examples/cdn/broker.rs
similarity index 61%
rename from crates/examples/push-cdn/broker.rs
rename to crates/examples/cdn/broker.rs
index 8e03999c33..4e4669983c 100644
--- a/crates/examples/push-cdn/broker.rs
+++ b/crates/examples/cdn/broker.rs
@@ -1,27 +1,26 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
+//! The broker is the message-routing component of the CDN
+//!
+//! This is meant to be run externally, e.g. when running benchmarks on the protocol.
+//! If you just want to run everything required, you can use the `all` example
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! The following is the main `Broker` binary, which just instantiates and runs
-//! a `Broker` object.
-use anyhow::Result;
+use anyhow::{Context, Result};
use cdn_broker::{reexports::def::hook::NoMessageHook, Broker, Config};
use clap::Parser;
-use hotshot::traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey};
+use hotshot::{
+ helpers::initialize_logging,
+ traits::implementations::{KeyPair, ProductionDef, WrappedSignatureKey},
+};
use hotshot_example_types::node_types::TestTypes;
-use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey};
+use hotshot_types::traits::node_implementation::NodeType;
+use hotshot_types::traits::signature_key::BuilderSignatureKey;
use sha2::Digest;
-use tracing_subscriber::EnvFilter;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
/// The main component of the push CDN.
struct Args {
/// The discovery client endpoint (including scheme) to connect to.
- /// With the local discovery feature, this is a file path.
- /// With the remote (redis) discovery feature, this is a redis URL (e.g. `redis://127.0.0.1:6789`).
+ /// This is a URL pointing to a `KeyDB` database (e.g. `redis://127.0.0.1:6789`).
#[arg(short, long)]
discovery_endpoint: String,
@@ -60,31 +59,15 @@ struct Args {
/// The seed for broker key generation
#[arg(short, long, default_value_t = 0)]
key_seed: u64,
-
- /// The size of the global memory pool (in bytes). This is the maximum number of bytes that
- /// can be allocated at once for all connections. A connection will block if it
- /// tries to allocate more than this amount until some memory is freed.
- /// Default is 1GB.
- #[arg(long, default_value_t = 1_073_741_824)]
- global_memory_pool_size: usize,
}
#[tokio::main]
async fn main() -> Result<()> {
- // Parse command line arguments
- let args = Args::parse();
+ // Initialize logging
+ initialize_logging();
- // Initialize tracing
- if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) {
- tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .json()
- .init();
- } else {
- tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .init();
- }
+ // Parse the command line arguments
+ let args = Args::parse();
// Generate the broker key from the supplied seed
let key_hash = sha2::Sha256::digest(args.key_seed.to_le_bytes());
@@ -110,15 +93,13 @@ async fn main() -> Result<()> {
public_advertise_endpoint: args.public_advertise_endpoint,
private_bind_endpoint: args.private_bind_endpoint,
private_advertise_endpoint: args.private_advertise_endpoint,
- global_memory_pool_size: Some(args.global_memory_pool_size),
+ // Use a 1GB memory pool size
+ global_memory_pool_size: Some(1_073_741_824),
};
- // Create new `Broker`
- // Uses TCP from broker connections and TCP+TLS for user connections.
+ // Create the new `Broker`
let broker = Broker::new(broker_config).await?;
- // Start the main loop, consuming it
- broker.start().await?;
-
- Ok(())
+ // Run the broker until it is terminated
+ broker.start().await.with_context(|| "Broker exited")
}
diff --git a/crates/examples/push-cdn/marshal.rs b/crates/examples/cdn/marshal.rs
similarity index 55%
rename from crates/examples/push-cdn/marshal.rs
rename to crates/examples/cdn/marshal.rs
index 569cb0dc33..f4a5d0d969 100644
--- a/crates/examples/push-cdn/marshal.rs
+++ b/crates/examples/cdn/marshal.rs
@@ -1,27 +1,22 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
+//! The marshal is the component of the CDN that authenticates users and routes
+//! them to the appropriate broker
+//!
+//! This is meant to be run externally, e.g. when running benchmarks on the protocol.
+//! If you just want to run everything required, you can use the `all` example
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! The following is the main `Marshal` binary, which just instantiates and runs
-//! a `Marshal` object.
-
-use anyhow::Result;
+use anyhow::{Context, Result};
use cdn_marshal::{Config, Marshal};
use clap::Parser;
-use hotshot::traits::implementations::ProductionDef;
+use hotshot::{helpers::initialize_logging, traits::implementations::ProductionDef};
use hotshot_example_types::node_types::TestTypes;
use hotshot_types::traits::node_implementation::NodeType;
-use tracing_subscriber::EnvFilter;
-
-// TODO: forall, add logging where we need it
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
/// The main component of the push CDN.
struct Args {
- /// The discovery client endpoint (including scheme) to connect to
+ /// The discovery client endpoint (including scheme) to connect to.
+ /// This is a URL pointing to a `KeyDB` database (e.g. `redis://127.0.0.1:6789`).
#[arg(short, long)]
discovery_endpoint: String,
@@ -43,13 +38,6 @@ struct Args {
/// If not provided, a local, pinned CA is used
#[arg(long)]
ca_key_path: Option,
-
- /// The size of the global memory pool (in bytes). This is the maximum number of bytes that
- /// can be allocated at once for all connections. A connection will block if it
- /// tries to allocate more than this amount until some memory is freed.
- /// Default is 1GB.
- #[arg(long, default_value_t = 1_073_741_824)]
- global_memory_pool_size: usize,
}
#[tokio::main]
@@ -58,16 +46,7 @@ async fn main() -> Result<()> {
let args = Args::parse();
// Initialize tracing
- if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) {
- tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .json()
- .init();
- } else {
- tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env())
- .init();
- }
+ initialize_logging();
// Create a new `Config`
let config = Config {
@@ -76,7 +55,8 @@ async fn main() -> Result<()> {
metrics_bind_endpoint: args.metrics_bind_endpoint,
ca_cert_path: args.ca_cert_path,
ca_key_path: args.ca_key_path,
- global_memory_pool_size: Some(args.global_memory_pool_size),
+ // Use a 1GB memory pool
+ global_memory_pool_size: Some(1_073_741_824),
};
// Create new `Marshal` from the config
@@ -84,7 +64,5 @@ async fn main() -> Result<()> {
Marshal::::SignatureKey>>::new(config).await?;
// Start the main loop, consuming it
- marshal.start().await?;
-
- Ok(())
+ marshal.start().await.with_context(|| "Marshal exited")
}
diff --git a/crates/examples/combined/all.rs b/crates/examples/combined/all.rs
deleted file mode 100644
index 89864af1d1..0000000000
--- a/crates/examples/combined/all.rs
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! An example program using both the web server and libp2p
-/// types used for this example
-pub mod types;
-
-use std::path::Path;
-
-use cdn_broker::{reexports::def::hook::NoMessageHook, Broker};
-use cdn_marshal::Marshal;
-use hotshot::{
- helpers::initialize_logging,
- traits::implementations::{KeyPair, TestingDef, WrappedSignatureKey},
- types::SignatureKey,
-};
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::ValidatorArgs;
-use hotshot_types::traits::node_implementation::NodeType;
-use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT};
-use rand::{rngs::StdRng, RngCore, SeedableRng};
-use tokio::spawn;
-use tracing::{error, instrument};
-
-use crate::{
- infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs},
- types::{Network, NodeImpl, ThisRun},
-};
-
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let (config, orchestrator_url) = read_orchestrator_init_config::();
-
- // The configuration we are using for testing is 2 brokers & 1 marshal
- // A keypair shared between brokers
- let (broker_public_key, broker_private_key) =
- ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337);
-
- // Get the OS temporary directory
- let temp_dir = std::env::temp_dir();
-
- // Create an SQLite file inside of the temporary directory
- let discovery_endpoint = temp_dir
- .join(Path::new(&format!(
- "test-{}.sqlite",
- StdRng::from_entropy().next_u64()
- )))
- .to_string_lossy()
- .into_owned();
-
- // 2 brokers
- for _ in 0..2 {
- // Get the ports to bind to
- let private_port = portpicker::pick_unused_port().expect("could not find an open port");
- let public_port = portpicker::pick_unused_port().expect("could not find an open port");
-
- // Extrapolate addresses
- let private_address = format!("127.0.0.1:{private_port}");
- let public_address = format!("127.0.0.1:{public_port}");
-
- let config: cdn_broker::Config::SignatureKey>> =
- cdn_broker::Config {
- discovery_endpoint: discovery_endpoint.clone(),
- public_advertise_endpoint: public_address.clone(),
- public_bind_endpoint: public_address,
- private_advertise_endpoint: private_address.clone(),
- private_bind_endpoint: private_address,
-
- keypair: KeyPair {
- public_key: WrappedSignatureKey(broker_public_key),
- private_key: broker_private_key.clone(),
- },
-
- user_message_hook: NoMessageHook,
- broker_message_hook: NoMessageHook,
-
- metrics_bind_endpoint: None,
- ca_cert_path: None,
- ca_key_path: None,
- global_memory_pool_size: Some(1024 * 1024 * 1024),
- };
-
- // Create and spawn the broker
- spawn(async move {
- let broker: Broker::SignatureKey>> =
- Broker::new(config).await.expect("broker failed to start");
-
- // Error if we stopped unexpectedly
- if let Err(err) = broker.start().await {
- error!("broker stopped: {err}");
- }
- });
- }
-
- // Get the port to use for the marshal
- let marshal_endpoint = config
- .cdn_marshal_address
- .clone()
- .expect("CDN marshal address must be specified");
-
- // Configure the marshal
- let marshal_config = cdn_marshal::Config {
- bind_endpoint: marshal_endpoint.clone(),
- discovery_endpoint,
- metrics_bind_endpoint: None,
- ca_cert_path: None,
- ca_key_path: None,
- global_memory_pool_size: Some(1024 * 1024 * 1024),
- };
-
- // Spawn the marshal
- spawn(async move {
- let marshal: Marshal::SignatureKey>> =
- Marshal::new(marshal_config)
- .await
- .expect("failed to spawn marshal");
-
- // Error if we stopped unexpectedly
- if let Err(err) = marshal.start().await {
- error!("broker stopped: {err}");
- }
- });
-
- // orchestrator
- spawn(run_orchestrator::(OrchestratorArgs {
- url: orchestrator_url.clone(),
- config: config.clone(),
- }));
-
- // nodes
- let mut nodes = Vec::new();
- for i in 0..config.config.num_nodes_with_stake.into() {
- // Calculate our libp2p advertise address, which we will later derive the
- // bind address from for example purposes.
- let advertise_address = gen_local_address::(i);
- let orchestrator_url = orchestrator_url.clone();
- let builder_address = gen_local_address::(i);
-
- let node = spawn(async move {
- infra::main_entry_point::(
- ValidatorArgs {
- url: orchestrator_url,
- advertise_address: Some(advertise_address.to_string()),
- builder_address: Some(builder_address),
- network_config_file: None,
- },
- )
- .await;
- });
- nodes.push(node);
- }
- futures::future::join_all(nodes).await;
-}
diff --git a/crates/examples/combined/multi-validator.rs b/crates/examples/combined/multi-validator.rs
deleted file mode 100644
index b721cb5c4f..0000000000
--- a/crates/examples/combined/multi-validator.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! A multi-validator using both the web server libp2p
-use clap::Parser;
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs};
-use tokio::spawn;
-use tracing::instrument;
-
-use crate::types::{Network, NodeImpl, ThisRun};
-
-/// types used for this example
-pub mod types;
-
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let args = MultiValidatorArgs::parse();
- tracing::debug!("connecting to orchestrator at {:?}", args.url);
- let mut nodes = Vec::new();
- for node_index in 0..args.num_nodes {
- let args = args.clone();
-
- let node = spawn(async move {
- infra::main_entry_point::(
- ValidatorArgs::from_multi_args(args, node_index),
- )
- .await;
- });
- nodes.push(node);
- }
- let _result = futures::future::join_all(nodes).await;
-}
diff --git a/crates/examples/combined/orchestrator.rs b/crates/examples/combined/orchestrator.rs
deleted file mode 100644
index c3d399f489..0000000000
--- a/crates/examples/combined/orchestrator.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! Orchestrator using the web server
-/// types used for this example
-pub mod types;
-
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::state_types::TestTypes;
-use tracing::instrument;
-
-use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs};
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let (config, orchestrator_url) = read_orchestrator_init_config::();
- run_orchestrator::(OrchestratorArgs:: {
- url: orchestrator_url.clone(),
- config: config.clone(),
- })
- .await;
-}
diff --git a/crates/examples/combined/types.rs b/crates/examples/combined/types.rs
deleted file mode 100644
index 1209891b71..0000000000
--- a/crates/examples/combined/types.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-use std::fmt::Debug;
-
-use hotshot::traits::implementations::CombinedNetworks;
-use hotshot_example_types::{
- auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes,
- storage_types::TestStorage,
-};
-use hotshot_types::traits::node_implementation::NodeImplementation;
-use serde::{Deserialize, Serialize};
-
-use crate::infra::CombinedDaRun;
-
-/// dummy struct so we can choose types
-#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)]
-pub struct NodeImpl {}
-
-/// Convenience type alias
-pub type Network = CombinedNetworks;
-
-impl NodeImplementation for NodeImpl {
- type Network = Network;
- type Storage = TestStorage;
- type AuctionResultsProvider = TestAuctionResultsProvider;
-}
-/// convenience type alias
-pub type ThisRun = CombinedDaRun;
diff --git a/crates/examples/combined/validator.rs b/crates/examples/combined/validator.rs
deleted file mode 100644
index fd6ff83957..0000000000
--- a/crates/examples/combined/validator.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! A validator using both the web server and libp2p
-
-use clap::Parser;
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::ValidatorArgs;
-use local_ip_address::local_ip;
-use tracing::{debug, instrument};
-
-use crate::types::{Network, NodeImpl, ThisRun};
-
-/// types used for this example
-pub mod types;
-
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let mut args = ValidatorArgs::parse();
-
- // If we did not set the advertise address, use our local IP and port 8000
- let local_ip = local_ip().expect("failed to get local IP");
- args.advertise_address = Some(args.advertise_address.unwrap_or(format!("{local_ip}:8000")));
-
- debug!("connecting to orchestrator at {:?}", args.url);
- infra::main_entry_point::(args).await;
-}
diff --git a/crates/examples/common.rs b/crates/examples/common.rs
new file mode 100644
index 0000000000..88a1885ddf
--- /dev/null
+++ b/crates/examples/common.rs
@@ -0,0 +1,377 @@
+use std::time::Instant;
+
+use async_lock::RwLock;
+use hotshot_types::{
+ data::EpochNumber, traits::node_implementation::ConsensusTime, utils::non_crypto_hash,
+};
+use simple_moving_average::SingleSumSMA;
+use simple_moving_average::SMA;
+
+/// The type of network to use for the example
+#[derive(Debug, PartialEq, Eq)]
+enum NetworkType {
+ /// A combined network, which is a combination of a Libp2p and Push CDN network
+ Combined,
+
+ /// A network solely using the Push CDN
+ Cdn,
+
+ /// A Libp2p network
+ LibP2P,
+}
+
+/// This is a testing function which allows us to easily determine if a node should be a DA node
+fn is_da_node(index: usize, num_da_nodes: usize) -> bool {
+ index < num_da_nodes
+}
+
+/// This is a testing function which allows us to easily generate peer configs from indexes
+fn peer_info_from_index(index: usize) -> hotshot_types::PeerConfig {
+ // Get the node's public key
+ let (public_key, _) =
+ hotshot::types::BLSPubKey::generated_from_seed_indexed([0u8; 32], index as u64);
+
+ // Generate the peer config
+ hotshot_types::PeerConfig {
+ stake_table_entry: public_key.stake_table_entry(1),
+ state_ver_key: hotshot_types::light_client::StateKeyPair::default()
+ .0
+ .ver_key(),
+ }
+}
+
+/// Create a new Push CDN network
+fn new_cdn_network(
+ marshal_address: Option,
+ is_da_node: bool,
+ public_key: &BLSPubKey,
+ private_key: &BLSPrivKey,
+) -> Result>> {
+ // If the marshal endpoint is not provided, we don't need to create a CDN network
+ let Some(marshal_address) = marshal_address else {
+ anyhow::bail!("Marshal endpoint is required for CDN networks");
+ };
+
+ // Subscribe to topics based on whether we're a DA node or not
+ let mut topics = vec![CdnTopic::Global];
+ if is_da_node {
+ topics.push(CdnTopic::Da);
+ }
+
+ // Create and return the network
+ Ok(Arc::new(
+ PushCdnNetwork::new(
+ marshal_address,
+ topics,
+ KeyPair {
+ public_key: WrappedSignatureKey(*public_key),
+ private_key: private_key.clone(),
+ },
+ CdnMetricsValue::default(),
+ )
+ .with_context(|| "Failed to create Push CDN network")?,
+ ))
+}
+
+/// A helper function to create a Libp2p network
+async fn new_libp2p_network(
+ bind_address: Multiaddr,
+ total_num_nodes: usize,
+ public_key: &BLSPubKey,
+ private_key: &BLSPrivKey,
+ known_libp2p_nodes: &[Multiaddr],
+) -> Result>> {
+ // Derive the Libp2p keypair from the private key
+ let libp2p_keypair = derive_libp2p_keypair::(private_key)
+ .with_context(|| "Failed to derive libp2p keypair")?;
+
+ // Sign our Libp2p lookup record value
+ let lookup_record_value = RecordValue::new_signed(
+ &RecordKey::new(Namespace::Lookup, public_key.to_bytes()),
+ libp2p_keypair.public().to_peer_id().to_bytes(),
+ private_key,
+ )
+ .expect("Failed to sign DHT lookup record");
+
+ // Configure Libp2p
+ let libp2p_config = Libp2pConfig {
+ keypair: libp2p_keypair,
+ bind_address,
+ known_peers: known_libp2p_nodes.to_vec(),
+ quorum_membership: None, // This disables stake-table authentication
+ auth_message: None, // This disables stake-table authentication
+ gossip_config: GossipConfig::default(),
+ request_response_config: RequestResponseConfig::default(),
+ kademlia_config: KademliaConfig {
+ replication_factor: total_num_nodes * 2 / 3,
+ record_ttl: None,
+ publication_interval: None,
+ file_path: format!("/tmp/kademlia-{}.db", rand::random::()),
+ lookup_record_value,
+ },
+ };
+
+ // Create the network with the config
+ Ok(Arc::new(
+ Libp2pNetwork::new(
+ libp2p_config,
+ public_key,
+ Libp2pMetricsValue::default(),
+ None,
+ )
+ .await
+ .with_context(|| "Failed to create libp2p network")?,
+ ))
+}
+
+/// A helper function to create a Combined network, which is a combination of a Libp2p and Push CDN network
+async fn new_combined_network(
+ marshal_address: Option,
+ libp2p_bind_address: Multiaddr,
+ total_num_nodes: usize,
+ known_libp2p_nodes: &[Multiaddr],
+ is_da_node: bool,
+ public_key: &BLSPubKey,
+ private_key: &BLSPrivKey,
+) -> Result>> {
+ // Create the CDN network and launch the CDN
+ let cdn_network = Arc::into_inner(new_cdn_network(
+ marshal_address,
+ is_da_node,
+ public_key,
+ private_key,
+ )?)
+ .unwrap();
+
+ // Create the Libp2p network
+ let libp2p_network = Arc::into_inner(
+ new_libp2p_network(
+ libp2p_bind_address,
+ total_num_nodes,
+ public_key,
+ private_key,
+ known_libp2p_nodes,
+ )
+ .await?,
+ )
+ .unwrap();
+
+ // Create and return the combined network
+ Ok(Arc::new(
+ hotshot::traits::implementations::CombinedNetworks::new(
+ cdn_network,
+ libp2p_network,
+ Some(Duration::from_secs(1)),
+ ),
+ ))
+}
+
+#[allow(clippy::too_many_arguments)]
+#[allow(clippy::cast_precision_loss)]
+#[allow(clippy::cast_sign_loss)]
+#[allow(clippy::too_many_lines)]
+#[allow(clippy::cast_possible_truncation)]
+/// A helper function to start consensus with a builder
+async fn start_consensus<
+ I: hotshot::traits::NodeImplementation<
+ TestTypes,
+ Storage = TestStorage,
+ AuctionResultsProvider = TestAuctionResultsProvider,
+ >,
+>(
+ public_key: BLSPubKey,
+ private_key: BLSPrivKey,
+ config: HotShotConfig,
+ memberships: hotshot_example_types::node_types::StaticMembership,
+ network: Arc,
+ hotshot_initializer: hotshot::HotShotInitializer,
+ total_num_nodes: usize,
+ builder_url: Url,
+ num_transactions_per_view: usize,
+ transaction_size: usize,
+ num_views: Option,
+) -> Result> {
+ // Create the marketplace config
+ let marketplace_config: MarketplaceConfig = MarketplaceConfig {
+ auction_results_provider: TestAuctionResultsProvider::::default().into(),
+ // TODO: we need to pass a valid fallback builder url here somehow
+ fallback_builder_url: Url::parse("http://localhost:8080").unwrap(),
+ };
+
+ // Initialize the system context
+ let handle = SystemContext::::init(
+ public_key,
+ private_key,
+ config,
+ Arc::new(RwLock::new(memberships.clone())),
+ network,
+ hotshot_initializer,
+ ConsensusMetricsValue::default(),
+ TestStorage::::default(),
+ marketplace_config,
+ )
+ .await
+ .with_context(|| "Failed to initialize system context")?
+ .0;
+
+ // Each node has to start the builder since we don't have a sovereign builder in this example
+ let builder_handle =
+ >::start(
+ total_num_nodes,
+ builder_url.clone(),
+ (),
+ HashMap::new(),
+ )
+ .await;
+
+ // Start it
+ builder_handle.start(Box::new(handle.event_stream()));
+
+ // Start consensus
+ handle.hotshot.start_consensus().await;
+
+ // See if we're a DA node or not
+ let is_da_node = memberships.has_da_stake(&public_key, EpochNumber::new(0));
+
+ // Create an LRU cache to store block data if we're DA. We populate this cache when we receive
+ // the DA proposal for a view and print the data when we actually decide on that view
+ let mut view_cache = LruCache::new(NonZero::new(100).unwrap());
+
+ // A cache of outstanding transactions (hashes of). Used to calculate latency. Isn't needed for non-DA nodes
+ let mut outstanding_transactions: LruCache =
+ LruCache::new(NonZero::new(10000).unwrap());
+
+ // The simple moving average, used to calculate throughput
+ let mut throughput: SingleSumSMA = SingleSumSMA::::new();
+
+ // The last time we decided on a view (for calculating throughput)
+ let mut last_decide_time = Instant::now();
+
+ // Spawn the task to wait for events
+ let join_handle = tokio::spawn(async move {
+ // Get the event stream for this particular node
+ let mut event_stream = handle.event_stream();
+
+ // Wait for a `Decide` event for the view number we requested
+ loop {
+ // Get the next event
+ let event = event_stream.next().await.unwrap();
+
+ // DA proposals contain the full list of transactions. We can use this to cache
+ // the size of the proposed block
+ if let EventType::DaProposal { proposal, .. } = event.event {
+ // Decode the transactions. We use this to log the size of the proposed block
+ // when we decide on a view
+ let transactions =
+ match TestTransaction::decode(&proposal.data.encoded_transactions) {
+ Ok(transactions) => transactions,
+ Err(err) => {
+ tracing::error!("Failed to decode transactions: {:?}", err);
+ continue;
+ }
+ };
+
+ // Get the number of transactions in the proposed block
+ let num_transactions = transactions.len();
+
+ // Sum the total number of bytes in the proposed block and cache
+ // the hash so we can calculate latency
+ let mut sum = 0;
+ let mut submitted_times = Vec::new();
+ for transaction in transactions {
+ // Add the size of the transaction to the sum
+ sum += transaction.bytes().len();
+
+ // If we can find the transaction in the cache, add the hash of the transaction to the cache
+ if let Some(&instant) =
+ outstanding_transactions.get(&non_crypto_hash(transaction.bytes()))
+ {
+ submitted_times.push(instant);
+ }
+ }
+
+ // Insert the size of the proposed block and the number of transactions into the cache.
+ // We use this to log the size of the proposed block when we decide on a view
+ view_cache.put(
+ *proposal.data.view_number,
+ (sum, num_transactions, submitted_times),
+ );
+
+ // A `Decide` event contains data that HotShot has decided on
+ } else if let EventType::Decide { qc, .. } = event.event {
+ // If we cached the size of the proposed block, log it
+ if let Some((block_size, num_transactions, submitted_times)) =
+ view_cache.get(&*qc.view_number)
+ {
+ // Calculate the average latency of the transactions
+ let mut total_latency = Duration::default();
+ let mut num_found_transactions = 0;
+ for submitted_time in submitted_times {
+ total_latency += submitted_time.elapsed();
+ num_found_transactions += 1;
+ }
+ let average_latency = total_latency.checked_div(num_found_transactions);
+
+ // Update the throughput SMA
+ throughput
+ .add_sample(*block_size as f64 / last_decide_time.elapsed().as_secs_f64());
+
+ // Update the last decided time
+ last_decide_time = Instant::now();
+
+ // If we have a valid average latency, log it
+ if let Some(average_latency) = average_latency {
+ info!(
+ block_size = block_size,
+ num_txs = num_transactions,
+ avg_tx_latency =? average_latency,
+ avg_throughput = format!("{}/s", bytesize::ByteSize::b(throughput.get_average() as u64)),
+ "Decided on view {}",
+ *qc.view_number
+ );
+ } else {
+ info!(
+ block_size = block_size,
+ num_txs = num_transactions,
+ "Decided on view {}",
+ *qc.view_number
+ );
+ }
+ } else {
+ info!("Decided on view {}", *qc.view_number);
+ }
+
+ // Generate and submit the requested number of transactions
+ for _ in 0..num_transactions_per_view {
+ // Generate a random transaction
+ let mut transaction_bytes = vec![0u8; transaction_size];
+ rand::thread_rng().fill(&mut transaction_bytes[..]);
+
+ // If we're a DA node, cache the transaction so we can calculate latency
+ if is_da_node {
+ outstanding_transactions
+ .put(non_crypto_hash(&transaction_bytes), Instant::now());
+ }
+
+ // Submit the transaction
+ if let Err(err) = handle
+ .submit_transaction(TestTransaction::new(transaction_bytes))
+ .await
+ {
+ tracing::error!("Failed to submit transaction: {:?}", err);
+ };
+ }
+
+ // If we have a specific view number we want to wait for, check if we've reached it
+ if let Some(num_views) = num_views {
+ if *qc.view_number == num_views as u64 {
+ // Break when we've decided on the view number we requested
+ break;
+ }
+ }
+ }
+ }
+ });
+
+ Ok(join_handle)
+}
diff --git a/crates/examples/coordinator.rs b/crates/examples/coordinator.rs
new file mode 100644
index 0000000000..dd041832f4
--- /dev/null
+++ b/crates/examples/coordinator.rs
@@ -0,0 +1,127 @@
+//! This service helps coordinate running multiple nodes where each needs
+//! to be assigned a unique index
+//!
+//! This is meant to be run externally, e.g. when running benchmarks on the protocol.
+//! If you just want to run everything required, you can use the `all` example
+
+use std::{
+ collections::HashSet,
+ net::SocketAddr,
+ str::FromStr,
+ sync::{
+ atomic::{AtomicU32, Ordering},
+ Arc,
+ },
+};
+
+use anyhow::{Context, Result};
+use bytes::Bytes;
+use clap::Parser;
+use hotshot::helpers::initialize_logging;
+use libp2p::{multiaddr::Protocol, Multiaddr};
+use parking_lot::RwLock;
+use warp::Filter;
+
+/// The coordinator service, used to assign unique indices to nodes when running benchmarks
+#[derive(Parser)]
+struct Args {
+ /// The address to bind to
+ #[arg(long, default_value = "127.0.0.1:3030")]
+ bind_address: String,
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+ // Initialize logging
+ initialize_logging();
+
+ // Parse the command-line arguments
+ let args = Args::parse();
+
+ // Parse the bind address
+ let bind_address = args
+ .bind_address
+ .parse::()
+ .with_context(|| "Failed to parse bind address")?;
+
+ // Create a shared counter
+ let counter = Arc::new(AtomicU32::new(0));
+ let counter = warp::any().map(move || Arc::clone(&counter));
+
+ // Create a shared set of multiaddrs for Libp2p
+ let libp2p_multiaddrs = Arc::new(RwLock::new(HashSet::new()));
+ let libp2p_multiaddrs = warp::any().map(move || Arc::clone(&libp2p_multiaddrs));
+
+ // `/index` returns the node index we are assigned
+ let index = warp::path!("index")
+ .and(counter.clone())
+ .map(|counter: Arc| counter.fetch_add(1, Ordering::SeqCst).to_string());
+
+ // POST `/libp2p_info` submits libp2p information to the coordinator
+ let submit_libp2p_info = warp::path!("libp2p-info")
+ .and(warp::post())
+ .and(warp::body::bytes())
+ .and(libp2p_multiaddrs.clone())
+ .map(
+ |body: Bytes, libp2p_multiaddrs: Arc>>| {
+ // Attempt to process as a string
+ let Ok(string) = String::from_utf8(body.to_vec()) else {
+ return "Failed to parse body as string".to_string();
+ };
+
+ // Attempt to parse the string as a Libp2p Multiaddr
+ let Ok(multiaddr) = Multiaddr::from_str(&string) else {
+ return "Failed to parse body as Multiaddr".to_string();
+ };
+
+ // Pop off the last protocol
+ let Some(last_protocol) = multiaddr.clone().pop() else {
+ return "Failed to get last protocol of multiaddr".to_string();
+ };
+
+ // Make sure it is the P2p protocol
+ let Protocol::P2p(_) = last_protocol else {
+ return "Failed to get P2p protocol of multiaddr".to_string();
+ };
+
+ // Add it to the set
+ libp2p_multiaddrs.write().insert(multiaddr);
+
+ "Ok".to_string()
+ },
+ );
+
+ // GET `/libp2p_info` returns the list of libp2p multiaddrs
+ let get_libp2p_info = warp::path!("libp2p-info")
+ .and(libp2p_multiaddrs.clone())
+ .map(|libp2p_multiaddrs: Arc>>| {
+ // Get the list of multiaddrs
+ let multiaddrs = libp2p_multiaddrs.read().clone();
+
+ // Convert the multiaddrs to a string, separated by newlines
+ multiaddrs
+ .iter()
+ .map(ToString::to_string)
+ .collect::>()
+ .join("\n")
+ });
+
+ // `/reset` resets the state of the coordinator
+ let reset = warp::path!("reset")
+ .and(counter)
+ .and(libp2p_multiaddrs)
+ .map(
+ |counter: Arc, libp2p_multiaddrs: Arc>>| {
+ counter.store(0, Ordering::SeqCst);
+ libp2p_multiaddrs.write().clear();
+ "Ok"
+ },
+ );
+
+ // Run the server
+ warp::serve(index.or(reset).or(submit_libp2p_info).or(get_libp2p_info))
+ .run(bind_address)
+ .await;
+
+ Ok(())
+}
diff --git a/crates/examples/infra/mod.rs b/crates/examples/infra/mod.rs
deleted file mode 100755
index 63dd75d203..0000000000
--- a/crates/examples/infra/mod.rs
+++ /dev/null
@@ -1,1139 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-#![allow(clippy::panic)]
-use std::{
- collections::HashMap,
- fmt::Debug,
- fs,
- net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4},
- num::NonZeroUsize,
- sync::Arc,
- time::Instant,
-};
-
-use async_lock::RwLock;
-use async_trait::async_trait;
-use cdn_broker::reexports::crypto::signature::KeyPair;
-use chrono::Utc;
-use clap::{value_parser, Arg, Command, Parser};
-use futures::StreamExt;
-use hotshot::{
- traits::{
- implementations::{
- derive_libp2p_multiaddr, derive_libp2p_peer_id, CdnMetricsValue, CdnTopic,
- CombinedNetworks, Libp2pMetricsValue, Libp2pNetwork, PushCdnNetwork,
- WrappedSignatureKey,
- },
- BlockPayload, NodeImplementation,
- },
- types::SystemContextHandle,
- MarketplaceConfig, SystemContext,
-};
-use hotshot_example_types::{
- auction_results_provider_types::TestAuctionResultsProvider,
- block_types::{TestBlockHeader, TestBlockPayload, TestTransaction},
- node_types::{Libp2pImpl, PushCdnImpl},
- state_types::TestInstanceState,
- storage_types::TestStorage,
-};
-use hotshot_orchestrator::{
- self,
- client::{get_complete_config, BenchResults, OrchestratorClient, ValidatorArgs},
-};
-use hotshot_testing::block_builder::{
- BuilderTask, RandomBuilderImplementation, SimpleBuilderImplementation,
- TestBuilderImplementation,
-};
-use hotshot_types::{
- consensus::ConsensusMetricsValue,
- data::{Leaf, TestableLeaf},
- event::{Event, EventType},
- network::{BuilderType, NetworkConfig, NetworkConfigFile, NetworkConfigSource},
- traits::{
- block_contents::{BlockHeader, TestableBlock},
- election::Membership,
- network::ConnectedNetwork,
- node_implementation::{ConsensusTime, NodeType, Versions},
- states::TestableState,
- },
- HotShotConfig, PeerConfig, ValidatorConfig,
-};
-use libp2p_networking::network::{GossipConfig, RequestResponseConfig};
-use rand::{rngs::StdRng, SeedableRng};
-use surf_disco::Url;
-use tracing::{debug, error, info, warn};
-
-#[derive(Debug, Clone)]
-/// Arguments passed to the orchestrator
-pub struct OrchestratorArgs {
- /// The url the orchestrator runs on; this should be in the form of `http://localhost:5555` or `http://0.0.0.0:5555`
- pub url: Url,
- /// The configuration file to be used for this run
- pub config: NetworkConfig,
-}
-
-#[derive(Parser, Debug, Clone)]
-#[command(
- name = "Multi-machine consensus",
- about = "Simulates consensus among multiple machines"
-)]
-/// The configuration file to be used for this run
-pub struct ConfigArgs {
- /// The configuration file to be used for this run
- pub config_file: String,
-}
-
-impl Default for ConfigArgs {
- fn default() -> Self {
- Self {
- config_file: "./crates/orchestrator/run-config.toml".to_string(),
- }
- }
-}
-
-/// Reads the orchestrator initialization config from the command line
-/// # Panics
-/// If unable to read the config file from the command line
-#[allow(clippy::too_many_lines)]
-pub fn read_orchestrator_init_config() -> (NetworkConfig, Url)
-{
- // assign default setting
- let mut orchestrator_url = Url::parse("http://localhost:4444").unwrap();
- let mut args = ConfigArgs::default();
- // start reading from the command line
- let matches = Command::new("orchestrator")
- .arg(
- Arg::new("config_file")
- .short('c')
- .long("config_file")
- .value_name("FILE")
- .help("Sets a custom config file with default values, some might be changed if they are set manually in the command line")
- .required(true),
- )
- .arg(
- Arg::new("total_nodes")
- .short('n')
- .long("total_nodes")
- .value_name("NUM")
- .help("Sets the total number of nodes")
- .required(false),
- )
- .arg(
- Arg::new("da_committee_size")
- .short('d')
- .long("da_committee_size")
- .value_name("NUM")
- .help("Sets the size of the data availability committee")
- .required(false),
- )
- .arg(
- Arg::new("transactions_per_round")
- .short('t')
- .long("transactions_per_round")
- .value_name("NUM")
- .help("Sets the number of transactions per round")
- .required(false),
- )
- .arg(
- Arg::new("transaction_size")
- .short('s')
- .long("transaction_size")
- .value_name("NUM")
- .help("Sets the size of each transaction in bytes")
- .required(false),
- )
- .arg(
- Arg::new("rounds")
- .short('r')
- .long("rounds")
- .value_name("NUM")
- .help("Sets the number of rounds to run")
- .required(false),
- )
- .arg(
- Arg::new("commit_sha")
- .short('o')
- .long("commit_sha")
- .value_name("SHA")
- .help("Sets the commit sha to output in the results")
- .required(false),
- )
- .arg(
- Arg::new("orchestrator_url")
- .short('u')
- .long("orchestrator_url")
- .value_name("URL")
- .help("Sets the url of the orchestrator")
- .required(false),
- )
- .arg(
- Arg::new("fixed_leader_for_gpuvid")
- .short('f')
- .long("fixed_leader_for_gpuvid")
- .value_name("BOOL")
- .help("Sets the number of fixed leader for gpu vid, only be used when leaders running on gpu")
- .required(false),
- )
- .arg(
- Arg::new("builder")
- .short('b')
- .long("builder")
- .value_name("BUILDER_TYPE")
- .value_parser(value_parser!(BuilderType))
- .help("Sets type of builder. `simple` or `random` to run corresponding integrated builder, `external` to use the one specified by `[config.builder_url]` in config")
- .required(false),
- )
- .arg(
- Arg::new("cdn_marshal_address")
- .short('m')
- .long("cdn_marshal_address")
- .value_name("URL")
- .help("Sets the url for cdn_broker_marshal_endpoint")
- .required(false),
- )
- .get_matches();
-
- if let Some(config_file_string) = matches.get_one::("config_file") {
- args = ConfigArgs {
- config_file: config_file_string.clone(),
- };
- } else {
- error!("No config file provided, we'll use the default one.");
- }
- let mut config: NetworkConfig =
- load_config_from_file::(&args.config_file);
-
- if let Some(total_nodes_string) = matches.get_one::("total_nodes") {
- config.config.num_nodes_with_stake = total_nodes_string.parse::().unwrap();
- config.config.known_nodes_with_stake =
- vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize];
- error!(
- "config.config.total_nodes: {:?}",
- config.config.num_nodes_with_stake
- );
- }
- if let Some(da_committee_size_string) = matches.get_one::("da_committee_size") {
- config.config.da_staked_committee_size = da_committee_size_string.parse::().unwrap();
- }
- if let Some(fixed_leader_for_gpuvid_string) =
- matches.get_one::("fixed_leader_for_gpuvid")
- {
- config.config.fixed_leader_for_gpuvid =
- fixed_leader_for_gpuvid_string.parse::().unwrap();
- }
- if let Some(transactions_per_round_string) = matches.get_one::("transactions_per_round")
- {
- config.transactions_per_round = transactions_per_round_string.parse::().unwrap();
- }
- if let Some(transaction_size_string) = matches.get_one::("transaction_size") {
- config.transaction_size = transaction_size_string.parse::().unwrap();
- }
- if let Some(rounds_string) = matches.get_one::("rounds") {
- config.rounds = rounds_string.parse::().unwrap();
- }
- if let Some(commit_sha_string) = matches.get_one::("commit_sha") {
- config.commit_sha = commit_sha_string.to_string();
- }
- if let Some(orchestrator_url_string) = matches.get_one::("orchestrator_url") {
- orchestrator_url = Url::parse(orchestrator_url_string).unwrap();
- }
- if let Some(builder_type) = matches.get_one::("builder") {
- config.builder = *builder_type;
- }
- if let Some(cdn_marshal_address_string) = matches.get_one::("cdn_marshal_address") {
- config.cdn_marshal_address = Some(cdn_marshal_address_string.to_string());
- }
-
- (config, orchestrator_url)
-}
-
-/// Reads a network configuration from a given filepath
-/// # Panics
-/// if unable to convert the config file into toml
-/// # Note
-/// This derived config is used for initialization of orchestrator,
-/// therefore `known_nodes_with_stake` will be an initialized
-/// vector full of the node's own config.
-#[must_use]
-pub fn load_config_from_file(
- config_file: &str,
-) -> NetworkConfig {
- let config_file_as_string: String = fs::read_to_string(config_file)
- .unwrap_or_else(|_| panic!("Could not read config file located at {config_file}"));
- let config_toml: NetworkConfigFile =
- toml::from_str::>(&config_file_as_string)
- .expect("Unable to convert config file to TOML");
-
- let mut config: NetworkConfig = config_toml.into();
-
- // initialize it with size for better assignment of peers' config
- config.config.known_nodes_with_stake =
- vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize];
-
- config
-}
-
-/// Runs the orchestrator
-pub async fn run_orchestrator(
- OrchestratorArgs { url, config }: OrchestratorArgs,
-) {
- println!("Starting orchestrator",);
- let _ = hotshot_orchestrator::run_orchestrator::(config, url).await;
-}
-
-/// Helper function to calculate the number of transactions to send per node per round
-#[allow(clippy::cast_possible_truncation)]
-fn calculate_num_tx_per_round(
- node_index: u64,
- total_num_nodes: usize,
- transactions_per_round: usize,
-) -> usize {
- transactions_per_round / total_num_nodes
- + usize::from(
- (total_num_nodes)
- < (transactions_per_round % total_num_nodes) + 1 + (node_index as usize),
- )
-}
-
-/// Helper function to generate transactions a given node should send
-fn generate_transactions>(
- node_index: u64,
- rounds: usize,
- transactions_to_send_per_round: usize,
- transaction_size: usize,
-) -> Vec
-where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
-{
- let mut txn_rng = StdRng::seed_from_u64(node_index);
- let mut transactions = Vec::new();
-
- for _ in 0..rounds {
- for _ in 0..transactions_to_send_per_round {
- let txn = ::create_random_transaction(
- None,
- &mut txn_rng,
- transaction_size as u64,
- );
-
- transactions.push(txn);
- }
- }
- transactions
-}
-
-/// Defines the behavior of a "run" of the network with a given configuration
-#[async_trait]
-pub trait RunDa<
- TYPES: NodeType,
- NETWORK: ConnectedNetwork,
- NODE: NodeImplementation<
- TYPES,
- Network = NETWORK,
- Storage = TestStorage,
- AuctionResultsProvider = TestAuctionResultsProvider,
- >,
- V: Versions,
-> where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
- TYPES: NodeType,
- Leaf: TestableLeaf,
- Self: Sync,
-{
- /// Initializes networking, returns self
- async fn initialize_networking(
- config: NetworkConfig,
- validator_config: ValidatorConfig,
- libp2p_advertise_address: Option,
- membership: &Arc::Membership>>,
- ) -> Self;
-
- /// Initializes the genesis state and HotShot instance; does not start HotShot consensus
- /// # Panics if it cannot generate a genesis block, fails to initialize HotShot, or cannot
- /// get the anchored view
- /// Note: sequencing leaf does not have state, so does not return state
- async fn initialize_state_and_hotshot(
- &self,
- membership: Arc::Membership>>,
- ) -> SystemContextHandle {
- let initializer =
- hotshot::HotShotInitializer::::from_genesis::(TestInstanceState::default())
- .await
- .expect("Couldn't generate genesis block");
-
- let config = self.config();
- let validator_config = self.validator_config();
-
- // Get KeyPair for certificate Aggregation
- let pk = validator_config.public_key.clone();
- let sk = validator_config.private_key.clone();
-
- let network = self.network();
-
- let marketplace_config = MarketplaceConfig {
- auction_results_provider: TestAuctionResultsProvider::::default().into(),
- // TODO: we need to pass a valid fallback builder url here somehow
- fallback_builder_url: config.config.builder_urls.first().clone(),
- };
-
- SystemContext::init(
- pk,
- sk,
- config.node_index,
- config.config,
- membership,
- Arc::from(network),
- initializer,
- ConsensusMetricsValue::default(),
- TestStorage::::default(),
- marketplace_config,
- )
- .await
- .expect("Could not init hotshot")
- .0
- }
-
- /// Starts HotShot consensus, returns when consensus has finished
- #[allow(clippy::too_many_lines)]
- async fn run_hotshot(
- &self,
- context: SystemContextHandle,
- transactions: &mut Vec,
- transactions_to_send_per_round: u64,
- transaction_size_in_bytes: u64,
- ) -> BenchResults {
- let NetworkConfig {
- rounds, node_index, ..
- } = self.config();
-
- let mut total_transactions_committed = 0;
- let mut total_transactions_sent = 0;
- let mut minimum_latency = 1000;
- let mut maximum_latency = 0;
- let mut total_latency = 0;
- let mut num_latency = 0;
-
- info!("Starting HotShot example!");
- let start = Instant::now();
-
- let mut event_stream = context.event_stream();
- let mut anchor_view: TYPES::View = ::genesis();
- let mut num_successful_commits = 0;
-
- context.hotshot.start_consensus().await;
-
- loop {
- match event_stream.next().await {
- None => {
- panic!("Error! Event stream completed before consensus ended.");
- }
- Some(Event { event, .. }) => {
- match event {
- EventType::Error { error } => {
- error!("Error in consensus: {:?}", error);
- // TODO what to do here
- }
- EventType::Decide {
- leaf_chain,
- qc: _,
- block_size,
- } => {
- let current_timestamp = Utc::now().timestamp();
- // this might be a obob
- if let Some(leaf_info) = leaf_chain.first() {
- let leaf = &leaf_info.leaf;
- info!("Decide event for leaf: {}", *leaf.view_number());
-
- // iterate all the decided transactions to calculate latency
- if let Some(block_payload) = &leaf.block_payload() {
- for tx in
- block_payload.transactions(leaf.block_header().metadata())
- {
- let restored_timestamp_vec =
- tx.bytes()[tx.bytes().len() - 8..].to_vec();
- let restored_timestamp = i64::from_be_bytes(
- restored_timestamp_vec.as_slice().try_into().unwrap(),
- );
- let cur_latency = current_timestamp - restored_timestamp;
- total_latency += cur_latency;
- num_latency += 1;
- minimum_latency =
- std::cmp::min(minimum_latency, cur_latency);
- maximum_latency =
- std::cmp::max(maximum_latency, cur_latency);
- }
- }
-
- let new_anchor = leaf.view_number();
- if new_anchor >= anchor_view {
- anchor_view = leaf.view_number();
- }
-
- // send transactions
- for _ in 0..transactions_to_send_per_round {
- // append current timestamp to the tx to calc latency
- let timestamp = Utc::now().timestamp();
- let mut tx = transactions.remove(0).into_bytes();
- let mut timestamp_vec = timestamp.to_be_bytes().to_vec();
- tx.append(&mut timestamp_vec);
-
- () = context
- .submit_transaction(TestTransaction::new(tx))
- .await
- .unwrap();
- total_transactions_sent += 1;
- }
- }
-
- if let Some(size) = block_size {
- total_transactions_committed += size;
- debug!("[{node_index}] got block with size: {:?}", size);
- }
-
- num_successful_commits += leaf_chain.len();
- if num_successful_commits >= rounds {
- break;
- }
-
- if leaf_chain.len() > 1 {
- warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len());
- }
- // when we make progress, submit new events
- }
- EventType::ReplicaViewTimeout { view_number } => {
- warn!("Timed out as a replicas in view {:?}", view_number);
- }
- EventType::ViewTimeout { view_number } => {
- warn!("Timed out in view {:?}", view_number);
- }
- _ => {} // mostly DA proposal
- }
- }
- }
- }
- let num_eligible_leaders = context
- .hotshot
- .memberships
- .read()
- .await
- .committee_leaders(TYPES::View::genesis(), TYPES::Epoch::genesis())
- .len();
- let consensus_lock = context.hotshot.consensus();
- let consensus = consensus_lock.read().await;
- let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap();
- // `failed_num_views` could include uncommitted views
- let failed_num_views = total_num_views - num_successful_commits;
- // When posting to the orchestrator, note that the total number of views also include un-finalized views.
- println!("[{node_index}]: Total views: {total_num_views}, Failed views: {failed_num_views}, num_successful_commits: {num_successful_commits}");
- // Output run results
- let total_time_elapsed = start.elapsed(); // in seconds
- println!("[{node_index}]: {rounds} rounds completed in {total_time_elapsed:?} - Total transactions sent: {total_transactions_sent} - Total transactions committed: {total_transactions_committed} - Total commitments: {num_successful_commits}");
- if total_transactions_committed != 0 {
- // prevent division by 0
- let total_time_elapsed_sec = std::cmp::max(total_time_elapsed.as_secs(), 1u64);
- // extra 8 bytes for timestamp
- let throughput_bytes_per_sec = total_transactions_committed
- * (transaction_size_in_bytes + 8)
- / total_time_elapsed_sec;
- let avg_latency_in_sec = total_latency / num_latency;
- println!("[{node_index}]: throughput: {throughput_bytes_per_sec} bytes/sec, avg_latency: {avg_latency_in_sec} sec.");
-
- BenchResults {
- partial_results: "Unset".to_string(),
- avg_latency_in_sec,
- num_latency,
- minimum_latency_in_sec: minimum_latency,
- maximum_latency_in_sec: maximum_latency,
- throughput_bytes_per_sec,
- total_transactions_committed,
- transaction_size_in_bytes: transaction_size_in_bytes + 8, // extra 8 bytes for timestamp
- total_time_elapsed_in_sec: total_time_elapsed.as_secs(),
- total_num_views,
- failed_num_views,
- committee_type: format!(
- "{} with {num_eligible_leaders} eligible leaders",
- std::any::type_name::()
- ),
- }
- } else {
- // all values with zero
- BenchResults::default()
- }
- }
-
- /// Returns the underlying network for this run
- fn network(&self) -> NETWORK;
-
- /// Returns the config for this run
- fn config(&self) -> NetworkConfig;
-
- /// Returns the validator config with private signature keys for this run.
- fn validator_config(&self) -> ValidatorConfig;
-}
-
-// Push CDN
-
-/// Represents a Push CDN-based run
-pub struct PushCdnDaRun {
- /// The underlying configuration
- config: NetworkConfig,
- /// The private validator config
- validator_config: ValidatorConfig,
- /// The underlying network
- network: PushCdnNetwork,
-}
-
-#[async_trait]
-impl<
- TYPES: NodeType<
- Transaction = TestTransaction,
- BlockPayload = TestBlockPayload,
- BlockHeader = TestBlockHeader,
- InstanceState = TestInstanceState,
- >,
- NODE: NodeImplementation<
- TYPES,
- Network = PushCdnNetwork,
- Storage = TestStorage,
- AuctionResultsProvider = TestAuctionResultsProvider,
- >,
- V: Versions,
- > RunDa, NODE, V> for PushCdnDaRun
-where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
- Leaf: TestableLeaf,
- Self: Sync,
-{
- async fn initialize_networking(
- config: NetworkConfig,
- validator_config: ValidatorConfig,
- _libp2p_advertise_address: Option,
- _membership: &Arc::Membership>>,
- ) -> PushCdnDaRun {
- // Convert to the Push-CDN-compatible type
- let keypair = KeyPair {
- public_key: WrappedSignatureKey(validator_config.public_key.clone()),
- private_key: validator_config.private_key.clone(),
- };
-
- // See if we should be DA, subscribe to the DA topic if so
- let mut topics = vec![CdnTopic::Global];
- if validator_config.is_da {
- topics.push(CdnTopic::Da);
- }
-
- // Create the network and await the initial connection
- let network = PushCdnNetwork::new(
- config
- .cdn_marshal_address
- .clone()
- .expect("`cdn_marshal_address` needs to be supplied for a push CDN run"),
- topics,
- keypair,
- CdnMetricsValue::default(),
- )
- .expect("failed to create network");
-
- // Wait for the network to be ready
- network.wait_for_ready().await;
-
- PushCdnDaRun {
- config,
- validator_config,
- network,
- }
- }
-
- fn network(&self) -> PushCdnNetwork {
- self.network.clone()
- }
-
- fn config(&self) -> NetworkConfig {
- self.config.clone()
- }
-
- fn validator_config(&self) -> ValidatorConfig {
- self.validator_config.clone()
- }
-}
-
-// Libp2p
-
-/// Represents a libp2p-based run
-pub struct Libp2pDaRun {
- /// The underlying network configuration
- config: NetworkConfig,
- /// The private validator config
- validator_config: ValidatorConfig,
- /// The underlying network
- network: Libp2pNetwork,
-}
-
-#[async_trait]
-impl<
- TYPES: NodeType<
- Transaction = TestTransaction,
- BlockPayload = TestBlockPayload,
- BlockHeader = TestBlockHeader,
- InstanceState = TestInstanceState,
- >,
- NODE: NodeImplementation<
- TYPES,
- Network = Libp2pNetwork,
- Storage = TestStorage,
- AuctionResultsProvider = TestAuctionResultsProvider,
- >,
- V: Versions,
- > RunDa, NODE, V> for Libp2pDaRun
-where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
- Leaf: TestableLeaf,
- Self: Sync,
-{
- async fn initialize_networking(
- config: NetworkConfig,
- validator_config: ValidatorConfig,
- libp2p_advertise_address: Option,
- membership: &Arc::Membership>>,
- ) -> Libp2pDaRun {
- // Extrapolate keys for ease of use
- let public_key = &validator_config.public_key;
- let private_key = &validator_config.private_key;
-
- // In an example, we can calculate the libp2p bind address as a function
- // of the advertise address.
- let bind_address = if let Some(libp2p_advertise_address) = libp2p_advertise_address {
- let libp2p_advertise_address: SocketAddrV4 = libp2p_advertise_address
- .parse()
- .expect("failed to parse advertise address");
-
- // If we have supplied one, use it
- SocketAddr::new(
- IpAddr::V4(Ipv4Addr::UNSPECIFIED),
- libp2p_advertise_address.port(),
- )
- .to_string()
- } else {
- // If not, index a base port with our node index
- SocketAddr::new(
- IpAddr::V4(Ipv4Addr::UNSPECIFIED),
- 8000 + (u16::try_from(config.node_index)
- .expect("failed to create advertise address")),
- )
- .to_string()
- };
-
- // Derive the bind address
- let bind_address =
- derive_libp2p_multiaddr(&bind_address).expect("failed to derive bind address");
-
- // Create the Libp2p network
- let libp2p_network = Libp2pNetwork::from_config(
- config.clone(),
- Arc::clone(membership),
- GossipConfig::default(),
- RequestResponseConfig::default(),
- bind_address,
- public_key,
- private_key,
- Libp2pMetricsValue::default(),
- )
- .await
- .expect("failed to create libp2p network");
-
- // Wait for the network to be ready
- libp2p_network.wait_for_ready().await;
-
- Libp2pDaRun {
- config,
- validator_config,
- network: libp2p_network,
- }
- }
-
- fn network(&self) -> Libp2pNetwork {
- self.network.clone()
- }
-
- fn config(&self) -> NetworkConfig {
- self.config.clone()
- }
-
- fn validator_config(&self) -> ValidatorConfig {
- self.validator_config.clone()
- }
-}
-
-// Combined network
-
-/// Represents a combined-network-based run
-pub struct CombinedDaRun {
- /// The underlying network configuration
- config: NetworkConfig,
- /// The private validator config
- validator_config: ValidatorConfig,
- /// The underlying network
- network: CombinedNetworks,
-}
-
-#[async_trait]
-impl<
- TYPES: NodeType<
- Transaction = TestTransaction,
- BlockPayload = TestBlockPayload,
- BlockHeader = TestBlockHeader,
- InstanceState = TestInstanceState,
- >,
- NODE: NodeImplementation<
- TYPES,
- Network = CombinedNetworks,
- Storage = TestStorage,
- AuctionResultsProvider = TestAuctionResultsProvider,
- >,
- V: Versions,
- > RunDa, NODE, V> for CombinedDaRun
-where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
- Leaf: TestableLeaf,
- Self: Sync,
-{
- async fn initialize_networking(
- config: NetworkConfig,
- validator_config: ValidatorConfig,
- libp2p_advertise_address: Option,
- membership: &Arc::Membership>>,
- ) -> CombinedDaRun {
- // Initialize our Libp2p network
- let libp2p_network: Libp2pDaRun = as RunDa<
- TYPES,
- Libp2pNetwork,
- Libp2pImpl,
- V,
- >>::initialize_networking(
- config.clone(),
- validator_config.clone(),
- libp2p_advertise_address.clone(),
- membership,
- )
- .await;
-
- // Initialize our CDN network
- let cdn_network: PushCdnDaRun = as RunDa<
- TYPES,
- PushCdnNetwork,
- PushCdnImpl,
- V,
- >>::initialize_networking(
- config.clone(),
- validator_config.clone(),
- libp2p_advertise_address,
- membership,
- )
- .await;
-
- // Create our combined network config
- let delay_duration = config
- .combined_network_config
- .as_ref()
- .map(|config| config.delay_duration);
-
- // Create our combined network
- let network =
- CombinedNetworks::new(cdn_network.network, libp2p_network.network, delay_duration);
-
- // Return the run configuration
- CombinedDaRun {
- config,
- validator_config,
- network,
- }
- }
-
- fn network(&self) -> CombinedNetworks {
- self.network.clone()
- }
-
- fn config(&self) -> NetworkConfig {
- self.config.clone()
- }
-
- fn validator_config(&self) -> ValidatorConfig {
- self.validator_config.clone()
- }
-}
-
-#[allow(clippy::too_many_lines)]
-/// Main entry point for validators
-/// # Panics
-/// if unable to get the local ip address
-pub async fn main_entry_point<
- TYPES: NodeType<
- Transaction = TestTransaction,
- BlockHeader = TestBlockHeader,
- InstanceState = TestInstanceState,
- >,
- NETWORK: ConnectedNetwork,
- NODE: NodeImplementation<
- TYPES,
- Network = NETWORK,
- Storage = TestStorage,
- AuctionResultsProvider = TestAuctionResultsProvider,
- >,
- V: Versions,
- RUNDA: RunDa,
->(
- args: ValidatorArgs,
-) where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
- Leaf: TestableLeaf,
-{
- // Initialize logging
- hotshot::helpers::initialize_logging();
-
- info!("Starting validator");
-
- let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.url.clone());
-
- // We assume one node will not call this twice to generate two validator_config-s with same identity.
- let validator_config = NetworkConfig::::generate_init_validator_config(
- orchestrator_client
- .get_node_index_for_init_validator_config()
- .await,
- // we assign nodes to the DA committee by default
- true,
- );
-
- // Derives our Libp2p private key from our private key, and then returns the public key of that key
- let libp2p_public_key =
- derive_libp2p_peer_id::(&validator_config.private_key)
- .expect("failed to derive Libp2p keypair");
-
- // We need this to be able to register our node
- let peer_config =
- PeerConfig::::to_bytes(&validator_config.public_config()).clone();
-
- // Derive the advertise multiaddress from the supplied string
- let advertise_multiaddress = args.advertise_address.clone().map(|advertise_address| {
- derive_libp2p_multiaddr(&advertise_address).expect("failed to derive Libp2p multiaddr")
- });
-
- // conditionally save/load config from file or orchestrator
- // This is a function that will return correct complete config from orchestrator.
- // It takes in a valid args.network_config_file when loading from file, or valid validator_config when loading from orchestrator, the invalid one will be ignored.
- // It returns the complete config which also includes peer's public key and public config.
- // This function will be taken solely by sequencer right after OrchestratorClient::new,
- // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot.
-
- let (mut run_config, validator_config, source) = get_complete_config(
- &orchestrator_client,
- validator_config,
- advertise_multiaddress,
- Some(libp2p_public_key),
- )
- .await
- .expect("failed to get config");
-
- let builder_task = initialize_builder(
- &mut run_config,
- &validator_config,
- &args,
- &orchestrator_client,
- )
- .await;
-
- run_config.config.builder_urls = orchestrator_client
- .get_builder_addresses()
- .await
- .try_into()
- .expect("Orchestrator didn't provide any builder addresses");
-
- debug!(
- "Assigned urls from orchestrator: {}",
- run_config
- .config
- .builder_urls
- .iter()
- .map(ToString::to_string)
- .collect::>()
- .join(",")
- );
-
- let all_nodes = if cfg!(feature = "fixed-leader-election") {
- let mut vec = run_config.config.known_nodes_with_stake.clone();
- vec.truncate(run_config.config.fixed_leader_for_gpuvid);
- vec
- } else {
- run_config.config.known_nodes_with_stake.clone()
- };
- let membership = Arc::new(RwLock::new(::Membership::new(
- all_nodes,
- run_config.config.known_da_nodes.clone(),
- )));
-
- info!("Initializing networking");
- let run = RUNDA::initialize_networking(
- run_config.clone(),
- validator_config,
- args.advertise_address,
- &membership,
- )
- .await;
- let hotshot = run.initialize_state_and_hotshot(membership).await;
-
- if let Some(task) = builder_task {
- task.start(Box::new(hotshot.event_stream()));
- }
-
- // pre-generate transactions
- let NetworkConfig {
- transaction_size,
- rounds,
- transactions_per_round,
- node_index,
- config: HotShotConfig {
- num_nodes_with_stake,
- ..
- },
- ..
- } = run_config;
-
- let transactions_to_send_per_round = calculate_num_tx_per_round(
- node_index,
- num_nodes_with_stake.get(),
- transactions_per_round,
- );
- let mut transactions: Vec = generate_transactions::(
- node_index,
- rounds,
- transactions_to_send_per_round,
- transaction_size,
- );
-
- if let NetworkConfigSource::Orchestrator = source {
- info!("Waiting for the start command from orchestrator");
- orchestrator_client
- .wait_for_all_nodes_ready(peer_config)
- .await;
- }
-
- info!("Starting HotShot");
- let bench_results = run
- .run_hotshot(
- hotshot,
- &mut transactions,
- transactions_to_send_per_round as u64,
- (transaction_size + 8) as u64, // extra 8 bytes for transaction base, see `create_random_transaction`.
- )
- .await;
- orchestrator_client.post_bench_results(bench_results).await;
-}
-
-/// Sets correct builder_url and registers a builder with orchestrator if this node is running one.
-/// Returns a `BuilderTask` if this node is going to be running a builder.
-async fn initialize_builder<
- TYPES: NodeType<
- Transaction = TestTransaction,
- BlockHeader = TestBlockHeader,
- InstanceState = TestInstanceState,
- >,
->(
- run_config: &mut NetworkConfig<::SignatureKey>,
- validator_config: &ValidatorConfig<::SignatureKey>,
- args: &ValidatorArgs,
- orchestrator_client: &OrchestratorClient,
-) -> Option>>
-where
- ::ValidatedState: TestableState,
- ::BlockPayload: TestableBlock,
- Leaf: TestableLeaf,
-{
- if !validator_config.is_da {
- return None;
- }
-
- let advertise_urls: Vec;
- let bind_address: Url;
-
- match args.builder_address {
- None => {
- let port = portpicker::pick_unused_port().expect("Failed to pick an unused port");
- advertise_urls = local_ip_address::list_afinet_netifas()
- .expect("Couldn't get list of local IP addresses")
- .into_iter()
- .map(|(_name, ip)| ip)
- .filter(|ip| !ip.is_loopback())
- .map(|ip| match ip {
- IpAddr::V4(addr) => Url::parse(&format!("http://{addr}:{port}")).unwrap(),
- IpAddr::V6(addr) => Url::parse(&format!("http://[{addr}]:{port}")).unwrap(),
- })
- .collect();
- bind_address = Url::parse(&format!("http://0.0.0.0:{port}")).unwrap();
- }
- Some(ref addr) => {
- bind_address = Url::parse(&format!("http://{addr}")).expect("Valid URL");
- advertise_urls = vec![bind_address.clone()];
- }
- }
-
- match run_config.builder {
- BuilderType::External => None,
- BuilderType::Random => {
- let builder_task =
- >::start(
- run_config.config.num_nodes_with_stake.into(),
- bind_address,
- run_config.random_builder.clone().unwrap_or_default(),
- HashMap::new(),
- )
- .await;
-
- orchestrator_client
- .post_builder_addresses(advertise_urls)
- .await;
-
- Some(builder_task)
- }
- BuilderType::Simple => {
- let builder_task =
- >::start(
- run_config.config.num_nodes_with_stake.into(),
- bind_address,
- (),
- HashMap::new(),
- )
- .await;
-
- orchestrator_client
- .post_builder_addresses(advertise_urls)
- .await;
-
- Some(builder_task)
- }
- }
-}
-
-/// Base port for validator
-pub const VALIDATOR_BASE_PORT: u16 = 8000;
-/// Base port for builder
-pub const BUILDER_BASE_PORT: u16 = 9000;
-
-/// Generate a local address for node with index `node_index`, offsetting from port `BASE_PORT`.
-/// # Panics
-/// If `node_index` is too large to fit in a `u16`
-#[must_use]
-pub fn gen_local_address(node_index: usize) -> SocketAddr {
- SocketAddr::new(
- IpAddr::V4(Ipv4Addr::LOCALHOST),
- BASE_PORT + (u16::try_from(node_index).expect("node index too large")),
- )
-}
diff --git a/crates/examples/libp2p/all.rs b/crates/examples/libp2p/all.rs
deleted file mode 100644
index 4fd99cd0e8..0000000000
--- a/crates/examples/libp2p/all.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! An example program using libp2p
-/// types used for this example
-pub mod types;
-
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::ValidatorArgs;
-use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT};
-use tokio::spawn;
-use tracing::instrument;
-
-use crate::{
- infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs},
- types::{Network, NodeImpl, ThisRun},
-};
-
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- // use configfile args
- let (config, orchestrator_url) = read_orchestrator_init_config::();
-
- // orchestrator
- spawn(run_orchestrator::(OrchestratorArgs {
- url: orchestrator_url.clone(),
- config: config.clone(),
- }));
-
- // nodes
- let mut nodes = Vec::new();
- for i in 0..config.config.num_nodes_with_stake.into() {
- // Calculate our libp2p advertise address, which we will later derive the
- // bind address from for example purposes.
- let advertise_address = gen_local_address::(i);
- let builder_address = gen_local_address::(i);
- let orchestrator_url = orchestrator_url.clone();
- let node = spawn(async move {
- infra::main_entry_point::(
- ValidatorArgs {
- url: orchestrator_url,
- advertise_address: Some(advertise_address.to_string()),
- builder_address: Some(builder_address),
- network_config_file: None,
- },
- )
- .await;
- });
- nodes.push(node);
- }
- futures::future::join_all(nodes).await;
-}
diff --git a/crates/examples/libp2p/multi-validator.rs b/crates/examples/libp2p/multi-validator.rs
deleted file mode 100644
index 0767245c3b..0000000000
--- a/crates/examples/libp2p/multi-validator.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! A multi-validator using libp2p
-use clap::Parser;
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs};
-use tokio::spawn;
-use tracing::instrument;
-
-use crate::types::{Network, NodeImpl, ThisRun};
-
-/// types used for this example
-pub mod types;
-
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let args = MultiValidatorArgs::parse();
- tracing::debug!("connecting to orchestrator at {:?}", args.url);
- let mut nodes = Vec::new();
- for node_index in 0..args.num_nodes {
- let args = args.clone();
-
- let node = spawn(async move {
- infra::main_entry_point::(
- ValidatorArgs::from_multi_args(args, node_index),
- )
- .await;
- });
- nodes.push(node);
- }
- let _result = futures::future::join_all(nodes).await;
-}
diff --git a/crates/examples/libp2p/types.rs b/crates/examples/libp2p/types.rs
deleted file mode 100644
index ed8fbcda6f..0000000000
--- a/crates/examples/libp2p/types.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-use std::fmt::Debug;
-
-use hotshot::traits::implementations::Libp2pNetwork;
-use hotshot_example_types::{
- auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes,
- storage_types::TestStorage,
-};
-use hotshot_types::traits::node_implementation::NodeImplementation;
-use serde::{Deserialize, Serialize};
-
-use crate::infra::Libp2pDaRun;
-
-/// dummy struct so we can choose types
-#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)]
-pub struct NodeImpl {}
-
-/// Convenience type alias
-pub type Network = Libp2pNetwork;
-
-impl NodeImplementation for NodeImpl {
- type Network = Network;
- type Storage = TestStorage;
- type AuctionResultsProvider = TestAuctionResultsProvider;
-}
-/// convenience type alias
-pub type ThisRun = Libp2pDaRun;
diff --git a/crates/examples/libp2p/validator.rs b/crates/examples/libp2p/validator.rs
deleted file mode 100644
index c85e52688e..0000000000
--- a/crates/examples/libp2p/validator.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! A validator using libp2p
-
-use clap::Parser;
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::ValidatorArgs;
-use local_ip_address::local_ip;
-use tracing::{debug, instrument};
-
-use crate::types::{Network, NodeImpl, ThisRun};
-
-/// types used for this example
-pub mod types;
-
-/// general infra used for this example
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let mut args = ValidatorArgs::parse();
-
- // If we did not set the advertise address, use our local IP and port 8000
- let local_ip = local_ip().expect("failed to get local IP");
- args.advertise_address = Some(args.advertise_address.unwrap_or(format!("{local_ip}:8000")));
-
- debug!("connecting to orchestrator at {:?}", args.url);
- infra::main_entry_point::(args).await;
-}
diff --git a/crates/examples/orchestrator.rs b/crates/examples/orchestrator.rs
deleted file mode 100644
index 42ea1d9014..0000000000
--- a/crates/examples/orchestrator.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! An orchestrator
-
-use hotshot::helpers::initialize_logging;
-use hotshot_example_types::state_types::TestTypes;
-use tracing::instrument;
-
-use crate::infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs};
-
-/// general infra used for this example
-#[path = "./infra/mod.rs"]
-pub mod infra;
-
-#[tokio::main]
-#[instrument]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- let (config, orchestrator_url) = read_orchestrator_init_config::();
- run_orchestrator::(OrchestratorArgs:: {
- url: orchestrator_url.clone(),
- config: config.clone(),
- })
- .await;
-}
diff --git a/crates/examples/process-compose.yaml b/crates/examples/process-compose.yaml
new file mode 100644
index 0000000000..1df6494df1
--- /dev/null
+++ b/crates/examples/process-compose.yaml
@@ -0,0 +1,86 @@
+# This file is used to run all components of the multi-process examples (in combined network mode).
+# If you want to just run them all in one process, you can use `cargo run --example all`.
+
+# To run this, do `process-compose up`
+
+# NOTE: You will need Docker installed to run this, as we use KeyDB as a database for the CDN for now
+
+version: "3"
+
+processes:
+ # The coordinator is used to assign unique indices to nodes when running benchmarks on multiple machines
+ # (or in this case, multiple processes). It is also used to share Libp2p addresses with other nodes
+ # so they can bootstrap to each other.
+ coordinator:
+ command: RUST_LOG=info cargo run --example coordinator
+ readiness_probe:
+ exec:
+ command: nc -zv localhost 3030
+ period_seconds: 5
+ timeout_seconds: 4
+ failure_threshold: 20
+
+ # We use KeyDB (a Redis variant) to maintain consistency between
+ # different parts of the CDN
+ # Cheating a bit here too, but KeyDB is not available as a Nix package.
+ # Could do local (SQLite) discovery, but removes some of the spirit
+ # from the local demo.
+ keydb:
+ command: docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb --requirepass changeme!
+ readiness_probe:
+ exec:
+ command: nc -zv localhost 6379
+ period_seconds: 5
+ timeout_seconds: 4
+ failure_threshold: 20
+
+ # The marshal is the CDN component that is responsible for authenticating users and
+ # pointing them to the correct broker.
+ marshal:
+ command: RUST_LOG=info cargo run --example cdn-marshal -- --discovery-endpoint "redis://:changeme!@localhost:6379"
+ depends_on:
+ keydb:
+ condition: process_healthy
+
+ # The `broker` is the CDN component that is primarily responsible for routing messages
+ # between nodes.
+ broker:
+ command: RUST_LOG=info,libp2p=off cargo run --example cdn-broker -- --discovery-endpoint "redis://:changeme!@localhost:6379"
+ depends_on:
+ keydb:
+ condition: process_healthy
+
+ # `hotshot1` is a single HotShot node
+ hotshot1:
+ command: RUST_LOG=info,libp2p=off cargo run --example single-validator -- --network combined --libp2p-port 3000 --total-num-nodes 5 --num-da-nodes 3 --marshal-address localhost:1737
+ depends_on:
+ coordinator:
+ condition: process_healthy
+
+ # `hotshot2` is a second HotShot node
+ hotshot2:
+ command: RUST_LOG=info,libp2p=off cargo run --example single-validator -- --network combined --libp2p-port 3001 --total-num-nodes 5 --num-da-nodes 3 --marshal-address localhost:1737
+ depends_on:
+ coordinator:
+ condition: process_healthy
+
+ # `hotshot3` is a third HotShot node
+ hotshot3:
+ command: RUST_LOG=info,libp2p=off cargo run --example single-validator -- --network combined --libp2p-port 3002 --total-num-nodes 5 --num-da-nodes 3 --marshal-address localhost:1737
+ depends_on:
+ coordinator:
+ condition: process_healthy
+
+ # `hotshot4` is a fourth HotShot node
+ hotshot4:
+ command: RUST_LOG=info,libp2p=off cargo run --example single-validator -- --network combined --libp2p-port 3003 --total-num-nodes 5 --num-da-nodes 3 --marshal-address localhost:1737
+ depends_on:
+ coordinator:
+ condition: process_healthy
+
+ # `hotshot5` is a fifth HotShot node
+ hotshot5:
+ command: RUST_LOG=info,libp2p=off cargo run --example single-validator -- --network combined --libp2p-port 3004 --total-num-nodes 5 --num-da-nodes 3 --marshal-address localhost:1737
+ depends_on:
+ coordinator:
+ condition: process_healthy
diff --git a/crates/examples/push-cdn/README.md b/crates/examples/push-cdn/README.md
deleted file mode 100644
index a20e46f4ce..0000000000
--- a/crates/examples/push-cdn/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-Steps
----------------
-
-KeyDB is the ephemeral database, it's like Redis but with extra features. The only thing we run it with is with `--requirepass` to set a password.
-
-**Marshals:**
-The marshal is the entry point of the push CDN, all users connect there first. It tells users which broker to connect to.
-
-- `-d` is the "discovery endpoint", which in this case is the URL of KeyDB.
-- `-b` is the bind port. This is what you would set in run_config.toml for cdn_broker_marshal_endpoint
-- `-m` is metrics stuff. You shouldn't have to use that
-
-
-**Brokers:**
-In a run with multiple machines, we want two brokers. With one machine, it's probably fine to do one broker. These are what route the messages. Here are the relevant command line arguments:
-
-- `-d` is the "discovery endpoint", which in this case is the URL of KeyDB.
-- `--public-bind-endpoint`: the endpoint which we bind to locally for users to connect to (e.g. 0.0.0.0:1740)
-- `--public-advertise-endpoint`: the endpoint which we advertise to users (e.g. my.public.ip:1740)
-- `--private-bind-endpoint`: the endpoint which we bind to locally for brokers to connect to (e.g. 0.0.0.0:1741)
-- `--private-advertise-endpoint`: the endpoint which we advertise to brokers (e.g. my.public.ip:1741)
-- `-m` is metrics stuff. You shouldn't have to use that
-For brokers, there is a magic value called `local_ip`. This resolves to the local IP address, which skips the need for talking to the AWS metadata server. For in-AWS uses, the following configuration is probably fine:
-`cdn-broker --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741`. You won't need to put this port or values anywhere, as the marshal does everything for you.
-
-Examples:
----------------
-
-**Run Locally**
-
-`just example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml`
-
-OR
-
-```
-docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb
-just example cdn-marshal -- -d redis://localhost:6379 -b 9000
-just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741
-just example orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444
-just example multi-validator-push-cdn -- 10 http://127.0.0.1:4444
-```
-
-**Run with GPU-VID**
-```
-docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb
-just example cdn-marshal -- -d redis://localhost:6379 -b 9000
-just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741
-just example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1
-just example_gpuvid_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444
-sleep 1m
-just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444
-```
-
-Where ones using `example_gpuvid_leader` could be the leader and should be running on an nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port.
-
-
-If you don't have a gpu but want to test out fixed leader, you can run:
-```
-docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb
-just example cdn-marshal -- -d redis://localhost:6379 -b 9000
-just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741
-just example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1
-just example_fixed_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444
-sleep 1m
-just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444
-```
-
-Remember, you have to run leaders first, then other validators, so that leaders will have lower index.
diff --git a/crates/examples/push-cdn/all.rs b/crates/examples/push-cdn/all.rs
deleted file mode 100644
index 444edc60fe..0000000000
--- a/crates/examples/push-cdn/all.rs
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see .
-
-//! An example program using the Push CDN
-/// The types we're importing
-pub mod types;
-
-use std::path::Path;
-
-use cdn_broker::{
- reexports::{crypto::signature::KeyPair, def::hook::NoMessageHook},
- Broker,
-};
-use cdn_marshal::Marshal;
-use hotshot::{
- helpers::initialize_logging,
- traits::implementations::{TestingDef, WrappedSignatureKey},
- types::SignatureKey,
-};
-use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes};
-use hotshot_orchestrator::client::ValidatorArgs;
-use hotshot_types::traits::node_implementation::NodeType;
-use infra::{gen_local_address, BUILDER_BASE_PORT};
-use rand::{rngs::StdRng, RngCore, SeedableRng};
-use tokio::spawn;
-
-use crate::{
- infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs},
- types::{Network, NodeImpl, ThisRun},
-};
-
-/// The infra implementation
-#[path = "../infra/mod.rs"]
-pub mod infra;
-
-use tracing::error;
-
-#[tokio::main]
-async fn main() {
- // Initialize logging
- initialize_logging();
-
- // use configfile args
- let (config, orchestrator_url) = read_orchestrator_init_config::();
-
- // Start the orhcestrator
- spawn(run_orchestrator::(OrchestratorArgs {
- url: orchestrator_url.clone(),
- config: config.clone(),
- }));
-
- // The configuration we are using for this example is 2 brokers & 1 marshal
-
- // A keypair shared between brokers
- let (broker_public_key, broker_private_key) =
- ::SignatureKey::generated_from_seed_indexed([0u8; 32], 1337);
-
- // Get the OS temporary directory
- let temp_dir = std::env::temp_dir();
-
- // Create an SQLite file inside of the temporary directory
- let discovery_endpoint = temp_dir
- .join(Path::new(&format!(
- "test-{}.sqlite",
- StdRng::from_entropy().next_u64()
- )))
- .to_string_lossy()
- .into_owned();
-
- // 2 brokers
- for _ in 0..2 {
- // Get the ports to bind to
- let private_port = portpicker::pick_unused_port().expect("could not find an open port");
- let public_port = portpicker::pick_unused_port().expect("could not find an open port");
-
- // Extrapolate addresses
- let private_address = format!("127.0.0.1:{private_port}");
- let public_address = format!("127.0.0.1:{public_port}");
-
- let config: cdn_broker::Config::SignatureKey>> =
- cdn_broker::Config {
- discovery_endpoint: discovery_endpoint.clone(),
- public_advertise_endpoint: public_address.clone(),
- public_bind_endpoint: public_address,
- private_advertise_endpoint: private_address.clone(),
- private_bind_endpoint: private_address,
-
- keypair: KeyPair {
- public_key: WrappedSignatureKey(broker_public_key),
- private_key: broker_private_key.clone(),
- },
-
- user_message_hook: NoMessageHook,
- broker_message_hook: NoMessageHook,
-
- metrics_bind_endpoint: None,
- ca_cert_path: None,
- ca_key_path: None,
- global_memory_pool_size: Some(1024 * 1024 * 1024),
- };
-
- // Create and spawn the broker
- spawn(async move {
- let broker: Broker