diff --git a/.github/workflows/docker-reproducible.yml b/.github/workflows/docker-reproducible.yml new file mode 100644 index 00000000000..f3479e9468d --- /dev/null +++ b/.github/workflows/docker-reproducible.yml @@ -0,0 +1,176 @@ +name: docker-reproducible + +on: + push: + branches: + - unstable + - stable + tags: + - v* + workflow_dispatch: # allows manual triggering for testing purposes and skips publishing an image + +env: + DOCKER_REPRODUCIBLE_IMAGE_NAME: >- + ${{ github.repository_owner }}/lighthouse-reproducible + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} + +jobs: + extract-version: + name: extract version + runs-on: ubuntu-22.04 + steps: + - name: Extract version + run: | + if [[ "${{ github.ref }}" == refs/tags/* ]]; then + # It's a tag (e.g., v1.2.3) + VERSION="${GITHUB_REF#refs/tags/}" + elif [[ "${{ github.ref }}" == refs/heads/stable ]]; then + # stable branch -> latest + VERSION="latest" + elif [[ "${{ github.ref }}" == refs/heads/unstable ]]; then + # unstable branch -> latest-unstable + VERSION="latest-unstable" + else + # For manual triggers from other branches and will not publish any image + VERSION="test-build" + fi + echo "VERSION=$VERSION" >> $GITHUB_OUTPUT + id: extract_version + outputs: + VERSION: ${{ steps.extract_version.outputs.VERSION }} + + verify-and-build: + name: verify reproducibility and build + needs: extract-version + strategy: + matrix: + arch: [amd64, arm64] + include: + - arch: amd64 + rust_target: x86_64-unknown-linux-gnu + rust_image: >- + rust:1.88-bullseye@sha256:8e3c421122bf4cd3b2a866af41a4dd52d87ad9e315fd2cb5100e87a7187a9816 + platform: linux/amd64 + runner: ubuntu-22.04 + - arch: arm64 + rust_target: aarch64-unknown-linux-gnu + rust_image: >- + rust:1.88-bullseye@sha256:8b22455a7ce2adb1355067638284ee99d21cc516fab63a96c4514beaf370aa94 + platform: linux/arm64 + runner: ubuntu-22.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker + + - name: Verify reproducible builds (${{ matrix.arch }}) + run: | + # Build first image + docker build -f Dockerfile.reproducible \ + --platform ${{ matrix.platform }} \ + --build-arg RUST_TARGET="${{ matrix.rust_target }}" \ + --build-arg RUST_IMAGE="${{ matrix.rust_image }}" \ + -t lighthouse-verify-1-${{ matrix.arch }} . + + # Extract binary from first build + docker create --name extract-1-${{ matrix.arch }} lighthouse-verify-1-${{ matrix.arch }} + docker cp extract-1-${{ matrix.arch }}:/lighthouse ./lighthouse-1-${{ matrix.arch }} + docker rm extract-1-${{ matrix.arch }} + + # Clean state for second build + docker buildx prune -f + docker system prune -f + + # Build second image + docker build -f Dockerfile.reproducible \ + --platform ${{ matrix.platform }} \ + --build-arg RUST_TARGET="${{ matrix.rust_target }}" \ + --build-arg RUST_IMAGE="${{ matrix.rust_image }}" \ + -t lighthouse-verify-2-${{ matrix.arch }} . 
+ + # Extract binary from second build + docker create --name extract-2-${{ matrix.arch }} lighthouse-verify-2-${{ matrix.arch }} + docker cp extract-2-${{ matrix.arch }}:/lighthouse ./lighthouse-2-${{ matrix.arch }} + docker rm extract-2-${{ matrix.arch }} + + # Compare binaries + echo "=== Comparing binaries ===" + echo "Build 1 SHA256: $(sha256sum lighthouse-1-${{ matrix.arch }})" + echo "Build 2 SHA256: $(sha256sum lighthouse-2-${{ matrix.arch }})" + + if cmp lighthouse-1-${{ matrix.arch }} lighthouse-2-${{ matrix.arch }}; then + echo "Reproducible build verified for ${{ matrix.arch }}" + else + echo "Reproducible build FAILED for ${{ matrix.arch }}" + echo "BLOCKING RELEASE: Builds are not reproducible!" + echo "First 10 differences:" + cmp -l lighthouse-1-${{ matrix.arch }} lighthouse-2-${{ matrix.arch }} | head -10 + exit 1 + fi + + # Clean up verification artifacts but keep one image for publishing + rm -f lighthouse-*-${{ matrix.arch }} + docker rmi lighthouse-verify-1-${{ matrix.arch }} || true + + # Re-tag the second image for publishing (we verified it's identical to first) + VERSION=${{ needs.extract-version.outputs.VERSION }} + FINAL_TAG="${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${VERSION}-${{ matrix.arch }}" + docker tag lighthouse-verify-2-${{ matrix.arch }} "$FINAL_TAG" + + - name: Log in to Docker Hub + if: ${{ github.event_name != 'workflow_dispatch' }} + uses: docker/login-action@v3 + with: + username: ${{ env.DOCKER_USERNAME }} + password: ${{ env.DOCKER_PASSWORD }} + + - name: Push verified image (${{ matrix.arch }}) + if: ${{ github.event_name != 'workflow_dispatch' }} + run: | + VERSION=${{ needs.extract-version.outputs.VERSION }} + IMAGE_TAG="${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${VERSION}-${{ matrix.arch }}" + docker push "$IMAGE_TAG" + + - name: Clean up local images + run: | + docker rmi lighthouse-verify-2-${{ matrix.arch }} || true + VERSION=${{ needs.extract-version.outputs.VERSION }} + docker rmi "${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${VERSION}-${{ matrix.arch }}" || true + + - name: Upload verification artifacts (on failure) + if: failure() + uses: actions/upload-artifact@v4 + with: + name: verification-failure-${{ matrix.arch }} + path: | + lighthouse-*-${{ matrix.arch }} + + create-manifest: + name: create multi-arch manifest + runs-on: ubuntu-22.04 + needs: [extract-version, verify-and-build] + if: ${{ github.event_name != 'workflow_dispatch' }} + steps: + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ env.DOCKER_USERNAME }} + password: ${{ env.DOCKER_PASSWORD }} + + - name: Create and push multi-arch manifest + run: | + IMAGE_NAME=${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }} + VERSION=${{ needs.extract-version.outputs.VERSION }} + + # Create manifest for the version tag + docker manifest create \ + ${IMAGE_NAME}:${VERSION} \ + ${IMAGE_NAME}:${VERSION}-amd64 \ + ${IMAGE_NAME}:${VERSION}-arm64 + + docker manifest push ${IMAGE_NAME}:${VERSION} diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml new file mode 100644 index 00000000000..be52c5b84d3 --- /dev/null +++ b/.github/workflows/nightly-tests.yml @@ -0,0 +1,135 @@ +# We only run tests on `RECENT_FORKS` on CI. To make sure we don't break prior forks, we run nightly tests to cover all prior forks. 
+name: nightly-tests + +on: + schedule: + # Run at 8:30 AM UTC every day + - cron: '30 8 * * *' + workflow_dispatch: # Allow manual triggering + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + # Deny warnings in CI + # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) + RUSTFLAGS: "-D warnings -C debuginfo=0" + # Prevent Github API rate limiting. + LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }} + # Disable incremental compilation + CARGO_INCREMENTAL: 0 + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + TEST_FEATURES: portable + +jobs: + setup-matrix: + name: setup-matrix + runs-on: ubuntu-latest + outputs: + forks: ${{ steps.set-matrix.outputs.forks }} + steps: + - name: Set matrix + id: set-matrix + run: | + # All prior forks to cover in nightly tests. This list should be updated when we remove a fork from `RECENT_FORKS`. + echo 'forks=["phase0", "altair", "bellatrix", "capella", "deneb"]' >> $GITHUB_OUTPUT + + beacon-chain-tests: + name: beacon-chain-tests + needs: setup-matrix + runs-on: 'ubuntu-latest' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run beacon_chain tests for ${{ matrix.fork }} + run: make test-beacon-chain-${{ matrix.fork }} + timeout-minutes: 60 + + http-api-tests: + name: http-api-tests + needs: setup-matrix + runs-on: 'ubuntu-latest' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run http_api tests for ${{ matrix.fork }} + run: make test-http-api-${{ matrix.fork }} + timeout-minutes: 60 + + op-pool-tests: + name: op-pool-tests + needs: setup-matrix + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run operation_pool tests for ${{ matrix.fork }} + run: make test-op-pool-${{ matrix.fork }} + timeout-minutes: 60 + + network-tests: + name: network-tests + needs: setup-matrix + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Create CI logger dir + run: mkdir ${{ runner.temp }}/network_test_logs + - name: Run network tests for ${{ matrix.fork }} + run: make test-network-${{ matrix.fork }} + timeout-minutes: 60 + env: + TEST_FEATURES: portable + CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs + - name: Upload logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: network_test_logs_${{ matrix.fork }} + path: ${{ 
runner.temp }}/network_test_logs diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000000..65447c4390a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "rust-analyzer.cargo.cfgs": [ + "!debug_assertions" + ] +} diff --git a/Cargo.lock b/Cargo.lock index 481fe71df06..b7b148b7b20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_utils", "bls", @@ -37,7 +37,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand 0.9.0", + "rand 0.9.2", "regex", "rpassword", "serde", @@ -48,20 +48,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -70,7 +61,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -83,7 +74,7 @@ dependencies = [ "cipher 0.3.0", "cpufeatures", "ctr 0.8.0", - "opaque-debug 0.3.1", + "opaque-debug", ] [[package]] @@ -113,21 +104,21 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -138,6 +129,17 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy-chains" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bc32535569185cbcb6ad5fa64d989a47bccb9a08e27284b1f2a3ccf16e6d010" +dependencies = [ + "alloy-primitives", + "num_enum", + "strum 0.27.2", +] + [[package]] name = "alloy-consensus" version = "1.0.42" @@ -150,17 +152,18 @@ dependencies = [ "alloy-serde", "alloy-trie", "alloy-tx-macros", - "auto_impl 1.2.1", + "auto_impl", "c-kzg", "derive_more 2.0.1", "either", - "k256 0.13.4", + "k256", "once_cell", "rand 0.8.5", "secp256k1", "serde", "serde_json", - "thiserror 2.0.12", + "serde_with", + "thiserror 2.0.17", ] [[package]] @@ -203,37 +206,39 @@ dependencies = [ "alloy-rlp", "crc", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] name = "alloy-eip2930" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dbe3e16484669964c26ac48390245d84c410b1a5f968976076c17184725ef235" +checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "serde", ] [[package]] name = "alloy-eip7702" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16" +checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] name = "alloy-eips" -version = "1.0.42" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d9a64522a0db6ebcc4ff9c904e329e77dd737c2c25d30f1bdc32ca6c6ce334" +checksum = "7e867b5fd52ed0372a95016f3a37cbff95a9d5409230fbaef2d8ea00e8618098" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -241,14 +246,15 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", - "auto_impl 1.2.1", + "auto_impl", + "borsh", "c-kzg", "derive_more 2.0.1", "either", "serde", "serde_with", - "sha2 0.10.8", - "thiserror 2.0.12", + "sha2 0.10.9", + "thiserror 2.0.17", ] [[package]] @@ -263,6 +269,47 @@ dependencies = [ "serde_json", ] +[[package]] +name = "alloy-json-rpc" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcab4c51fb1273e3b0f59078e0cdf8aa99f697925b09f0d2055c18be46b4d48c" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "http 1.3.1", + "serde", + "serde_json", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5d6ed73d440bae8f27771b7cd507fa8f10f19ddf0b8f67e7622a52e0dbf798e" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-any", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "derive_more 2.0.1", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror 2.0.17", +] + [[package]] name = "alloy-network-primitives" version = "1.0.42" @@ -289,16 +336,16 @@ dependencies = [ "const-hex", "derive_more 2.0.1", "foldhash 0.2.0", - "getrandom 0.3.1", + "getrandom 0.3.4", "hashbrown 0.16.0", - "indexmap 2.8.0", + "indexmap 2.12.0", "itoa", - "k256 0.13.4", + "k256", "keccak-asm", "paste", "proptest", "proptest-derive", - "rand 0.9.0", + "rand 0.9.2", "ruint", "rustc-hash 2.1.1", "serde", @@ -306,6 +353,45 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-provider" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0ef8cbc2b68e2512acf04b2d296c05c98a661bc460462add6414528f4ff3d9b" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-signer", + "alloy-sol-types", + "alloy-transport", + "alloy-transport-http", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "either", + "futures", + "futures-utils-wasm", + "lru 0.13.0", + "parking_lot", + "pin-project", + "reqwest", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "wasmtimer", +] + [[package]] name = "alloy-rlp" version = 
"0.3.12" @@ -325,7 +411,41 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", +] + +[[package]] +name = "alloy-rpc-client" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c2630fde9ff6033a780635e1af6ef40e92d74a9cacb8af3defc1b15cfebca5" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types-any" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "425e14ee32eb8b7edd6a2247fe0ed640785e6eba75af27db27f1e6220c15ef0d" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", ] [[package]] @@ -345,20 +465,52 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", - "thiserror 2.0.12", + "serde_with", + "thiserror 2.0.17", ] [[package]] name = "alloy-serde" -version = "1.0.42" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "596cfa360922ba9af901cc7370c68640e4f72adb6df0ab064de32f21fec498d7" +checksum = "01e856112bfa0d9adc85bd7c13db03fad0e71d1d6fb4c2010e475b6718108236" dependencies = [ "alloy-primitives", "serde", "serde_json", ] +[[package]] +name = "alloy-signer" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a4f629da632d5279bbc5731634f0f5c9484ad9c4cad0cd974d9669dc1f46d6" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "either", + "elliptic-curve", + "k256", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-signer-local" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "590dcaeb290cdce23155e68af4791d093afc3754b1a331198a25d2d44c5456e8" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror 2.0.17", +] + [[package]] name = "alloy-sol-macro" version = "1.4.1" @@ -370,7 +522,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -382,11 +534,11 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.8.0", + "indexmap 2.12.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "syn-solidity", "tiny-keccak", ] @@ -403,7 +555,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "syn-solidity", ] @@ -429,6 +581,44 @@ dependencies = [ "serde", ] +[[package]] +name = "alloy-transport" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe215a2f9b51d5f1aa5c8cf22c8be8cdb354934de09c9a4e37aefb79b77552fd" +dependencies = [ + "alloy-json-rpc", + "auto_impl", + "base64 0.22.1", + "derive_more 2.0.1", + "futures", + "futures-utils-wasm", + "parking_lot", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-transport-http" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1b37b1a30d23deb3a8746e882c70b384c574d355bc2bbea9ea918b0c31366e" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest", + "serde_json", 
+ "tower 0.5.2", + "tracing", + "url", +] + [[package]] name = "alloy-trie" version = "0.9.1" @@ -447,22 +637,16 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.42" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab54221eccefa254ce9f65b079c097b1796e48c21c7ce358230f8988d75392fb" +checksum = "7ccf423f6de62e8ce1d6c7a11fb7508ae3536d02e0d68aaeb05c8669337d0937" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -480,9 +664,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -495,50 +679,50 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", - "once_cell", - "windows-sys 0.59.0", + "once_cell_polyfill", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" dependencies = [ "derive_arbitrary", ] @@ -643,7 +827,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -681,7 +865,7 @@ 
dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -780,7 +964,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -792,7 +976,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "synstructure", ] @@ -804,7 +988,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -836,9 +1020,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -848,32 +1032,20 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix 1.1.2", "slab", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "async-lock" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" -dependencies = [ - "event-listener 5.4.0", - "event-listener-strategy", - "pin-project-lite", + "windows-sys 0.61.2", ] [[package]] @@ -895,29 +1067,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version 0.4.1", + "syn 2.0.110", ] [[package]] @@ -946,39 +1107,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ "base64 0.22.1", - "http 1.3.0", + "http 1.3.1", "log", "url", ] [[package]] name = "auto_impl" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "auto_impl" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" @@ -990,9 +1139,11 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", + "hyper 1.8.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -1001,10 +1152,15 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 1.0.2", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", "tower 0.5.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -1016,30 +1172,16 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", -] - -[[package]] -name = "backtrace" -version = "0.3.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", + "tracing", ] [[package]] @@ -1048,12 +1190,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -1061,27 +1197,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "base58check" -version = "0.1.0" +name = "base256emoji" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee2fe4c9a0c84515f136aaae2466744a721af6d63339c18689d9e995d74d99b" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" dependencies = [ - "base58", - "sha2 0.8.2", + "const-str", + "match-lookup", ] -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.1" @@ -1102,16 +1226,16 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb97d56060ee67d285efb8001fec9d2a4c710c32efd2e14b5cbb5ba71930fc2d" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "beacon_chain" version = "0.2.0" dependencies = [ "alloy-primitives", - "bitvec 1.0.1", + "bitvec", "bls", "criterion", "educe", @@ -1132,7 +1256,7 @@ dependencies = [ "lighthouse_tracing", 
"lighthouse_version", "logging", - "lru", + "lru 0.12.5", "maplit", "merkle_proof", "metrics", @@ -1141,9 +1265,9 @@ dependencies = [ "once_cell", "oneshot_broadcast", "operation_pool", - "parking_lot 0.12.3", + "parking_lot", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "rayon", "safe_arith", "sensitive_url", @@ -1155,7 +1279,7 @@ dependencies = [ "ssz_types", "state_processing", "store", - "strum", + "strum 0.24.1", "superstruct", "task_executor", "tempfile", @@ -1165,12 +1289,13 @@ dependencies = [ "tree_hash", "tree_hash_derive", "types", + "zkvm_execution_layer", "zstd 0.13.3", ] [[package]] name = "beacon_node" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -1185,7 +1310,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.6.0", + "hyper 1.8.1", "lighthouse_network", "monitoring_api", "network_utils", @@ -1194,10 +1319,11 @@ dependencies = [ "serde_json", "slasher", "store", - "strum", + "strum 0.24.1", "task_executor", "tracing", "types", + "zkvm_execution_layer", ] [[package]] @@ -1211,7 +1337,7 @@ dependencies = [ "sensitive_url", "serde", "slot_clock", - "strum", + "strum 0.24.1", "task_executor", "tokio", "tracing", @@ -1231,10 +1357,10 @@ dependencies = [ "logging", "metrics", "num_cpus", - "parking_lot 0.12.3", + "parking_lot", "serde", "slot_clock", - "strum", + "strum 0.24.1", "task_executor", "tokio", "tokio-util", @@ -1242,12 +1368,6 @@ dependencies = [ "types", ] -[[package]] -name = "bech32" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" - [[package]] name = "bincode" version = "1.3.3" @@ -1263,7 +1383,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -1276,7 +1396,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn 2.0.110", "which", ] @@ -1319,19 +1439,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" - -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bitvec" @@ -1340,7 +1450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", - "radium 0.7.0", + "radium", "tap", "wyz", ] @@ -1354,25 +1464,13 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -1381,16 +1479,16 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "block2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" dependencies = [ - "byte-tools", + "objc2", ] [[package]] @@ -1405,7 +1503,7 @@ dependencies = [ "ethereum_ssz", "fixed_bytes", "hex", - "rand 0.9.0", + "rand 0.9.2", "safe_arith", "serde", "tree_hash", @@ -1432,8 +1530,8 @@ checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" dependencies = [ "blst", "byte-slice-cast", - "ff 0.13.1", - "group 0.13.0", + "ff", + "group", "pairing", "rand_core 0.6.4", "serde", @@ -1442,7 +1540,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0" +version = "8.0.1" dependencies = [ "beacon_node", "bytes", @@ -1463,13 +1561,36 @@ dependencies = [ ] [[package]] -name = "bs58" -version = "0.4.0" +name = "borsh" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] [[package]] -name = "bs58" +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + +[[package]] +name = "bs58" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" @@ -1485,7 +1606,7 @@ dependencies = [ "ethereum_ssz", "lighthouse_version", "mockito", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde", "serde_json", @@ -1494,9 +1615,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1504,12 +1625,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "byteorder" version = "1.5.0" @@ -1518,9 +1633,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" dependencies = [ "serde", ] @@ -1562,11 +1677,11 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.9" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -1586,10 +1701,10 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -1600,10 +1715,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.16" +version = "1.2.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -1620,9 +1736,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -1656,14 +1772,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", + "serde", "wasm-bindgen", "windows-link", ] @@ -1701,7 +1817,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -1728,9 +1844,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.32" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -1738,9 +1854,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.32" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -1751,21 +1867,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck 0.5.0", 
"proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "clap_utils" @@ -1807,7 +1923,8 @@ dependencies = [ "monitoring_api", "network", "operation_pool", - "rand 0.9.0", + "proof_generation_service", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", @@ -1824,6 +1941,7 @@ dependencies = [ "tracing", "tracing-subscriber", "types", + "zkvm_execution_layer", ] [[package]] @@ -1835,68 +1953,11 @@ dependencies = [ "cc", ] -[[package]] -name = "coins-bip32" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634c509653de24b439672164bbf56f5f582a2ab0e313d3b0f6af0b7345cf2560" -dependencies = [ - "bincode", - "bs58 0.4.0", - "coins-core", - "digest 0.10.7", - "getrandom 0.2.15", - "hmac 0.12.1", - "k256 0.11.6", - "lazy_static", - "serde", - "sha2 0.10.8", - "thiserror 1.0.69", -] - -[[package]] -name = "coins-bip39" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" -dependencies = [ - "bitvec 0.17.4", - "coins-bip32", - "getrandom 0.2.15", - "hex", - "hmac 0.12.1", - "pbkdf2 0.11.0", - "rand 0.8.5", - "sha2 0.10.8", - "thiserror 1.0.69", -] - -[[package]] -name = "coins-core" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c94090a6663f224feae66ab01e41a2555a8296ee07b5f20dab8888bdefc9f617" -dependencies = [ - "base58check", - "base64 0.12.3", - "bech32", - "blake2", - "digest 0.10.7", - "generic-array 0.14.7", - "hex", - "ripemd", - "serde", - "serde_derive", - "sha2 0.10.8", - "sha3", - "thiserror 1.0.69", -] - [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "colored" @@ -1977,15 +2038,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" dependencies = [ "cfg-if", "cpufeatures", - "hex", "proptest", - "serde", + "serde_core", ] [[package]] @@ -1994,11 +2054,17 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const-str" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + [[package]] name = "const_format" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ "const_format_proc_macros", ] @@ -2022,22 +2088,21 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" 
[[package]] name = "context_deserialize" -version = "0.1.0" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5f9ea0a0ae2de4943f5ca71590b6dbd0b952475f0a0cafb30a470cec78c8b9" dependencies = [ "context_deserialize_derive", - "milhouse", "serde", - "ssz_types", ] [[package]] name = "context_deserialize_derive" -version = "0.1.0" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c57b2db1e4e3ed804dcc49894a144b68fe6c754b8f545eb1dda7ad3c7dbe7e6" dependencies = [ - "context_deserialize", "quote", - "serde", - "serde_json", "syn 1.0.109", ] @@ -2047,15 +2112,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -2102,9 +2158,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -2117,9 +2173,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -2202,21 +2258,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" - -[[package]] -name = "crypto-bigint" -version = "0.4.9" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", - "subtle", - "zeroize", -] +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" @@ -2224,7 +2268,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array 0.14.7", + "generic-array", "rand_core 0.6.4", "subtle", "zeroize", @@ -2232,11 +2276,11 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ - "generic-array 0.14.7", + "generic-array", "rand_core 0.6.4", "typenum", ] @@ -2247,7 +2291,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - 
"generic-array 0.14.7", + "generic-array", "subtle", ] @@ -2271,12 +2315,13 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.5" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" dependencies = [ - "nix 0.29.0", - "windows-sys 0.59.0", + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.2", ] [[package]] @@ -2303,7 +2348,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2318,12 +2363,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", + "darling_core 0.20.11", + "darling_macro 0.20.11", ] [[package]] @@ -2352,16 +2397,16 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2376,7 +2421,7 @@ dependencies = [ "quote", "serde", "strsim 0.11.1", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2392,13 +2437,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core 0.20.10", + "darling_core 0.20.11", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2409,7 +2454,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2432,17 +2477,31 @@ dependencies = [ "libc", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = "data-encoding-macro" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9724adfcf41f45bf652b3995837669d73c4d49a1b5ac1ff82905ac7d9b5558" +checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2450,12 +2509,12 @@ dependencies = [ [[package]] name = 
"data-encoding-macro-internal" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" +checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2470,7 +2529,7 @@ dependencies = [ "hex", "serde", "store", - "strum", + "strum 0.24.1", "tracing", "types", ] @@ -2483,9 +2542,9 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" +checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" dependencies = [ "futures", "tokio", @@ -2501,7 +2560,7 @@ dependencies = [ "alloy-primitives", "ethereum_ssz", "hex", - "reqwest 0.11.27", + "reqwest", "serde_json", "sha2 0.9.9", "tree_hash", @@ -2510,19 +2569,9 @@ dependencies = [ [[package]] name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "zeroize", -] - -[[package]] -name = "der" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", "zeroize", @@ -2544,11 +2593,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", + "serde_core", ] [[package]] @@ -2564,35 +2614,26 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "derive_more" -version = "0.99.19" +version = "0.99.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" dependencies = [ - "convert_case 0.4.0", + "convert_case", "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.100", -] - -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl 1.0.0", + "syn 2.0.110", ] [[package]] @@ -2601,18 +2642,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ - "derive_more-impl 2.0.1", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "derive_more-impl", ] [[package]] @@ -2623,26 +2653,17 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "unicode-xid", ] -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -2688,9 +2709,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20b702c8491b3325866a4935d0b5101e49144d74540384243b6293794aad6fa" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" dependencies = [ "aes 0.8.4", "aes-gcm", @@ -2706,19 +2727,31 @@ dependencies = [ "hkdf", "lazy_static", "libp2p-identity", - "lru", + "lru 0.12.5", "more-asserts", "multiaddr", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "uint 0.10.0", "zeroize", ] +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.10.0", + "block2", + "libc", + "objc2", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -2727,7 +2760,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2739,7 +2772,7 @@ dependencies = [ "eth2", "futures", "logging", - "parking_lot 0.12.3", + "parking_lot", "slot_clock", "task_executor", "tokio", @@ -2760,6 +2793,22 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" +[[package]] +name = "dummy_el" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "clap", + "hex", + "jsonwebtoken", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "dunce" version = "1.0.5" @@ -2767,16 +2816,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] -name = "ecdsa" -version = "0.14.8" +name = "dyn-clone" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "ecdsa" @@ -2784,13 +2827,13 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.9", + "der", "digest 0.10.7", - 
"elliptic-curve 0.13.8", - "rfc6979 0.4.0", + "elliptic-curve", + "rfc6979", "serdect", - "signature 2.2.0", - "spki 0.7.3", + "signature", + "spki", ] [[package]] @@ -2799,21 +2842,21 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8 0.10.2", - "signature 2.2.0", + "pkcs8", + "signature", ] [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.8", + "sha2 0.10.9", "subtle", "zeroize", ] @@ -2827,7 +2870,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2878,7 +2921,7 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -2910,8 +2953,8 @@ checksum = "05c599a59deba6188afd9f783507e4d89efc997f0fa340a758f0d0992b322416" dependencies = [ "blst", "blstrs", - "ff 0.13.1", - "group 0.13.0", + "ff", + "group", "pairing", "subtle", ] @@ -2941,7 +2984,7 @@ dependencies = [ "ekzg-bls12-381", "ekzg-maybe-rayon", "ekzg-polynomial", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -2988,41 +3031,21 @@ dependencies = [ "serde_json", ] -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array 0.14.7", - "group 0.12.1", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.1", - "generic-array 0.14.7", - "group 0.13.0", - "pkcs8 0.10.2", + "ff", + "generic-array", + "group", + "pkcs8", "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", "serdect", "subtle", "zeroize", @@ -3048,7 +3071,7 @@ dependencies = [ "bytes", "ed25519-dalek", "hex", - "k256 0.13.4", + "k256", "log", "rand 0.8.5", "serde", @@ -3065,37 +3088,27 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "enum-ordinalize" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" dependencies = [ "enum-ordinalize-derive", ] [[package]] name = "enum-ordinalize-derive" -version = "4.3.1" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "env_logger" -version = "0.8.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" -dependencies = [ - "log", - "regex", + "syn 2.0.110", ] [[package]] @@ -3128,34 +3141,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "eth-keystore" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" -dependencies = [ - "aes 0.8.4", - "ctr 0.9.2", - "digest 0.10.7", - "hex", - "hmac 0.12.1", - "pbkdf2 0.11.0", - "rand 0.8.5", - "scrypt 0.10.0", - "serde", - "serde_json", - "sha2 0.10.8", - "sha3", - "thiserror 1.0.69", - "uuid 0.8.2", + "windows-sys 0.61.2", ] [[package]] @@ -3177,8 +3168,8 @@ dependencies = [ "multiaddr", "pretty_reqwest_error", "proto_array", - "rand 0.9.0", - "reqwest 0.11.27", + "rand 0.9.2", + "reqwest", "reqwest-eventsource", "sensitive_url", "serde", @@ -3233,8 +3224,8 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.9.0", - "scrypt 0.7.0", + "rand 0.9.2", + "scrypt", "serde", "serde_json", "serde_repr", @@ -3255,7 +3246,7 @@ dependencies = [ "ethereum_ssz", "kzg", "pretty_reqwest_error", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde_yaml", "sha2 0.9.9", @@ -3274,7 +3265,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.9.0", + "rand 0.9.2", "serde", "serde_json", "serde_repr", @@ -3293,85 +3284,38 @@ dependencies = [ ] [[package]] -name = "ethabi" -version = "18.0.0" +name = "ethereum_hashing" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa93f58bb1eb3d1e556e4f408ef1dac130bad01ac37db4e7ade45de40d1c86a" +dependencies = [ + "cpufeatures", + "ring", + "sha2 0.10.9", +] + +[[package]] +name = "ethereum_serde_utils" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" +checksum = "3dc1355dbb41fbbd34ec28d4fb2a57d9a70c67ac3c19f6a5ca4d4a176b9e997a" dependencies = [ - "ethereum-types", + "alloy-primitives", "hex", - "once_cell", - "regex", "serde", + "serde_derive", "serde_json", - "sha3", - "thiserror 1.0.69", - "uint 0.9.5", ] [[package]] -name = "ethbloom" -version = "0.13.0" +name = "ethereum_ssz" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "tiny-keccak", -] - -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", - "scale-info", - "uint 0.9.5", -] - -[[package]] -name = "ethereum_hashing" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" -dependencies = [ - "cpufeatures", - "ring", - "sha2 0.10.8", -] - -[[package]] -name = "ethereum_serde_utils" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc1355dbb41fbbd34ec28d4fb2a57d9a70c67ac3c19f6a5ca4d4a176b9e997a" -dependencies = [ - "alloy-primitives", - "hex", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "ethereum_ssz" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca8ba45b63c389c6e115b095ca16381534fdcc03cf58176a3f8554db2dbe19b" +checksum = "7e8cd8c4f47dfb947dbfe3cdf2945ae1da808dbedc592668658e827a12659ba1" dependencies = [ "alloy-primitives", "arbitrary", + "context_deserialize", "ethereum_serde_utils", "itertools 0.13.0", "serde", @@ -3382,156 +3326,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" +checksum = "78d247bc40823c365a62e572441a8f8b12df03f171713f06bc76180fcd56ab71" dependencies = [ - "darling 0.20.10", + "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "ethers-contract" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" -dependencies = [ - "ethers-core", - "ethers-providers", - "futures-util", - "hex", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror 1.0.69", -] - -[[package]] -name = "ethers-core" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" -dependencies = [ - "arrayvec", - "bytes", - "chrono", - "convert_case 0.6.0", - "elliptic-curve 0.12.3", - "ethabi", - "generic-array 0.14.7", - "hex", - "k256 0.11.6", - "open-fastrlp", - "proc-macro2", - "rand 0.8.5", - "rlp", - "rlp-derive", - "serde", - "serde_json", - "strum", - "syn 1.0.109", - "thiserror 1.0.69", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-etherscan" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" -dependencies = [ - "ethers-core", - "getrandom 0.2.15", - "reqwest 0.11.27", - "semver 1.0.26", - "serde", - "serde-aux", - "serde_json", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "ethers-middleware" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e71df7391b0a9a51208ffb5c7f2d068900e99d6b3128d3a4849d138f194778b7" -dependencies = [ - "async-trait", - "auto_impl 0.5.0", - "ethers-contract", - "ethers-core", - "ethers-etherscan", - "ethers-providers", - "ethers-signers", - "futures-locks", - "futures-util", - "instant", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-futures", - "url", -] - -[[package]] -name = "ethers-providers" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" -dependencies = [ - "async-trait", - "auto_impl 1.2.1", - "base64 0.13.1", - "ethers-core", - "futures-core", - "futures-timer", - "futures-util", - "getrandom 0.2.15", - "hashers", - "hex", - 
"http 0.2.12", - "once_cell", - "parking_lot 0.11.2", - "pin-project", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-timer", - "web-sys", - "ws_stream_wasm", -] - -[[package]] -name = "ethers-signers" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f41ced186867f64773db2e55ffdd92959e094072a1d09a5e5e831d443204f98" -dependencies = [ - "async-trait", - "coins-bip32", - "coins-bip39", - "elliptic-curve 0.12.3", - "eth-keystore", - "ethers-core", - "hex", - "rand 0.8.5", - "sha2 0.10.8", - "thiserror 1.0.69", + "syn 2.0.110", ] [[package]] @@ -3542,9 +3344,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -3553,11 +3355,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -3576,19 +3378,20 @@ dependencies = [ name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-signer-local", "async-channel 1.9.0", "deposit_contract", - "ethers-core", - "ethers-middleware", - "ethers-providers", - "ethers-signers", "execution_layer", "fork_choice", "futures", "hex", "logging", "network_utils", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde_json", "task_executor", @@ -3621,12 +3424,12 @@ dependencies = [ "kzg", "lighthouse_version", "logging", - "lru", + "lru 0.12.5", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pretty_reqwest_error", - "rand 0.9.0", - "reqwest 0.11.27", + "rand 0.9.2", + "reqwest", "sensitive_url", "serde", "serde_json", @@ -3634,7 +3437,7 @@ dependencies = [ "slot_clock", "ssz_types", "state_processing", - "strum", + "strum 0.24.1", "superstruct", "task_executor", "tempfile", @@ -3649,12 +3452,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -3680,7 +3477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl", "bytes", ] @@ -3691,7 +3488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl", "bytes", ] @@ -3705,23 +3502,13 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "ff" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" dependencies = [ - "bitvec 1.0.1", + "bitvec", "rand_core 0.6.4", "subtle", ] @@ -3756,6 +3543,12 @@ dependencies = [ "windows-acl", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + [[package]] name = "fixed-hash" version = "0.8.0" @@ -3778,9 +3571,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "libz-sys", @@ -3795,9 +3588,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "foldhash" @@ -3840,9 +3633,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -3930,24 +3723,14 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "futures-core", "pin-project-lite", ] -[[package]] -name = "futures-locks" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" -dependencies = [ - "futures-channel", - "futures-task", -] - [[package]] name = "futures-macro" version = "0.3.31" @@ -3956,7 +3739,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -3966,7 +3749,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.23", + "rustls 0.23.35", "rustls-pki-types", ] @@ -4007,35 +3790,10 @@ dependencies = [ ] [[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "generator" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" 
-dependencies = [ - "cfg-if", - "libc", - "log", - "rustversion", - "windows 0.58.0", -] - -[[package]] -name = "generic-array" -version = "0.12.4" +name = "futures-utils-wasm" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "generic-array" @@ -4065,27 +3823,29 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasip2", + "wasm-bindgen", ] [[package]] @@ -4094,21 +3854,15 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug 0.3.1", + "opaque-debug", "polyval", ] -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "graffiti_file" @@ -4122,24 +3876,13 @@ dependencies = [ "types", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.1", + "ff", "rand 0.8.5", "rand_core 0.6.4", "rand_xorshift 0.3.0", @@ -4148,9 +3891,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -4158,7 +3901,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.8.0", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -4167,17 +3910,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = 
"f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.3.0", - "indexmap 2.8.0", + "http 1.3.1", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -4186,12 +3929,13 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy", ] [[package]] @@ -4227,13 +3971,13 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash 0.1.4", + "foldhash 0.1.5", ] [[package]] @@ -4246,15 +3990,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hashers" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" -dependencies = [ - "fxhash", -] - [[package]] name = "hashlink" version = "0.8.4" @@ -4279,7 +4014,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] [[package]] @@ -4349,24 +4084,15 @@ checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - -[[package]] -name = "hermit-abi" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - "serde", -] [[package]] name = "hex-conservative" @@ -4399,10 +4125,10 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.0", + "rand 0.9.2", "ring", - "socket2", - "thiserror 2.0.12", + "socket2 0.5.10", + "thiserror 2.0.17", "tinyvec", "tokio", "tracing", @@ -4421,11 +4147,11 @@ dependencies = [ "ipconfig", "moka", "once_cell", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "resolv-conf", "smallvec", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -4460,22 +4186,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" 
-dependencies = [ - "libc", - "match_cfg", - "winapi", + "windows-sys 0.61.2", ] [[package]] @@ -4491,9 +4206,9 @@ dependencies = [ [[package]] name = "http" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a761d192fbf18bdef69f5ceedd0d1333afcbda0ee23840373b8317570d23c65" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -4518,7 +4233,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.3.0", + "http 1.3.1", ] [[package]] @@ -4529,7 +4244,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "pin-project-lite", ] @@ -4556,14 +4271,14 @@ dependencies = [ "lighthouse_tracing", "lighthouse_version", "logging", - "lru", + "lru 0.12.5", "metrics", "network", "network_utils", "operation_pool", - "parking_lot 0.12.3", + "parking_lot", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "safe_arith", "sensitive_url", "serde", @@ -4595,7 +4310,7 @@ dependencies = [ "malloc_utils", "metrics", "network_utils", - "reqwest 0.11.27", + "reqwest", "serde", "slot_clock", "store", @@ -4620,9 +4335,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" @@ -4634,14 +4349,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -4650,20 +4365,22 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", - "h2 0.4.8", - "http 1.3.0", + "futures-core", + "h2 0.4.12", + "http 1.3.1", "http-body 1.0.1", "httparse", "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -4671,16 +4388,19 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.2" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", - "rustls 0.21.12", + "http 1.3.1", + "hyper 1.8.1", + "hyper-util", + "rustls 0.23.35", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.26.4", + "tower-service", + "webpki-roots", ] [[package]] @@ -4689,7 +4409,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.6.0", + "hyper 1.8.1", "hyper-util", 
"pin-project-lite", "tokio", @@ -4698,31 +4418,39 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", - "hyper 0.14.32", + "http-body-util", + "hyper 1.8.1", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.8.1", + "ipnet", + "libc", + "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -4730,16 +4458,17 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", - "windows-core 0.52.0", + "windows-core 0.62.2", ] [[package]] @@ -4753,21 +4482,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", @@ -4776,99 +4506,61 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = 
"icu_normalizer_data" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" dependencies = [ - "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", + "icu_locale_core", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -4877,9 +4569,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -4888,9 +4580,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -4924,9 +4616,9 @@ dependencies = [ "netlink-proto", "netlink-sys", "rtnetlink", - "system-configuration 0.6.1", + "system-configuration", "tokio", - "windows 0.53.0", + "windows", ] [[package]] @@ -4939,12 +4631,12 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.3.0", + "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "tokio", "url", "xmltree", @@ -4959,24 +4651,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" -dependencies = [ - "serde", -] - [[package]] name = "impl-trait-for-tuples" version = "0.2.3" @@ -4985,7 +4659,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -4996,18 +4670,20 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", ] [[package]] name = "indexmap" -version = "2.8.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -5021,9 +4697,9 @@ dependencies = [ "filesystem", "lockfile", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", - "reqwest 0.11.27", + "parking_lot", + "rand 0.9.2", + "reqwest", "serde", "serde_json", "signing_method", @@ -5042,19 +4718,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", + "generic-array", ] [[package]] @@ -5092,8 +4756,8 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", - "widestring 1.1.0", + "socket2 0.5.10", + "widestring 1.2.1", "windows-sys 0.48.0", "winreg", ] @@ -5104,22 +4768,32 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ - "hermit-abi 0.5.0", + "hermit-abi 0.5.2", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -5165,18 +4839,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = 
"9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", @@ -5197,19 +4872,6 @@ dependencies = [ "simple_asn1", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.8", - "sha3", -] - [[package]] name = "k256" version = "0.13.4" @@ -5217,12 +4879,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.8", - "signature 2.2.0", + "sha2 0.10.9", + "signature", ] [[package]] @@ -5292,7 +4954,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -5352,25 +5014,25 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.175" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "libm" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libmdbx" @@ -5379,11 +5041,11 @@ source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52be dependencies = [ "bitflags 1.3.2", "byteorder", - "derive_more 0.99.19", + "derive_more 0.99.20", "indexmap 1.9.3", "libc", "mdbx-sys", - "parking_lot 0.12.3", + "parking_lot", "thiserror 1.0.69", ] @@ -5397,7 +5059,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.16", "libp2p-allow-block-list", "libp2p-connection-limits", "libp2p-core", @@ -5416,7 +5078,7 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -5455,12 +5117,12 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "quick-protobuf", "rand 0.8.5", "rw-stream-sink", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "unsigned-varint 0.8.0", "web-time", @@ -5477,7 +5139,7 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.3", + "parking_lot", "smallvec", "tracing", ] @@ 
-5487,7 +5149,7 @@ name = "libp2p-gossipsub" version = "0.50.0" source = "git+https://github.com/sigp/rust-libp2p.git?rev=5acdf89a65d64098f9346efa5769e57bcd19dea9#5acdf89a65d64098f9346efa5769e57bcd19dea9" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "asynchronous-codec", "base64 0.22.1", "byteorder", @@ -5496,7 +5158,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.16", "hashlink 0.10.0", "hex_fmt", "libp2p-core", @@ -5507,7 +5169,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.8", + "sha2 0.10.9", "tracing", "web-time", ] @@ -5529,7 +5191,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -5543,12 +5205,12 @@ dependencies = [ "bs58 0.5.1", "ed25519-dalek", "hkdf", - "k256 0.13.4", + "k256", "multihash", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.8", - "thiserror 2.0.12", + "sha2 0.10.9", + "thiserror 2.0.17", "tracing", "zeroize", ] @@ -5567,7 +5229,7 @@ dependencies = [ "libp2p-swarm", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -5590,9 +5252,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aaa6fee3722e355443058472fc4705d78681bc2d8e447a0bdeb3fecf40cd197" +checksum = "95a4019ba30c4e42b776113e9778071691fe3f34bf23b6b3bf0dfcf29d801f3d" dependencies = [ "asynchronous-codec", "bytes", @@ -5600,7 +5262,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "smallvec", "tracing", @@ -5624,7 +5286,7 @@ dependencies = [ "rand 0.8.5", "snow", "static_assertions", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "x25519-dalek", "zeroize", @@ -5661,9 +5323,9 @@ dependencies = [ "quinn", "rand 0.8.5", "ring", - "rustls 0.23.23", - "socket2", - "thiserror 2.0.12", + "rustls 0.23.35", + "socket2 0.5.10", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5681,7 +5343,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru", + "lru 0.12.5", "multistream-select", "rand 0.8.5", "smallvec", @@ -5698,7 +5360,7 @@ checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck 0.5.0", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -5712,7 +5374,7 @@ dependencies = [ "if-watch", "libc", "libp2p-core", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -5729,9 +5391,9 @@ dependencies = [ "libp2p-identity", "rcgen", "ring", - "rustls 0.23.23", - "rustls-webpki 0.103.4", - "thiserror 2.0.12", + "rustls 0.23.35", + "rustls-webpki 0.103.8", + "thiserror 2.0.17", "x509-parser", "yasna", ] @@ -5760,19 +5422,19 @@ dependencies = [ "either", "futures", "libp2p-core", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "yamux 0.12.1", - "yamux 0.13.4", + "yamux 0.13.8", ] [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "libc", ] @@ -5789,9 +5451,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -5800,7 +5462,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_manager", "account_utils", @@ -5875,22 +5537,21 @@ dependencies = [ "lighthouse_version", "local-ip-address", "logging", - "lru", + "lru 0.12.5", "lru_cache", "metrics", "network_utils", - "parking_lot 0.12.3", + "parking_lot", "prometheus-client", - "quickcheck", - "quickcheck_macros", - "rand 0.9.0", + "proptest", + "rand 0.9.2", "regex", "serde", "sha2 0.9.9", "smallvec", "snap", "ssz_types", - "strum", + "strum 0.24.1", "superstruct", "task_executor", "tempfile", @@ -5919,7 +5580,7 @@ dependencies = [ "futures", "initialized_validators", "logging", - "parking_lot 0.12.3", + "parking_lot", "serde", "signing_method", "slashing_protection", @@ -5934,7 +5595,7 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "8.0.0" +version = "8.0.1" dependencies = [ "regex", ] @@ -5953,15 +5614,15 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lmdb-rkv" @@ -5986,23 +5647,22 @@ dependencies = [ [[package]] name = "local-ip-address" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", - "thiserror 1.0.69", + "thiserror 2.0.17", "windows-sys 0.59.0", ] [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] @@ -6016,9 +5676,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "logging" @@ -6051,27 +5711,29 @@ dependencies = [ ] [[package]] -name = "loom" -version = "0.7.2" +name = "lru" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "cfg-if", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber", + "hashbrown 0.15.5", ] [[package]] name = "lru" -version = "0.12.5" +version = "0.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "lru_cache" version = "0.1.0" @@ -6082,9 +5744,9 @@ dependencies = [ [[package]] name = "mach2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" dependencies = [ "libc", ] @@ -6097,7 +5759,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6106,7 +5768,7 @@ version = "0.1.0" dependencies = [ "libc", "metrics", - "parking_lot 0.12.3", + "parking_lot", "tikv-jemalloc-ctl", "tikv-jemallocator", ] @@ -6118,10 +5780,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] -name = "match_cfg" -version = "0.1.0" +name = "match-lookup" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] name = "matchers" @@ -6163,9 +5830,9 @@ checksum = "33746aadcb41349ec291e7f2f0a3aa6834d1d7c58066fb4b01f68efc4c4b7631" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memoffset" @@ -6183,8 +5850,7 @@ dependencies = [ "alloy-primitives", "ethereum_hashing", "fixed_bytes", - "quickcheck", - "quickcheck_macros", + "proptest", "safe_arith", ] @@ -6220,18 +5886,19 @@ dependencies = [ [[package]] name = "milhouse" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdb104e38d3a8c5ffb7e9d2c43c522e6bcc34070edbadba565e722f0dee56c7" +checksum = "259dd9da2ae5e0278b95da0b7ecef9c18c309d0a2d9e6db57ed33b9e8910c5e7" dependencies = [ "alloy-primitives", "arbitrary", + "context_deserialize", "educe", "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.13.0", - "parking_lot 0.12.3", + "parking_lot", "rayon", "serde", "smallvec", @@ -6265,22 +5932,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi", + "windows-sys 0.61.2", ] [[package]] @@ -6312,7 +5980,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6324,7 +5992,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6337,13 +6005,13 @@ dependencies = [ "bytes", "colored", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "regex", "serde_json", "serde_urlencoded", @@ -6353,21 +6021,20 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.10" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", - "loom", - "parking_lot 0.12.3", + "equivalent", + "parking_lot", "portable-atomic", "rustc_version 0.4.1", "smallvec", "tagptr", - "thiserror 1.0.69", - "uuid 1.15.1", + "uuid 1.18.1", ] [[package]] @@ -6379,7 +6046,7 @@ dependencies = [ "lighthouse_version", "metrics", "regex", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde", "serde_json", @@ -6416,11 +6083,12 @@ dependencies = [ [[package]] name = "multibase" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" dependencies = [ "base-x", + "base256emoji", "data-encoding", "data-encoding-macro", ] @@ -6539,7 +6207,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -6578,7 +6246,7 @@ dependencies = [ "hex", "igd-next", "itertools 0.10.5", - "k256 0.13.4", + "k256", "kzg", "libp2p-gossipsub", "lighthouse_network", @@ -6588,9 +6256,9 @@ dependencies = [ "matches", "metrics", "operation_pool", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rand_chacha 0.3.1", "rand_chacha 0.9.0", "serde_json", @@ -6598,7 +6266,7 @@ dependencies = [ "smallvec", "ssz_types", "store", - "strum", + "strum 0.24.1", "task_executor", "tokio", "tokio-stream", @@ -6617,7 +6285,7 @@ dependencies = [ "lru_cache", "metrics", "multiaddr", - "parking_lot 0.12.3", + "parking_lot", "serde", "tiny-keccak", ] @@ -6646,11 +6314,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.29.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "cfg-if", "cfg_aliases", "libc", @@ -6700,11 +6368,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.50.1" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" 
dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6719,11 +6387,10 @@ dependencies = [ [[package]] name = "num-bigint-dig" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" dependencies = [ - "byteorder", "lazy_static", "libm", "num-integer", @@ -6773,35 +6440,64 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "nybbles" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" dependencies = [ + "alloy-rlp", "cfg-if", + "proptest", "ruint", "serde", "smallvec", ] [[package]] -name = "object" -version = "0.36.7" +name = "objc2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" dependencies = [ - "memchr", + "objc2-encode", ] +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + [[package]] name = "oid-registry" version = "0.8.1" @@ -6813,19 +6509,25 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" dependencies = [ "critical-section", "portable-atomic", ] +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + [[package]] name = "oneshot_broadcast" version = "0.1.0" dependencies = [ - "parking_lot 0.12.3", + "parking_lot", ] [[package]] @@ -6834,50 +6536,19 @@ version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "open-fastrlp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" -dependencies = [ - "arrayvec", - "auto_impl 1.2.1", - "bytes", - "ethereum-types", - "open-fastrlp-derive", -] - -[[package]] -name = "open-fastrlp-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "cfg-if", "foreign-types", "libc", @@ -6894,7 +6565,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6905,18 +6576,18 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.4.2+3.4.1" +version = "300.5.4+3.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168ce4e058f975fe43e89d9ccf78ca668601887ae736090aacc23ae353c298e2" +checksum = "a507b3792995dae9b0df8a1c1e3771e8418b7c2d9f0baeba32e6fe8b06c7cb72" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.107" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -6935,7 +6606,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -6947,9 +6618,9 @@ checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" dependencies = [ "async-trait", "bytes", - "http 1.3.0", + "http 1.3.1", "opentelemetry", - "reqwest 0.12.15", + "reqwest", ] [[package]] @@ -6958,14 +6629,14 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "http 1.3.0", + "http 1.3.1", "opentelemetry", "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", "prost", - "reqwest 0.12.15", - "thiserror 2.0.12", + "reqwest", + "thiserror 2.0.17", "tokio", "tonic 0.13.1", "tracing", @@ -6994,9 +6665,9 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.0", + "rand 0.9.2", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -7004,15 +6675,15 @@ name = "operation_pool" version = "0.2.0" dependencies = [ "beacon_chain", - "bitvec 1.0.1", + "bitvec", "educe", "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", "maplit", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "rayon", "serde", "state_processing", @@ -7021,32 +6692,23 @@ dependencies = [ "types", ] -[[package]] -name = "ordered-float" -version = "2.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" -dependencies = [ - "num-traits", -] - [[package]] name = "pairing" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" dependencies = [ - "group 0.13.0", + "group", ] [[package]] name = "parity-scale-codec" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" dependencies = [ "arrayvec", - "bitvec 1.0.1", + "bitvec", "byte-slice-cast", "const_format", "impl-trait-for-tuples", @@ -7057,14 +6719,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7075,50 +6737,25 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - -[[package]] -name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.10", + "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -7156,46 +6793,35 @@ dependencies = [ "digest 0.10.7", "hmac 0.12.1", "password-hash", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", + "serde_core", ] [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" 
-version = "2.7.15" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" dependencies = [ "memchr", - "thiserror 2.0.12", "ucd-trie", ] -[[package]] -name = "pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version 0.4.1", -] - [[package]] name = "pin-project" version = "1.1.10" @@ -7213,7 +6839,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7228,24 +6854,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.9", - "spki 0.7.3", + "der", + "spki", ] [[package]] @@ -7290,17 +6906,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi 0.5.2", "pin-project-lite", - "rustix 0.38.44", - "tracing", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.61.2", ] [[package]] @@ -7310,7 +6925,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", - "opaque-debug 0.3.1", + "opaque-debug", "universal-hash", ] @@ -7322,15 +6937,24 @@ checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", - "opaque-debug 0.3.1", + "opaque-debug", "universal-hash", ] [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "potential_utf" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -7344,7 +6968,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.23", + "zerocopy", ] [[package]] @@ -7377,18 +7001,18 @@ dependencies = [ name = "pretty_reqwest_error" version = "0.1.0" dependencies = [ - "reqwest 0.11.27", + "reqwest", "sensitive_url", ] [[package]] name = "prettyplease" 
-version = "0.2.30" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1ccf34da56fc294e7d4ccf69a85992b7dfb826b7cf57bac6a70bba3494cc08a" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7399,45 +7023,18 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", "uint 0.9.5", ] [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -7457,14 +7054,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -7494,7 +7091,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.3", + "parking_lot", "thiserror 1.0.69", ] @@ -7506,7 +7103,7 @@ checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.3", + "parking_lot", "prometheus-client-derive-encode", ] @@ -7518,23 +7115,36 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", +] + +[[package]] +name = "proof_generation_service" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "lighthouse_network", + "logging", + "network", + "tokio", + "tracing", + "types", + "zkvm_execution_layer", ] [[package]] name = "proptest" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.0", - "lazy_static", + "bitflags 2.10.0", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift 0.3.0", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift 0.4.0", "regex-syntax", "rusty-fork", "tempfile", @@ -7549,7 +7159,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7572,7 +7182,7 @@ dependencies = [ 
"itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7605,7 +7215,7 @@ checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", - "derive_more 0.99.19", + "derive_more 0.99.20", "glob", "mach2", "nix 0.24.3", @@ -7643,62 +7253,43 @@ dependencies = [ "unsigned-varint 0.8.0", ] -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "env_logger", - "log", - "rand 0.8.5", -] - -[[package]] -name = "quickcheck_macros" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", + "cfg_aliases", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.23", - "socket2", - "thiserror 2.0.12", + "rustls 0.23.35", + "socket2 0.6.1", + "thiserror 2.0.17", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", "ring", "rustc-hash 2.1.1", - "rustls 0.23.23", + "rustls 0.23.35", "rustls-pki-types", "slab", - "thiserror 2.0.12", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -7706,27 +7297,33 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.1", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.39" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "r2d2" version = "0.8.10" @@ -7734,7 +7331,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.3", + "parking_lot", "scheduled-thread-pool", ] @@ -7748,12 +7345,6 @@ dependencies = [ "rusqlite", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - [[package]] name = "radium" version = "0.7.0" @@ -7774,14 +7365,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", "serde", - "zerocopy 0.8.23", ] [[package]] @@ -7810,7 +7400,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -7819,7 +7409,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", "serde", ] @@ -7843,9 +7433,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -7853,9 +7443,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -7876,29 +7466,20 @@ dependencies = [ [[package]] name = "redb" -version = "2.4.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" +checksum = "8eca1e9d98d5a7e9002d0013e18d5a9b000aee942eb134883a82f06ebffb6c01" dependencies = [ "libc", ] [[package]] name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.10" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", ] [[package]] @@ -7907,16 +7488,36 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "libredox", "thiserror 1.0.69", ] +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + 
"syn 2.0.110", +] + [[package]] name = "regex" -version = "1.11.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -7926,9 +7527,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -7937,98 +7538,60 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "reqwest" -version = "0.11.27" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-rustls", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration 0.5.1", - "tokio", - "tokio-native-tls", - "tokio-rustls 0.24.1", - "tokio-util", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "webpki-roots", - "winreg", -] +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", + "hyper-rustls", + "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", - "mime", - "once_cell", + "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls 0.23.35", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", + "tokio-native-tls", + "tokio-rustls 0.26.4", + "tokio-util", "tower 0.5.2", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", - "windows-registry", + "webpki-roots", ] [[package]] name = "reqwest-eventsource" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f529a5ff327743addc322af460761dff5b50e0c826b9e6ac44c3195c50bb2026" +checksum = "632c55746dbb44275691640e7b40c907c16a2dc1a5842aa98aaec90da6ec6bde" dependencies = [ "eventsource-stream", "futures-core", @@ -8036,30 +7599,15 @@ dependencies = [ "mime", "nom", "pin-project-lite", - "reqwest 0.11.27", + "reqwest", "thiserror 1.0.69", ] [[package]] name = "resolv-conf" -version = "0.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] - -[[package]] -name = "rfc6979" -version = "0.3.1" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac 0.12.1", - "zeroize", -] +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "rfc6979" @@ -8079,21 +7627,12 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", ] -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "rlp" version = "0.5.2" @@ -8104,17 +7643,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "rpassword" version = "5.0.1" @@ -8173,7 +7701,7 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rlp", "ruint-macro", "serde_core", @@ -8218,12 +7746,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -8257,7 +7779,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.26", + "semver 1.0.27", ] [[package]] @@ -8289,7 +7811,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -8298,27 +7820,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "errno", "libc", - "linux-raw-sys 0.9.2", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", ] [[package]] @@ -8337,38 +7847,29 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = 
"533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.8", + "rustls-webpki 0.103.8", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.3.0", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", + "security-framework 3.5.1", ] [[package]] @@ -8382,24 +7883,14 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" dependencies = [ "web-time", "zeroize", ] -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.8" @@ -8413,9 +7904,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -8424,15 +7915,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", "quick-error", @@ -8472,15 +7963,6 @@ dependencies = [ "cipher 0.3.0", ] -[[package]] -name = "salsa20" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" -dependencies = [ - "cipher 0.4.4", -] - [[package]] name = "same-file" version = "1.0.6" @@ -8491,45 +7973,45 @@ dependencies = [ ] [[package]] -name = "scale-info" -version = "2.11.6" +name = "schannel" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "cfg-if", - "derive_more 1.0.0", - "parity-scale-codec", - "scale-info-derive", + "windows-sys 0.61.2", ] [[package]] -name = "scale-info-derive" -version = "2.11.6" +name = 
"scheduled-thread-pool" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.100", + "parking_lot", ] [[package]] -name = "schannel" -version = "0.1.27" +name = "schemars" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" dependencies = [ - "windows-sys 0.59.0", + "dyn-clone", + "ref-cast", + "serde", + "serde_json", ] [[package]] -name = "scheduled-thread-pool" -version = "0.2.7" +name = "schemars" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" dependencies = [ - "parking_lot 0.12.3", + "dyn-clone", + "ref-cast", + "serde", + "serde_json", ] [[package]] @@ -8552,56 +8034,20 @@ checksum = "879588d8f90906e73302547e20fffefdd240eb3e0e744e142321f5d49dea0518" dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", - "salsa20 0.8.1", + "salsa20", "sha2 0.9.9", ] -[[package]] -name = "scrypt" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" -dependencies = [ - "hmac 0.12.1", - "pbkdf2 0.11.0", - "salsa20 0.10.2", - "sha2 0.10.8", -] - -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array 0.14.7", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.9", - "generic-array 0.14.7", - "pkcs8 0.10.2", + "base16ct", + "der", + "generic-array", + "pkcs8", "serdect", "subtle", "zeroize", @@ -8634,7 +8080,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -8643,11 +8089,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.3.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -8656,9 +8102,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -8675,11 +8121,12 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] @@ -8691,12 +8138,6 @@ dependencies = [ "pest", ] -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - [[package]] name = "sensitive_url" version = "0.1.0" @@ -8717,27 +8158,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-aux" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5290c39c5f6992b9dddbda28541d965dba46468294e6018a408fa297e6c602de" -dependencies = [ - "serde", - "serde-value", - "serde_json", -] - -[[package]] -name = "serde-value" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" -dependencies = [ - "ordered-float", - "serde", -] - [[package]] name = "serde_array_query" version = "0.1.0" @@ -8765,19 +8185,30 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +dependencies = [ + "itoa", + "serde", ] [[package]] @@ -8788,7 +8219,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -8805,24 +8236,33 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.15.1" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04" +checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1" dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.12.0", + "schemars 0.9.0", + "schemars 1.1.0", "serde_core", + "serde_json", "serde_with_macros", + "time", ] [[package]] name = "serde_with_macros" -version = "3.15.1" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955" +checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -8831,7 +8271,7 @@ 
version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.12.0", "itoa", "ryu", "serde", @@ -8844,7 +8284,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" dependencies = [ - "base16ct 0.2.0", + "base16ct", "serde", ] @@ -8859,18 +8299,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.9" @@ -8881,14 +8309,14 @@ dependencies = [ "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.1", + "opaque-debug", ] [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -8932,23 +8360,13 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.2.0" @@ -8966,8 +8384,8 @@ dependencies = [ "eth2_keystore", "ethereum_serde_utils", "lockfile", - "parking_lot 0.12.3", - "reqwest 0.11.27", + "parking_lot", + "reqwest", "serde", "task_executor", "types", @@ -8975,6 +8393,12 @@ dependencies = [ "validator_metrics", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "similar" version = "2.7.0" @@ -8989,7 +8413,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -9004,7 +8428,7 @@ dependencies = [ "kzg", "logging", "node_test_rig", - "parking_lot 0.12.3", + "parking_lot", "rayon", "sensitive_url", "serde_json", @@ -9016,12 +8440,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "slasher" @@ -9037,17 +8458,17 @@ dependencies = [ "libmdbx", "lmdb-rkv", "lmdb-rkv-sys", - "lru", + "lru 0.12.5", "maplit", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", 
"rayon", "redb", "safe_arith", "serde", "ssz_types", - "strum", + "strum 0.24.1", "tempfile", "tracing", "tree_hash", @@ -9096,15 +8517,15 @@ name = "slot_clock" version = "0.2.0" dependencies = [ "metrics", - "parking_lot 0.12.3", + "parking_lot", "types", ] [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "arbitrary", "serde", @@ -9129,35 +8550,35 @@ dependencies = [ "rand_core 0.6.4", "ring", "rustc_version 0.4.1", - "sha2 0.10.8", + "sha2 0.10.9", "subtle", ] [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] -name = "spin" -version = "0.9.8" +name = "socket2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] [[package]] -name = "spki" -version = "0.6.0" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" @@ -9166,16 +8587,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.9", + "der", ] [[package]] name = "ssz_types" -version = "0.12.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704671195db617afa3d919da8f220f2535f20d0fa8dad96a1c27a38a5f8f6e9c" +checksum = "1fc20a89bab2dabeee65e9c9eb96892dc222c23254b401e1319b85efd852fa31" dependencies = [ "arbitrary", + "context_deserialize", "ethereum_serde_utils", "ethereum_ssz", "itertools 0.14.0", @@ -9188,9 +8610,9 @@ dependencies = [ [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "state_processing" @@ -9208,7 +8630,7 @@ dependencies = [ "itertools 0.10.5", "merkle_proof", "metrics", - "rand 0.9.0", + "rand 0.9.2", "rayon", "safe_arith", "smallvec", @@ -9251,17 +8673,17 @@ dependencies = [ "itertools 0.10.5", "leveldb", "logging", - "lru", + "lru 0.12.5", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "redb", "safe_arith", "serde", "smallvec", "ssz_types", "state_processing", - "strum", + "strum 0.24.1", "superstruct", "tempfile", "tracing", @@ -9289,7 +8711,16 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros", + "strum_macros 0.24.3", +] 
+ +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros 0.27.2", ] [[package]] @@ -9305,6 +8736,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "subtle" version = "2.6.1" @@ -9317,12 +8760,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b986e4a629907f20a2c2a639a75bc22a8b5d99b444e0d83c395f4cb309022bf" dependencies = [ - "darling 0.20.10", + "darling 0.20.11", "itertools 0.13.0", "proc-macro2", "quote", "smallvec", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -9348,9 +8791,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", @@ -9366,15 +8809,9 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -9386,13 +8823,13 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -9400,25 +8837,14 @@ name = "sysinfo" version = "0.26.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" -dependencies = [ - "cfg-if", - "core-foundation-sys", - "libc", - "ntapi", - "once_cell", - "rayon", - "winapi", -] - -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys 0.5.0", +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi", ] [[package]] @@ -9427,19 +8853,9 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "core-foundation 0.9.4", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", + "system-configuration-sys", ] [[package]] @@ -9459,7 +8875,7 @@ 
dependencies = [ "lighthouse_network", "metrics", "network_utils", - "parking_lot 0.12.3", + "parking_lot", "serde", "sysinfo", "types", @@ -9499,26 +8915,25 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.18.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c317e0a526ee6120d8dabad239c8dadca62b24b6f168914bbbc8e2fb1f0e567" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ - "cfg-if", "fastrand", - "getrandom 0.3.1", + "getrandom 0.3.4", "once_cell", - "rustix 1.0.2", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.61.2", ] [[package]] name = "terminal_size" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.0.2", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.60.2", ] [[package]] @@ -9546,11 +8961,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.17", ] [[package]] @@ -9561,28 +8976,27 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -9596,9 +9010,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21f216790c8df74ce3ab25b534e0718da5a1916719771d3fec23315c99e468b" +checksum = "661f1f6a57b3a36dc9174a2c10f19513b4866816e13425d3e418b11cc37bc24c" dependencies = [ "libc", "paste", @@ -9607,9 +9021,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +version = "0.6.1+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" +checksum = "cd8aa5b2ab86a2cefa406d889139c162cbb230092f7d1d7cbc1716405d852a3b" dependencies = [ "cc", "libc", @@ -9617,9 +9031,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +checksum = 
"0359b4327f954e0567e69fb191cf1436617748813819c94b8cd4a431422d053a" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -9627,9 +9041,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.39" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -9642,15 +9056,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -9679,7 +9093,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash 1.1.0", - "sha2 0.10.8", + "sha2 0.10.9", "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", @@ -9697,9 +9111,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -9717,9 +9131,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -9732,32 +9146,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.0" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -9770,16 +9183,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.25.0" @@ -9793,11 +9196,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" 
+version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.23", + "rustls 0.23.35", "tokio", ] @@ -9815,9 +9218,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", @@ -9830,18 +9233,31 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.12.0", "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ "winnow", ] @@ -9856,17 +9272,17 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.8", - "http 1.3.0", + "h2 0.4.12", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "prost", - "socket2", + "socket2 0.5.10", "tokio", "tokio-stream", "tower 0.4.13", @@ -9884,10 +9300,10 @@ dependencies = [ "async-trait", "base64 0.22.1", "bytes", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -9895,7 +9311,7 @@ dependencies = [ "prost", "rustls-native-certs", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.4", "tokio-stream", "tower 0.5.2", "tower-layer", @@ -9931,10 +9347,10 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.8.0", + "indexmap 2.12.0", "pin-project-lite", "slab", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-util", "tower-layer", @@ -9942,6 +9358,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -9980,35 +9414,25 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = 
"81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -10071,9 +9495,9 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee44f4cef85f88b4dea21c0b1f58320bdf35715cf56d840969487cff00613321" +checksum = "2db21caa355767db4fd6129876e5ae278a8699f4a6959b1e3e7aff610b532d52" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -10084,14 +9508,14 @@ dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" +checksum = "711cc655fcbb48384a87dc2bf641b991a15c5ad9afc3caa0b1ab1df3b436f70f" dependencies = [ - "darling 0.20.10", + "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -10106,9 +9530,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" +checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" dependencies = [ "serde", "stable_deref_trait", @@ -10122,9 +9546,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "types" @@ -10153,9 +9577,9 @@ dependencies = [ "merkle_proof", "metastruct", "milhouse", - "parking_lot 0.12.3", + "parking_lot", "paste", - "rand 0.9.0", + "rand 0.9.2", "rand_xorshift 0.4.0", "rayon", "regex", @@ -10228,25 +9652,19 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-normalization" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - [[package]] name = "unicode-xid" version = "0.2.6" @@ -10294,21 +9712,16 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -10327,22 +9740,24 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "serde", ] [[package]] name = "uuid" -version = "1.15.1" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", ] [[package]] name = "validator_client" -version = "8.0.0" +version = "8.0.1" dependencies = [ "account_utils", "beacon_node_fallback", @@ -10355,13 +9770,13 @@ dependencies = [ "eth2", "fdlimit", "graffiti_file", - "hyper 1.6.0", + "hyper 1.8.1", "initialized_validators", "lighthouse_validator_store", "metrics", "monitoring_api", - "parking_lot 0.12.3", - "reqwest 0.11.27", + "parking_lot", + "reqwest", "sensitive_url", "serde", "slashing_protection", @@ -10387,7 +9802,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand 0.9.0", + "rand 0.9.2", "tempfile", "tree_hash", "types", @@ -10416,8 +9831,8 @@ dependencies = [ "lighthouse_validator_store", "lighthouse_version", "logging", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", @@ -10451,7 +9866,7 @@ dependencies = [ "logging", "malloc_utils", "metrics", - "parking_lot 0.12.3", + "parking_lot", "serde", "slot_clock", "tracing", @@ -10508,7 +9923,7 @@ dependencies = [ "futures", "graffiti_file", "logging", - "parking_lot 0.12.3", + "parking_lot", "safe_arith", "slot_clock", "task_executor", @@ -10611,7 +10026,7 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", @@ -10641,50 +10056,37 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = 
"wasm-bindgen" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.100", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" dependencies = [ "cfg-if", "js-sys", @@ -10695,9 +10097,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10705,22 +10107,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.100", - "wasm-bindgen-backend", + "syn 2.0.110", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" dependencies = [ "unicode-ident", ] @@ -10739,25 +10141,24 @@ dependencies = [ ] [[package]] -name = "wasm-timer" -version = "0.2.5" +name = "wasmtimer" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +checksum = "1c598d6b99ea013e35844697fc4670d08339d5cda15588f193c6beedd12f644b" dependencies = [ "futures", "js-sys", - "parking_lot 0.11.2", + "parking_lot", "pin-utils", + "slab", "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", ] [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -10787,8 +10188,8 @@ dependencies = [ "initialized_validators", "lighthouse_validator_store", "logging", - "parking_lot 0.12.3", - "reqwest 0.11.27", + "parking_lot", + "reqwest", "serde", "serde_json", "serde_yaml", @@ -10805,9 +10206,12 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.4" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "which" @@ -10829,9 +10233,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" [[package]] name = "winapi" @@ -10851,11 +10255,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -10874,16 +10278,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" -dependencies = [ - "windows-core 0.58.0", - "windows-targets 0.52.6", -] - [[package]] name = "windows-acl" version = "0.3.0" @@ -10896,15 +10290,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-core" version = "0.53.0" @@ -10917,55 +10302,44 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.58.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", + "windows-link", + "windows-result 0.4.1", + "windows-strings", ] [[package]] name = "windows-implement" -version = "0.58.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "windows-interface" -version = "0.58.0" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "windows-link" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" - -[[package]] -name = "windows-registry" -version = "0.4.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" -dependencies = [ - "windows-result 0.3.1", - "windows-strings 0.3.1", - "windows-targets 0.53.2", -] +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" @@ -10978,37 +10352,18 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06374efe858fab7e4f881500e6e86ec8bc28f9462c47e5a9941a0142ad86b189" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.3.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -11049,6 +10404,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -11097,18 +10470,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.2" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -11131,9 +10505,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -11155,9 +10529,9 @@ checksum = 
"09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -11179,9 +10553,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -11191,9 +10565,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -11215,9 +10589,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -11239,9 +10613,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -11263,9 +10637,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -11287,15 +10661,15 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] @@ -11311,13 +10685,10 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.33.0" +name = "wit-bindgen" +version = "0.46.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" -dependencies = [ - "bitflags 2.9.0", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "workspace_members" @@ -11327,36 +10698,11 @@ dependencies = [ "quote", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version 0.4.1", - "send_wrapper", - "thiserror 1.0.69", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wyz" @@ -11392,7 +10738,7 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -11412,9 +10758,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.25" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" +checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" [[package]] name = "xmltree" @@ -11445,7 +10791,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "static_assertions", @@ -11453,16 +10799,16 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.4" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" +checksum = "deab71f2e20691b4728b349c6cee8fc7223880fa67b6b4f92225ec32225447e5" dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot", "pin-project", - "rand 0.8.5", + "rand 0.9.2", "static_assertions", "web-time", ] @@ -11478,11 +10824,10 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -11490,54 +10835,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "zerocopy-derive 0.7.35", -] - -[[package]] -name = 
"zerocopy" -version = "0.8.23" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ - "zerocopy-derive 0.8.23", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -11557,15 +10882,15 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "serde", "zeroize_derive", @@ -11579,14 +10904,25 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -11595,13 +10931,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -11624,6 +10960,20 @@ dependencies = [ "zstd 0.11.2+zstd.1.5.2", ] +[[package]] +name = "zkvm_execution_layer" +version = "0.1.0" +dependencies = [ + "async-trait", + "execution_layer", + "hashbrown 0.15.5", + "lru 0.12.5", + "serde", + "thiserror 2.0.17", + "tokio", + "types", +] + [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" @@ -11639,7 +10989,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ - "zstd-safe 7.2.3", + "zstd-safe 7.2.4", ] [[package]] @@ -11654,18 +11004,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 03116b3db18..03754a95866 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ members = [ "beacon_node/lighthouse_tracing", "beacon_node/network", "beacon_node/operation_pool", + "beacon_node/proof_generation_service", "beacon_node/store", "beacon_node/timer", "boot_node", @@ -47,8 +48,6 @@ members = [ "common/validator_dir", "common/warp_utils", "common/workspace_members", - "consensus/context_deserialize/context_deserialize", - "consensus/context_deserialize/context_deserialize_derive", "consensus/fixed_bytes", "consensus/fork_choice", "consensus/int_to_bytes", @@ -63,6 +62,7 @@ members = [ "crypto/eth2_wallet", "crypto/kzg", "database_manager", + "dummy_el", "lcli", "lighthouse", "lighthouse/environment", @@ -88,12 +88,13 @@ members = [ "validator_client/validator_metrics", "validator_client/validator_services", "validator_manager", + "zkvm_execution_layer", ] resolver = "2" [workspace.package] edition = "2024" -version = "8.0.0" +version = "8.0.1" [workspace.dependencies] account_utils = { path = "common/account_utils" } @@ -122,10 +123,7 @@ clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } compare_fields = "0.1" console-subscriber = "0.4" -context_deserialize = { path = "consensus/context_deserialize/context_deserialize", features = [ - "all", -] } -context_deserialize_derive = { path = "consensus/context_deserialize/context_deserialize_derive" } +context_deserialize = "0.2" criterion = "0.5" delay_map = "0.4" deposit_contract = { path = "common/deposit_contract" } @@ -143,14 +141,10 @@ eth2_key_derivation = { path = "crypto/eth2_key_derivation" } eth2_keystore = { path = "crypto/eth2_keystore" } eth2_network_config = { path = "common/eth2_network_config" } eth2_wallet = { path = "crypto/eth2_wallet" } -ethereum_hashing = "0.7.0" +ethereum_hashing = "0.8.0" ethereum_serde_utils = "0.8.0" -ethereum_ssz = "0.9.0" -ethereum_ssz_derive = "0.9.0" -ethers-core = "1" -ethers-middleware = { version = "1", default-features = false } -ethers-providers = { version = "1", default-features = false } -ethers-signers = { version = "1", default-features = false } +ethereum_ssz = { version = "0.10.0", features = ["context_deserialize"] } +ethereum_ssz_derive = "0.10.0" execution_layer = { path = "beacon_node/execution_layer" } exit-future = "0.2" filesystem = { path = "common/filesystem" } @@ -187,7 +181,7 @@ malloc_utils = { path = "common/malloc_utils" } maplit = "1" merkle_proof = { path = "consensus/merkle_proof" } metrics = { path = "common/metrics" } -milhouse = { version = "0.7", default-features = false } +milhouse = { version = "0.9", default-features = false, features = ["context_deserialize"] } mockall = "0.13" mockall_double = "0.3" mockito = "1.5.0" @@ -205,15 +199,14 @@ parking_lot = "0.12" paste = "1" pretty_reqwest_error = { path = "common/pretty_reqwest_error" } prometheus = { version = "0.13", default-features = false } +proptest = "1" proto_array = 
{ path = "consensus/proto_array" } -quickcheck = "1" -quickcheck_macros = "1" quote = "1" r2d2 = "0.8" rand = "0.9.0" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = [ +reqwest = { version = "0.12", default-features = false, features = [ "blocking", "json", "stream", @@ -237,7 +230,7 @@ slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = "0.12.2" +ssz_types = { version = "0.14.0", features = ["context_deserialize"] } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } strum = { version = "0.24", features = ["derive"] } @@ -262,8 +255,8 @@ tracing-core = "0.1" tracing-log = "0.2" tracing-opentelemetry = "0.31.0" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } -tree_hash = "0.10.0" -tree_hash_derive = "0.10.0" +tree_hash = "0.12.0" +tree_hash_derive = "0.12.0" types = { path = "consensus/types" } url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } @@ -289,13 +282,6 @@ lto = "fat" codegen-units = 1 incremental = false -[profile.reproducible] -inherits = "release" -debug = false -panic = "abort" -codegen-units = 1 -overflow-checks = true - [profile.release-debug] inherits = "release" debug = true diff --git a/Dockerfile b/Dockerfile index f925836e48e..5ed4a7dd9c8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,19 @@ FROM rust:1.88.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev -COPY . lighthouse ARG FEATURES ARG PROFILE=release ARG CARGO_USE_GIT_CLI=true ENV FEATURES=$FEATURES ENV PROFILE=$PROFILE ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI -RUN cd lighthouse && make +ENV CARGO_INCREMENTAL=1 + +WORKDIR /lighthouse +COPY . . +# Persist the registry and target file across builds. See: https://docs.docker.com/build/cache/optimize/#use-cache-mounts +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/lighthouse/target \ + make FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ @@ -15,4 +21,4 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /usr/local/cargo/bin/lighthouse /usr/local/bin/lighthouse \ No newline at end of file diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 24ba5a58a9b..903515373f8 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -3,42 +3,22 @@ ARG RUST_IMAGE="rust:1.88-bullseye@sha256:8e3c421122bf4cd3b2a866af41a4dd52d87ad9 FROM ${RUST_IMAGE} AS builder # Install specific version of the build dependencies -RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 cmake=3.18.4-2+deb11u1 +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 cmake=3.18.4-2+deb11u1 libjemalloc-dev=5.2.1-3 -# Add target architecture argument with default value ARG RUST_TARGET="x86_64-unknown-linux-gnu" # Copy the project to the container -COPY . 
/app +COPY ./ /app WORKDIR /app -# Get the latest commit timestamp and set SOURCE_DATE_EPOCH (default it to 0 if not passed) -ARG SOURCE_DATE=0 - -# Set environment variables for reproducibility -ARG RUSTFLAGS="-C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $(pwd)=." -ENV SOURCE_DATE_EPOCH=$SOURCE_DATE \ - CARGO_INCREMENTAL=0 \ - LC_ALL=C \ - TZ=UTC \ - RUSTFLAGS="${RUSTFLAGS}" - -# Set the default features if not provided -ARG FEATURES="gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc" - -# Set the default profile if not provided -ARG PROFILE="reproducible" - # Build the project with the reproducible settings -RUN cargo build --bin lighthouse \ - --features "${FEATURES}" \ - --profile "${PROFILE}" \ - --locked \ - --target "${RUST_TARGET}" +RUN make build-reproducible -RUN mv /app/target/${RUST_TARGET}/${PROFILE}/lighthouse /lighthouse +# Move the binary to a standard location +RUN mv /app/target/${RUST_TARGET}/release/lighthouse /lighthouse # Create a minimal final image with just the binary FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a COPY --from=builder /lighthouse /lighthouse + ENTRYPOINT [ "/lighthouse" ] diff --git a/Makefile b/Makefile index 2edc9f86328..a6891b682f7 100644 --- a/Makefile +++ b/Makefile @@ -81,36 +81,67 @@ build-lcli-aarch64: build-lcli-riscv64: cross build --bin lcli --target riscv64gc-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked -# extracts the current source date for reproducible builds -SOURCE_DATE := $(shell git log -1 --pretty=%ct) - -# Default image for x86_64 +# Environment variables for reproducible builds +# Initialize RUSTFLAGS +RUST_BUILD_FLAGS = +# Remove build ID from the binary to ensure reproducibility across builds +RUST_BUILD_FLAGS += -C link-arg=-Wl,--build-id=none +# Remove metadata hash from symbol names to ensure reproducible builds +RUST_BUILD_FLAGS += -C metadata='' + +# Set timestamp from last git commit for reproducible builds +SOURCE_DATE ?= $(shell git log -1 --pretty=%ct) + +# Disable incremental compilation to avoid non-deterministic artifacts +CARGO_INCREMENTAL_VAL = 0 +# Set C locale for consistent string handling and sorting +LOCALE_VAL = C +# Set UTC timezone for consistent time handling across builds +TZ_VAL = UTC + +# Features for reproducible builds +FEATURES_REPRODUCIBLE = $(CROSS_FEATURES),jemalloc-unprefixed + +# Derive the architecture-specific library path from RUST_TARGET +JEMALLOC_LIB_ARCH = $(word 1,$(subst -, ,$(RUST_TARGET))) +JEMALLOC_OVERRIDE = /usr/lib/$(JEMALLOC_LIB_ARCH)-linux-gnu/libjemalloc.a + +# Default target architecture +RUST_TARGET ?= x86_64-unknown-linux-gnu + +# Default images for different architectures RUST_IMAGE_AMD64 ?= rust:1.88-bullseye@sha256:8e3c421122bf4cd3b2a866af41a4dd52d87ad9e315fd2cb5100e87a7187a9816 +RUST_IMAGE_ARM64 ?= rust:1.88-bullseye@sha256:8b22455a7ce2adb1355067638284ee99d21cc516fab63a96c4514beaf370aa94 -# Reproducible build for x86_64 -build-reproducible-x86_64: +.PHONY: build-reproducible +build-reproducible: ## Build the lighthouse binary into `target` directory with reproducible builds + SOURCE_DATE_EPOCH=$(SOURCE_DATE) \ + RUSTFLAGS="${RUST_BUILD_FLAGS} --remap-path-prefix $$(pwd)=." 
\ + CARGO_INCREMENTAL=${CARGO_INCREMENTAL_VAL} \ + LC_ALL=${LOCALE_VAL} \ + TZ=${TZ_VAL} \ + JEMALLOC_OVERRIDE=${JEMALLOC_OVERRIDE} \ + cargo build --bin lighthouse --features "$(FEATURES_REPRODUCIBLE)" --profile "$(PROFILE)" --locked --target $(RUST_TARGET) + +.PHONY: build-reproducible-x86_64 +build-reproducible-x86_64: ## Build reproducible x86_64 Docker image DOCKER_BUILDKIT=1 docker build \ --build-arg RUST_TARGET="x86_64-unknown-linux-gnu" \ --build-arg RUST_IMAGE=$(RUST_IMAGE_AMD64) \ - --build-arg SOURCE_DATE=$(SOURCE_DATE) \ -f Dockerfile.reproducible \ -t lighthouse:reproducible-amd64 . -# Default image for arm64 -RUST_IMAGE_ARM64 ?= rust:1.88-bullseye@sha256:8b22455a7ce2adb1355067638284ee99d21cc516fab63a96c4514beaf370aa94 - -# Reproducible build for aarch64 -build-reproducible-aarch64: +.PHONY: build-reproducible-aarch64 +build-reproducible-aarch64: ## Build reproducible aarch64 Docker image DOCKER_BUILDKIT=1 docker build \ --platform linux/arm64 \ --build-arg RUST_TARGET="aarch64-unknown-linux-gnu" \ --build-arg RUST_IMAGE=$(RUST_IMAGE_ARM64) \ - --build-arg SOURCE_DATE=$(SOURCE_DATE) \ -f Dockerfile.reproducible \ -t lighthouse:reproducible-arm64 . -# Build both architectures -build-reproducible-all: build-reproducible-x86_64 build-reproducible-aarch64 +.PHONY: build-reproducible-all +build-reproducible-all: build-reproducible-x86_64 build-reproducible-aarch64 ## Build both x86_64 and aarch64 reproducible Docker images # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index fd013559785..1e02db1e979 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -28,6 +28,8 @@ dirs = { workspace = true } environment = { workspace = true } eth2_config = { workspace = true } execution_layer = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../zkvm_execution_layer" } genesis = { workspace = true } hex = { workspace = true } http_api = { workspace = true } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index e889f53bb01..7d7332da575 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -26,6 +26,8 @@ ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } fork_choice = { workspace = true } futures = { workspace = true } genesis = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5ffdf951ac1..f6c26bb72c3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -31,6 +31,9 @@ use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; +use crate::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; @@ -55,6 +58,7 @@ use crate::observed_attesters::{ }; use 
crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; +use crate::observed_execution_proofs::ObservedExecutionProofs; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; use crate::persisted_beacon_chain::PersistedBeaconChain; @@ -126,6 +130,7 @@ use store::{ KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{RayonPoolType, ShutdownReason, TaskExecutor}; +use tokio::sync::mpsc::UnboundedSender; use tokio_stream::Stream; use tracing::{Span, debug, debug_span, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; @@ -133,6 +138,7 @@ use types::blob_sidecar::FixedBlobSidecarList; use types::data_column_sidecar::ColumnIndex; use types::payload::BlockProductionVersion; use types::*; +use zkvm_execution_layer::GeneratorRegistry; pub type ForkChoiceError = fork_choice::Error; @@ -343,6 +349,8 @@ pub enum BlockProcessStatus { pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); +pub type ProofGenerationEvent = (Hash256, Slot, Arc>); + pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< ::EthSpec, @@ -414,6 +422,8 @@ pub struct BeaconChain { pub observed_blob_sidecars: RwLock>>, /// Maintains a record of column sidecars seen over the gossip network. pub observed_column_sidecars: RwLock>>, + /// Maintains a record of execution proofs seen over the gossip network. + pub observed_execution_proofs: RwLock, /// Maintains a record of slashable message seen over the gossip network or RPC. pub observed_slashable: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. @@ -482,6 +492,10 @@ pub struct BeaconChain { pub kzg: Arc, /// RNG instance used by the chain. Currently used for shuffling column sidecars in block publishing. pub rng: Arc>>, + /// Registry of zkVM proof generators for altruistic proof generation + pub zkvm_generator_registry: Option>, + /// Sender to notify proof generation service of blocks needing proofs + pub proof_generation_tx: Option>>, } pub enum BeaconBlockResponseWrapper { @@ -1412,10 +1426,10 @@ impl BeaconChain { /// /// Returns `(block_root, block_slot)`. pub fn heads(&self) -> Vec<(Hash256, Slot)> { - self.canonical_head - .fork_choice_read_lock() + let fork_choice = self.canonical_head.fork_choice_read_lock(); + fork_choice .proto_array() - .heads_descended_from_finalization::() + .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()) .iter() .map(|node| (node.root, node.slot)) .collect() @@ -2208,6 +2222,15 @@ impl BeaconChain { }) } + #[instrument(skip_all, level = "trace")] + pub fn verify_execution_proof_for_gossip( + self: &Arc, + execution_proof: Arc, + ) -> Result, GossipExecutionProofError> { + // TODO(zkproofs): Add metrics + GossipVerifiedExecutionProof::new(execution_proof, self) + } + #[instrument(skip_all, level = "trace")] pub fn verify_blob_sidecar_for_gossip( self: &Arc, @@ -3045,6 +3068,33 @@ impl BeaconChain { self.check_gossip_blob_availability_and_import(blob).await } + /// Process a gossip-verified execution proof by storing it in the DA checker. + /// + /// This method takes an execution proof that has already been validated via gossip + /// and stores it in the DataAvailabilityChecker. If all components for a block are + /// now available, the block will be imported to fork choice. 
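// Editorial sketch (not part of this diff): the expected call shape for a
// gossip handler, with `chain` and `raw_proof` as hypothetical stand-ins and
// a no-op publish callback:
//
//     let Ok(verified) = chain.verify_execution_proof_for_gossip(raw_proof) else {
//         return; // gossip-level rejection / peer scoring happens here
//     };
//     match chain.process_gossip_execution_proof(verified, || Ok(())).await {
//         Ok(status) => debug!(?status, "Execution proof processed"),
//         Err(BlockError::DuplicateFullyImported(_)) => {} // proof is redundant
//         Err(e) => warn!(error = ?e, "Execution proof import failed"),
//     }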
+ #[instrument(skip_all, level = "debug")] + pub async fn process_gossip_execution_proof( + self: &Arc, + execution_proof: GossipVerifiedExecutionProof, + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { + let block_root = execution_proof.block_root(); + + // If this block has already been imported to fork choice it must have been available, so + // we don't need to process its execution proofs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + self.check_gossip_execution_proof_availability_and_import(execution_proof, publish_fn) + .await + } + /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was /// imported or errors. #[instrument(skip_all, level = "debug")] @@ -3128,6 +3178,45 @@ .await } + /// Process execution proofs retrieved via RPC and returns the `AvailabilityProcessingStatus`. + /// + /// This method handles execution proofs received from peers during block sync. The proofs + /// are verified and stored in the data availability checker. If all required components + /// (block, blobs/columns, and proofs) are available, the block is imported into fork choice. + pub async fn process_rpc_execution_proofs( + self: &Arc, + slot: Slot, + block_root: Hash256, + execution_proofs: Vec>, + ) -> Result { + // If this block has already been imported to fork choice it must have been available, so + // we don't need to process its execution proofs again. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::DuplicateFullyImported(block_root)); + } + + // Validate that all proofs are for the expected block_root + for proof in &execution_proofs { + if proof.block_root != block_root { + return Err(BlockError::AvailabilityCheck( + AvailabilityCheckError::Unexpected(format!( + "Proof block_root mismatch: expected {}, got {}", + block_root, proof.block_root + )), + )); + } + } + + // TODO(zkproofs): We can't emit SSE events for execution proofs yet + + self.check_rpc_execution_proof_availability_and_import(slot, block_root, execution_proofs) + .await + } + /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. pub async fn process_engine_blobs( self: &Arc, @@ -3570,6 +3659,30 @@ .await } + /// Checks if the provided execution proof can make any cached blocks available, and imports + /// immediately if so, otherwise caches the proof in the data availability checker.
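// Editorial sketch (not part of this diff): feeding RPC-fetched proofs in
// from a sync worker. `chain`, `slot`, `block_root`, and `proofs` are
// hypothetical; every proof must reference `block_root`, otherwise the call
// fails with `AvailabilityCheckError::Unexpected` as implemented above:
//
//     match chain.process_rpc_execution_proofs(slot, block_root, proofs).await? {
//         AvailabilityProcessingStatus::Imported(root) => {
//             // Block, blobs/columns, and proofs are all present; imported.
//         }
//         AvailabilityProcessingStatus::MissingComponents(slot, root) => {
//             // Proofs cached; keep the block lookup alive.
//         }
//     }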
+ async fn check_gossip_execution_proof_availability_and_import( + self: &Arc, + execution_proof: GossipVerifiedExecutionProof, + publish_fn: impl FnOnce() -> Result<(), BlockError>, + ) -> Result { + let block_root = execution_proof.block_root(); + let slot = execution_proof.slot(); + + // TODO(zkproofs): Can we avoid the clone? + let proof_arc = execution_proof.into_inner(); + let proof = (*proof_arc).clone(); + + // Store the proof in the DA checker + let availability = self + .data_availability_checker + .put_verified_execution_proofs(block_root, std::iter::once(proof)) + .map_err(BlockError::AvailabilityCheck)?; + + self.process_availability(slot, availability, publish_fn) + .await + } + fn check_blob_header_signature_and_slashability<'a>( self: &Arc, block_root: Hash256, @@ -3674,6 +3787,28 @@ .await } + /// Checks if the provided execution proofs can make any cached blocks available, and imports + /// immediately if so, otherwise caches the proofs in the data availability checker. + async fn check_rpc_execution_proof_availability_and_import( + self: &Arc, + slot: Slot, + block_root: Hash256, + execution_proofs: Vec>, + ) -> Result { + // TODO(zkproofs): Optional proofs are currently not signed, so we can't + // add any slashability checks here. We also don't want them, because we + // could end up slashing a validator for giving us bad proofs while nodes + // that don't need proofs (most of the network) would never see or care + // about that slashing. + + let availability = self + .data_availability_checker + .put_rpc_execution_proofs(block_root, execution_proofs)?; + + self.process_availability(slot, availability, || Ok(())) + .await + } + fn check_data_column_sidecar_header_signature_and_slashability<'a>( self: &Arc, block_root: Hash256, @@ -4053,6 +4188,20 @@ current_slot, ); + // Notify proof generation service for altruistic proof generation + if let Some(ref proof_gen_tx) = self.proof_generation_tx { + let slot = signed_block.slot(); + let event = (block_root, slot, signed_block.clone()); + + if let Err(e) = proof_gen_tx.send(event) { + debug!( + error = ?e, + ?block_root, + "Failed to send proof generation event" + ); + } + } + + Ok(block_root) + } + @@ -7402,6 +7551,34 @@ && self.spec.is_peer_das_enabled_for_epoch(block_epoch) } + /// Returns true if epoch is within the execution proof retention boundary + pub fn execution_proof_check_required_for_epoch(&self, epoch: Epoch) -> bool { + self.data_availability_checker + .execution_proof_check_required_for_epoch(epoch) + } + + /// Returns true if we should fetch execution proofs for this block + pub fn should_fetch_execution_proofs(&self, block_epoch: Epoch) -> bool { + // Check if ZK-VM mode is enabled + if self.min_execution_proofs_required().is_none() { + return false; + } + + // Only fetch proofs within retention window + self.execution_proof_check_required_for_epoch(block_epoch) + } + + /// Returns the minimum number of execution proofs required + pub fn min_execution_proofs_required(&self) -> Option { + self.data_availability_checker + .min_execution_proofs_required() + } + + /// Returns the execution proof retention boundary epoch + pub fn execution_proof_boundary(&self) -> Option { + self.data_availability_checker.execution_proof_boundary() + } + /// Gets the `LightClientBootstrap` object for a requested block root. /// /// Returns `None` when the state or block is not found in the database.
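A sketch of how the two checks above compose at a hypothetical fetch site (`chain` and `block_epoch` are illustrative; `execution_proof_subnets_for_epoch` is the helper added in the next hunk): proofs are only fetched when ZK-VM mode is enabled and the block sits inside the retention window.

    if chain.should_fetch_execution_proofs(block_epoch) {
        // `should_fetch_execution_proofs` returning true implies that
        // `min_execution_proofs_required()` is Some(_).
        let count = chain.min_execution_proofs_required().unwrap_or(0);
        // Deterministic per-epoch selection, so every node derives the same
        // subnet set for blocks in the same epoch.
        let subnets = chain.execution_proof_subnets_for_epoch(block_epoch, count);
        // ... issue by-root proof requests on `subnets` ...
    }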
@@ -7501,6 +7678,59 @@ impl BeaconChain { .custody_context() .custody_columns_for_epoch(epoch_opt, &self.spec) } + + /// Returns a deterministic list of execution proof subnet IDs to request for a block in the given epoch. + /// + /// The selection is deterministic based on the epoch, ensuring all nodes request the same + /// subnets for blocks in the same epoch. Different epochs will result in different subnet + /// selections, providing rotation over time. + /// + /// # Arguments + /// * `epoch` - The epoch of the block + /// * `count` - Number of subnets to select (typically min_execution_proofs_required) + /// + /// # Returns + /// A vector of `count` subnet IDs, deterministically selected based on the epoch. + pub fn execution_proof_subnets_for_epoch( + &self, + epoch: Epoch, + count: usize, + ) -> Vec { + use types::EXECUTION_PROOF_TYPE_COUNT; + + let total_subnets = EXECUTION_PROOF_TYPE_COUNT as usize; + let count = std::cmp::min(count, total_subnets); + + if count == 0 { + return vec![]; + } + + // Use epoch as a deterministic seed + // Hash the epoch to get a pseudo-random but deterministic ordering + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + use std::hash::{Hash, Hasher}; + epoch.hash(&mut hasher); + let seed = hasher.finish(); + + // Create a deterministic permutation of subnet IDs based on the seed + let mut subnet_ids: Vec = (0..EXECUTION_PROOF_TYPE_COUNT).collect(); + + // Simple deterministic shuffle using the seed + // This is a Fisher-Yates shuffle variant using deterministic randomness + for i in (1..subnet_ids.len()).rev() { + // Use seed + i for deterministic pseudo-random index + let j = ((seed.wrapping_add(i as u64).wrapping_mul(2654435761)) % ((i + 1) as u64)) + as usize; + subnet_ids.swap(i, j); + } + + // Take the first `count` subnet IDs and convert to ExecutionProofId + subnet_ids + .into_iter() + .take(count) + .filter_map(|id| types::ExecutionProofId::new(id).ok()) + .collect() + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1ddc51cc351..374f1e2b360 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1164,9 +1164,9 @@ impl SignatureVerifiedBlock { block_root: Hash256, chain: &BeaconChain, ) -> Result> { - let header = block.signed_block_header(); + let arc_block = block.block_cloned(); Self::new(block, block_root, chain) - .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) + .map_err(|e| BlockSlashInfo::from_early_error_block(arc_block.signed_block_header(), e)) } /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify @@ -1221,9 +1221,13 @@ impl SignatureVerifiedBlock { from: GossipVerifiedBlock, chain: &BeaconChain, ) -> Result> { - let header = from.block.signed_block_header(); - Self::from_gossip_verified_block(from, chain) - .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) + let block = from.block.clone(); + Self::from_gossip_verified_block(from, chain).map_err(|e| { + // Lazily create the header from the block in case of error. Computing the header + // involves some hashing and takes ~13ms which we DO NOT want to do on the hot path of + // block processing (prior to sending newPayload pre-Gloas). 
+ BlockSlashInfo::from_early_error_block(block.signed_block_header(), e) + }) } pub fn block_root(&self) -> Hash256 { @@ -1248,12 +1252,12 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo> { - let header = self.block.signed_block_header(); + let arc_block = self.block.block_cloned(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { load_parent(self.block, chain) - .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? + .map_err(|e| BlockSlashInfo::SignatureValid(arc_block.signed_block_header(), e))? }; ExecutionPendingBlock::from_signature_verified_components( @@ -1264,7 +1268,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc chain, notify_execution_layer, ) - .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) + .map_err(|e| BlockSlashInfo::SignatureValid(arc_block.signed_block_header(), e)) } fn block(&self) -> &SignedBeaconBlock { @@ -1428,11 +1432,11 @@ impl ExecutionPendingBlock { // Spawn the payload verification future as a new task, but don't wait for it to complete. // The `payload_verification_future` will be awaited later to ensure verification completed // successfully. - let current_span = Span::current(); let payload_verification_handle = chain .task_executor .spawn_handle( - payload_verification_future.instrument(current_span), + payload_verification_future + .instrument(debug_span!("execution_payload_verification")), "execution_payload_verification", ) .ok_or(BeaconChainError::RuntimeShutdown)?; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 719c24b9561..c176c47f4dd 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -2,6 +2,7 @@ use crate::ChainConfig; use crate::CustodyContext; use crate::beacon_chain::{ BEACON_CHAIN_DB_KEY, CanonicalHead, LightClientProducerEvent, OP_POOL_DB_KEY, + ProofGenerationEvent, }; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::custody_context::NodeCustodyType; @@ -13,6 +14,7 @@ use crate::kzg_utils::build_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; +use crate::observed_execution_proofs::ObservedExecutionProofs; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::load_custody_context; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -39,10 +41,12 @@ use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::sync::mpsc::UnboundedSender; use tracing::{debug, error, info}; +use types::data_column_custody_group::CustodyIndex; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, - FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, + Epoch, EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -102,7 +106,18 @@ pub struct BeaconChainBuilder { task_executor: Option, validator_monitor_config: Option, node_custody_type: NodeCustodyType, + ordered_custody_column_indices: Option>, rng: Option>, + /// ZK-VM execution layer configuration. + /// + /// TODO(zkproofs): When this is Some(_), the traditional ExecutionLayer should + /// be replaced with ZkVmEngineApi from zkvm_execution_layer. This would allow the + /// --execution-endpoint CLI flag to be optional when running in ZK-VM mode. + zkvm_execution_layer_config: Option, + /// Registry of zkVM proof generators; proof generation is currently altruistic + zkvm_generator_registry: Option>, + /// Sender to notify the proof generation service of blocks needing proofs + proof_generation_tx: Option>>, } impl @@ -141,7 +156,11 @@ where task_executor: None, validator_monitor_config: None, node_custody_type: NodeCustodyType::Fullnode, + ordered_custody_column_indices: None, rng: None, + zkvm_execution_layer_config: None, + zkvm_generator_registry: None, + proof_generation_tx: None, } } @@ -647,6 +666,26 @@ where self } + /// Sets the ZK-VM execution layer configuration. + /// When set, enables ZK-VM execution proof verification mode. + pub fn zkvm_execution_layer_config( + mut self, + config: Option, + ) -> Self { + self.zkvm_execution_layer_config = config; + self + } + + /// Sets the ordered custody column indices for this node. + /// This is used to determine the data columns the node is required to custody. + pub fn ordered_custody_column_indices( + mut self, + ordered_custody_column_indices: Vec, + ) -> Self { + self.ordered_custody_column_indices = Some(ordered_custody_column_indices); + self + } + /// Sets the `BeaconChain` event handler backend. /// /// For example, provide `ServerSentEventHandler` as a `handler`. @@ -682,6 +721,21 @@ where self } + /// Sets the zkVM generator registry for altruistic proof generation. + pub fn zkvm_generator_registry( + mut self, + registry: Arc, + ) -> Self { + self.zkvm_generator_registry = Some(registry); + self + } + + /// Sets a `Sender` to notify the proof generation service of new blocks. + pub fn proof_generation_tx(mut self, sender: UnboundedSender>) -> Self { + self.proof_generation_tx = Some(sender); + self + } + /// Creates a new, empty operation pool.
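The `proof_generation_tx` hook above follows the sender/receiver split Lighthouse commonly uses for background services: the chain holds only an `UnboundedSender`, and the proof generation service drains the receiver. A minimal, runnable model of that pattern (the event payload and wiring below are assumptions for illustration, not the real types):

```rust
use tokio::sync::mpsc;

/// Stand-in for the real per-block event carried on `proof_generation_tx`.
#[derive(Debug)]
struct ProofGenerationEvent(u64);

struct Chain {
    proof_generation_tx: Option<mpsc::UnboundedSender<ProofGenerationEvent>>,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let chain = Chain { proof_generation_tx: Some(tx) };

    // On block import the chain nudges the service; an unbounded send never blocks.
    if let Some(tx) = &chain.proof_generation_tx {
        let _ = tx.send(ProofGenerationEvent(42));
    }
    drop(chain); // close the channel so the receive loop below terminates

    while let Some(event) = rx.recv().await {
        println!("generate proofs for block {:?}", event);
    }
}
```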
fn empty_op_pool(mut self) -> Self { self.op_pool = Some(OperationPool::new()); @@ -740,6 +794,9 @@ where .genesis_state_root .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); + let ordered_custody_column_indices = self + .ordered_custody_column_indices + .ok_or("Cannot build without ordered custody column indices")?; let rng = self.rng.ok_or("Cannot build without an RNG")?; let beacon_proposer_cache: Arc> = <_>::default(); @@ -942,16 +999,24 @@ where custody, self.node_custody_type, head_epoch, + ordered_custody_column_indices, &self.spec, ) } else { ( - CustodyContext::new(self.node_custody_type, &self.spec), + CustodyContext::new( + self.node_custody_type, + ordered_custody_column_indices, + &self.spec, + ), None, ) }; debug!(?custody_context, "Loaded persisted custody context"); + let has_execution_layer_and_proof_gen = + self.execution_layer.is_some() && self.zkvm_generator_registry.is_some(); + let beacon_chain = BeaconChain { spec: self.spec.clone(), config: self.chain_config, @@ -984,6 +1049,7 @@ where observed_block_producers: <_>::default(), observed_column_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), observed_blob_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), + observed_execution_proofs: RwLock::new(ObservedExecutionProofs::default()), observed_slashable: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), @@ -1029,11 +1095,22 @@ where store, Arc::new(custody_context), self.spec, + // Create verifier registry if zkvm mode is enabled + // For now, we use dummy verifiers for all subnets + self.zkvm_execution_layer_config + .as_ref() + .map(|_| Arc::new(zkvm_execution_layer::registry_proof_verification::VerifierRegistry::new_with_dummy_verifiers())), + // Pass whether this node has an execution layer AND generates proofs + // Nodes with EL+proof-gen validate via traditional execution + // Nodes with EL but no proof-gen wait for proofs (lightweight verifier) + has_execution_layer_and_proof_gen, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), rng: Arc::new(Mutex::new(rng)), + zkvm_generator_registry: self.zkvm_generator_registry, + proof_generation_tx: self.proof_generation_tx, }; let head = beacon_chain.head_snapshot(); @@ -1220,7 +1297,9 @@ fn build_data_columns_from_blobs( #[cfg(test)] mod test { use super::*; - use crate::test_utils::{EphemeralHarnessType, get_kzg}; + use crate::test_utils::{ + EphemeralHarnessType, generate_data_column_indices_rand_order, get_kzg, + }; use ethereum_hashing::hash; use genesis::{ DEFAULT_ETH1_BLOCK_HASH, generate_deterministic_keypairs, interop_genesis_state, @@ -1272,6 +1351,9 @@ mod test { .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) .rng(Box::new(StdRng::seed_from_u64(42))) + .ordered_custody_column_indices( + generate_data_column_indices_rand_order::(), + ) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 7dd4c88c513..228e5eb2d27 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -951,6 +951,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_execution_proofs.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + 
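The prune call added above gives `observed_execution_proofs` the same lifecycle as the other observation caches: everything observed before the finalized epoch's start slot is discarded. A small model of the slot-keyed pruning pattern (the types and the strictly-before boundary are illustrative; the real semantics live in `ObservedExecutionProofs`, added later in this diff):

```rust
use std::collections::{BTreeMap, HashSet};

type Slot = u64;
/// (block_root, proof_id) stand-ins, keyed by slot.
type ProofKey = (u64, u8);

#[derive(Default)]
struct ObservedProofs {
    by_slot: BTreeMap<Slot, HashSet<ProofKey>>,
}

impl ObservedProofs {
    /// Returns true if the proof had not been seen before.
    fn observe(&mut self, slot: Slot, key: ProofKey) -> bool {
        self.by_slot.entry(slot).or_default().insert(key)
    }

    /// Drop every observation strictly before `slot`.
    fn prune(&mut self, slot: Slot) {
        self.by_slot = self.by_slot.split_off(&slot);
    }
}

fn main() {
    let mut observed = ObservedProofs::default();
    observed.observe(5, (1, 0));
    observed.observe(9, (2, 1));
    observed.prune(8); // e.g. the finalized epoch's start slot
    assert!(!observed.by_slot.contains_key(&5));
    assert!(observed.by_slot.contains_key(&9));
}
```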
self.observed_slashable.write().prune( new_view .finalized_checkpoint diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index a5ef3ed2f65..c512ce616a1 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -2,13 +2,11 @@ use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; -use std::sync::OnceLock; use std::{ collections::{BTreeMap, HashMap}, sync::atomic::{AtomicU64, Ordering}, }; use tracing::{debug, warn}; -use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; use types::{ChainSpec, ColumnIndex, Epoch, EthSpec, Slot}; /// A delay before making the CGC change effective to the data availability checker. @@ -206,7 +204,7 @@ fn get_validators_custody_requirement(validator_custody_units: u64, spec: &Chain /// Therefore, the custody count at any point in time is calculated as the max of /// the validator custody at that time and the current cli params. /// -/// Choosing the max ensures that we always have the minimum required columns and +/// Choosing the max ensures that we always have the minimum required columns, and /// we can adjust the `status.earliest_available_slot` value to indicate to our peers /// the columns that we can guarantee to serve. #[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Deserialize, Serialize)] @@ -218,7 +216,7 @@ pub enum NodeCustodyType { /// wants to subscribe to the minimum number of columns to enable /// reconstruction (50%) of the full blob data on demand. SemiSupernode, - /// The node isn't running with with any explicit cli parameters + /// The node isn't running with any explicit cli parameters /// or is running with cli parameters to indicate that it wants /// to only subscribe to the minimal custody requirements. #[default] @@ -248,9 +246,9 @@ pub struct CustodyContext { validator_custody_count: AtomicU64, /// Maintains all the validators that this node is connected to currently validator_registrations: RwLock, - /// Stores an immutable, ordered list of all custody columns as determined by the node's NodeID - /// on startup. - all_custody_columns_ordered: OnceLock>, + /// Stores an immutable, ordered list of all data column indices as determined by the node's NodeID + /// on startup. This is used to determine the node's custody columns. + ordered_custody_column_indices: Vec, _phantom_data: PhantomData, } @@ -259,7 +257,11 @@ impl CustodyContext { /// exists. /// /// The `node_custody_type` value is based on current cli parameters. - pub fn new(node_custody_type: NodeCustodyType, spec: &ChainSpec) -> Self { + pub fn new( + node_custody_type: NodeCustodyType, + ordered_custody_column_indices: Vec, + spec: &ChainSpec, + ) -> Self { let cgc_override = node_custody_type.get_custody_count_override(spec); // If there's no override, we initialise `validator_custody_count` to 0.
This has been the // existing behaviour and we maintain this for now to avoid a semantic schema change until @@ -267,7 +269,7 @@ impl CustodyContext { Self { validator_custody_count: AtomicU64::new(cgc_override.unwrap_or(0)), validator_registrations: RwLock::new(ValidatorRegistrations::new(cgc_override)), - all_custody_columns_ordered: OnceLock::new(), + ordered_custody_column_indices, _phantom_data: PhantomData, } } @@ -290,6 +292,7 @@ impl CustodyContext { ssz_context: CustodyContextSsz, node_custody_type: NodeCustodyType, head_epoch: Epoch, + ordered_custody_column_indices: Vec, spec: &ChainSpec, ) -> (Self, Option) { let CustodyContextSsz { @@ -355,39 +358,13 @@ impl CustodyContext { .into_iter() .collect(), }), - all_custody_columns_ordered: OnceLock::new(), + ordered_custody_column_indices, _phantom_data: PhantomData, }; (custody_context, custody_count_changed) } - /// Initializes an ordered list of data columns based on provided custody groups. - /// - /// # Arguments - /// * `all_custody_groups_ordered` - Vector of custody group indices to map to columns - /// * `spec` - Chain specification containing custody parameters - /// - /// # Returns - /// Ok(()) if initialization succeeds, Err with description string if it fails - pub fn init_ordered_data_columns_from_custody_groups( - &self, - all_custody_groups_ordered: Vec, - spec: &ChainSpec, - ) -> Result<(), String> { - let mut ordered_custody_columns = vec![]; - for custody_index in all_custody_groups_ordered { - let columns = compute_columns_for_custody_group::(custody_index, spec) - .map_err(|e| format!("Failed to compute columns for custody group {e:?}"))?; - ordered_custody_columns.extend(columns); - } - self.all_custody_columns_ordered - .set(ordered_custody_columns.into_boxed_slice()) - .map_err(|_| { - "Failed to initialise CustodyContext with computed custody columns".to_string() - }) - } - /// Register a new validator index and updates the list of validators if required. /// /// Also modifies the internal structures if the validator custody has changed to @@ -497,11 +474,7 @@ impl CustodyContext { /// A slice of ordered column indices that should be sampled for this epoch based on the node's custody configuration pub fn sampling_columns_for_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> &[ColumnIndex] { let num_of_columns_to_sample = self.num_of_data_columns_to_sample(epoch, spec); - let all_columns_ordered = self - .all_custody_columns_ordered - .get() - .expect("all_custody_columns_ordered should be initialized"); - &all_columns_ordered[..num_of_columns_to_sample] + &self.ordered_custody_column_indices[..num_of_columns_to_sample] } /// Returns the ordered list of column indices that the node is assigned to custody @@ -528,12 +501,11 @@ impl CustodyContext { self.custody_group_count_at_head(spec) as usize }; - let all_columns_ordered = self - .all_custody_columns_ordered - .get() - .expect("all_custody_columns_ordered should be initialized"); + // This is an unnecessary conversion for spec compliance, basically just multiplying by 1. + let columns_per_custody_group = spec.data_columns_per_group::() as usize; + let custody_column_count = columns_per_custody_group * custody_group_count; - &all_columns_ordered[..custody_group_count] + &self.ordered_custody_column_indices[..custody_column_count] } /// The node has completed backfill for this epoch. 
Update the internal records so the function @@ -599,11 +571,9 @@ impl From<&CustodyContext> for CustodyContextSsz { #[cfg(test)] mod tests { - use rand::rng; - use rand::seq::SliceRandom; - use types::MainnetEthSpec; - use super::*; + use crate::test_utils::generate_data_column_indices_rand_order; + use types::MainnetEthSpec; type E = MainnetEthSpec; @@ -623,13 +593,10 @@ mod tests { ssz_context, NodeCustodyType::Fullnode, head_epoch, + generate_data_column_indices_rand_order::(), spec, ); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, spec) - .expect("should initialise ordered data columns"); custody_context } @@ -668,6 +635,7 @@ mod tests { ssz_context, target_node_custody_type, head_epoch, + generate_data_column_indices_rand_order::(), spec, ); @@ -738,6 +706,7 @@ mod tests { ssz_context, target_node_custody_type, head_epoch, + generate_data_column_indices_rand_order::(), spec, ); @@ -759,7 +728,11 @@ mod tests { #[test] fn no_validators_supernode_default() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Supernode, + generate_data_column_indices_rand_order::(), + &spec, + ); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.number_of_custody_groups @@ -773,7 +746,11 @@ mod tests { #[test] fn no_validators_semi_supernode_default() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::SemiSupernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::SemiSupernode, + generate_data_column_indices_rand_order::(), + &spec, + ); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.number_of_custody_groups / 2 @@ -787,7 +764,11 @@ mod tests { #[test] fn no_validators_fullnode_default() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.custody_requirement, @@ -802,7 +783,11 @@ mod tests { #[test] fn register_single_validator_should_update_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let bal_per_additional_group = spec.balance_per_additional_custody_group; let min_val_custody_requirement = spec.validator_custody_requirement; // One single node increases its balance over 3 epochs. @@ -826,7 +811,11 @@ mod tests { #[test] fn register_multiple_validators_should_update_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let bal_per_additional_group = spec.balance_per_additional_custody_group; let min_val_custody_requirement = spec.validator_custody_requirement; // Add 3 validators over 3 epochs. 
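These tests all feed a pre-shuffled index list straight into `CustodyContext::new`, replacing the old two-step `OnceLock` initialisation. The reason a single fixed ordering suffices is the prefix property that `custody_columns_for_epoch` and `sampling_columns_for_epoch` rely on; a sketch of it follows (the column count and RNG seed are illustrative, since in Lighthouse the ordering derives from the node ID at startup):

```rust
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::SeedableRng;

fn main() {
    // A node-specific but fixed ordering of all column indices.
    let mut ordered: Vec<u64> = (0..128).collect();
    ordered.shuffle(&mut StdRng::seed_from_u64(1234));

    // Custody/sampling sets are prefixes of that one ordering, so raising
    // the CGC only ever grows the set; it never reshuffles it.
    let at_cgc_8 = &ordered[..8];
    let at_cgc_16 = &ordered[..16];
    assert_eq!(&at_cgc_16[..8], at_cgc_8);
}
```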
@@ -863,7 +852,11 @@ mod tests { #[test] fn register_validators_should_not_update_cgc_for_supernode() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Supernode, + generate_data_column_indices_rand_order::(), + &spec, + ); let bal_per_additional_group = spec.balance_per_additional_custody_group; // Add 3 validators over 3 epochs. @@ -901,7 +894,11 @@ mod tests { #[test] fn cgc_change_should_be_effective_to_sampling_after_delay() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let current_slot = Slot::new(10); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let default_sampling_size = @@ -932,7 +929,11 @@ mod tests { #[test] fn validator_dropped_after_no_registrations_within_expiry_should_not_reduce_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let current_slot = Slot::new(10); let val_custody_units_1 = 10; let val_custody_units_2 = 5; @@ -974,7 +975,11 @@ mod tests { #[test] fn validator_dropped_after_no_registrations_within_expiry() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + ); let current_slot = Slot::new(10); let val_custody_units_1 = 10; let val_custody_units_2 = 5; @@ -1021,37 +1026,6 @@ mod tests { ); } - #[test] - fn should_init_ordered_data_columns_and_return_sampling_columns() { - let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let sampling_size = custody_context.num_of_data_columns_to_sample(Epoch::new(0), &spec); - - // initialise ordered columns - let mut all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - all_custody_groups_ordered.shuffle(&mut rng()); - - custody_context - .init_ordered_data_columns_from_custody_groups( - all_custody_groups_ordered.clone(), - &spec, - ) - .expect("should initialise ordered data columns"); - - let actual_sampling_columns = - custody_context.sampling_columns_for_epoch(Epoch::new(0), &spec); - - let expected_sampling_columns = &all_custody_groups_ordered - .iter() - .flat_map(|custody_index| { - compute_columns_for_custody_group::(*custody_index, &spec) - .expect("should compute columns for custody group") - }) - .collect::>()[0..sampling_size]; - - assert_eq!(actual_sampling_columns, expected_sampling_columns) - } - /// Update the validator every epoch and assert cgc against expected values. 
fn register_validators_and_assert_cgc( custody_context: &CustodyContext, @@ -1077,12 +1051,12 @@ mod tests { #[test] fn custody_columns_for_epoch_no_validators_fullnode() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + ); assert_eq!( custody_context.custody_columns_for_epoch(None, &spec).len(), @@ -1093,12 +1067,12 @@ mod tests { #[test] fn custody_columns_for_epoch_no_validators_supernode() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); - - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Supernode, + ordered_custody_column_indices, + &spec, + ); assert_eq!( custody_context.custody_columns_for_epoch(None, &spec).len(), @@ -1109,14 +1083,14 @@ mod tests { #[test] fn custody_columns_for_epoch_with_validators_should_match_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + ); let val_custody_units = 10; - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); - let _ = custody_context.register_validators( vec![( 0, @@ -1135,14 +1109,14 @@ mod tests { #[test] fn custody_columns_for_epoch_specific_epoch_uses_epoch_cgc() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); - let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); + let ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = CustodyContext::::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + ); let test_epoch = Epoch::new(5); - custody_context - .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec) - .expect("should initialise ordered data columns"); - let expected_cgc = custody_context.custody_group_count_at_epoch(test_epoch, &spec); assert_eq!( custody_context @@ -1165,6 +1139,7 @@ mod tests { ssz_context, NodeCustodyType::Fullnode, Epoch::new(0), + generate_data_column_indices_rand_order::(), &spec, ); @@ -1198,7 +1173,11 @@ mod tests { fn restore_semi_supernode_with_validators_can_exceed_64() { let spec = E::default_spec(); let semi_supernode_cgc = spec.number_of_custody_groups / 2; // 64 - let custody_context = CustodyContext::::new(NodeCustodyType::SemiSupernode, &spec); + let custody_context = 
CustodyContext::::new( + NodeCustodyType::SemiSupernode, + generate_data_column_indices_rand_order::(), + &spec, + ); // Verify initial CGC is 64 (semi-supernode) assert_eq!( @@ -1348,6 +1327,7 @@ mod tests { ssz_context, NodeCustodyType::Fullnode, Epoch::new(20), + generate_data_column_indices_rand_order::(), &spec, ); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 644c4716985..a4c86b5202e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -18,12 +18,13 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; -use tracing::{debug, error, instrument}; +use tracing::{debug, error, instrument, warn}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch, - EthSpec, Hash256, SignedBeaconBlock, Slot, + EthSpec, ExecutionProof, ExecutionProofId, Hash256, SignedBeaconBlock, Slot, }; +use zkvm_execution_layer::registry_proof_verification::VerifierRegistry; mod error; mod overflow_lru_cache; @@ -86,6 +87,8 @@ pub struct DataAvailabilityChecker { kzg: Arc, custody_context: Arc>, spec: Arc, + /// Registry of proof verifiers for different zkVM proof IDs. + verifier_registry: Option>, } pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); @@ -118,6 +121,7 @@ impl Debug for Availability { } impl DataAvailabilityChecker { + #[allow(clippy::too_many_arguments)] pub fn new( complete_blob_backfill: bool, slot_clock: T::SlotClock, @@ -125,12 +129,15 @@ impl DataAvailabilityChecker { store: BeaconStore, custody_context: Arc>, spec: Arc, + verifier_registry: Option>, + has_execution_layer_and_proof_gen: bool, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY_NON_ZERO, store, custody_context.clone(), spec.clone(), + has_execution_layer_and_proof_gen, )?; Ok(Self { complete_blob_backfill, @@ -139,6 +146,7 @@ impl DataAvailabilityChecker { kzg, custody_context, spec, + verifier_registry, }) } @@ -169,6 +177,54 @@ impl DataAvailabilityChecker { }) } + /// Return the set of cached execution proof IDs for `block_root`. Returns None if there is + /// no block component for `block_root`. + pub fn cached_execution_proof_subnet_ids( + &self, + block_root: &Hash256, + ) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| proof.proof_id) + .collect::>() + }) + }) + } + + /// Get proof IDs we already have for a block. + /// Used when creating RPC requests to tell peers what we don't need. + pub fn get_existing_proof_ids(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| proof.proof_id) + .collect::>() + }) + }) + } + + /// Get all execution proofs we have for a block. + /// Used when responding to RPC requests. 
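`get_existing_proof_ids` above is the cache half of request construction: combined with `execution_proof_subnets_for_epoch`, a requester asks peers only for what is still missing. A self-contained sketch of that set difference (plain `u8` values stand in for `ExecutionProofId`, and `missing_proof_ids` is a hypothetical helper, not a function from this diff):

```rust
use std::collections::HashSet;

/// Hypothetical helper: return the expected IDs we do not yet hold.
fn missing_proof_ids(expected: &[u8], existing: Option<HashSet<u8>>) -> Vec<u8> {
    // None means the block has no entry in the availability cache yet.
    let existing = existing.unwrap_or_default();
    expected
        .iter()
        .copied()
        .filter(|id| !existing.contains(id))
        .collect()
}

fn main() {
    // `expected` would come from execution_proof_subnets_for_epoch, and
    // `existing` from get_existing_proof_ids.
    let expected = [3u8, 7];
    let existing = Some(HashSet::from([7u8]));
    assert_eq!(missing_proof_ids(&expected, existing), vec![3]);
}
```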
+ pub fn get_execution_proofs(&self, block_root: &Hash256) -> Option>> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_execution_proofs() + .iter() + .map(|proof| Arc::new(proof.clone())) + .collect::>() + }) + }) + } + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is /// no block component for `block_root`. pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { @@ -193,6 +249,63 @@ impl DataAvailabilityChecker { }) } + /// Check if an execution proof is already cached in the availability cache. + /// + /// We usually call this method if the proof was made available via RPC, and we later receive it via Gossip. + /// If it exists in the cache, we know it has already passed validation, + /// even though this particular instance may not have been seen/published on gossip yet. + pub fn is_execution_proof_cached( + &self, + block_root: &Hash256, + execution_proof: &ExecutionProof, + ) -> bool { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.is_some_and(|components| { + components + .get_cached_execution_proofs() + .iter() + .any(|cached| cached == execution_proof) + }) + }) + } + + /// Verify a single execution proof for gossip. + /// + /// This performs cryptographic verification of the proof without requiring the full block. + /// + /// Returns: + /// - Ok(true) if proof is valid + /// - Ok(false) if proof is invalid + /// - Err if no verifier is configured or verification fails + pub fn verify_execution_proof_for_gossip( + &self, + proof: &ExecutionProof, + ) -> Result { + let Some(verifier_registry) = &self.verifier_registry else { + // No verifier configured but receiving proofs - this is a configuration error. + // If the chain spec enables zkVM, the node must have --activate-zkvm flag set. + return Err(AvailabilityCheckError::ProofVerificationError( + "Node is receiving execution proofs but zkVM verification is not enabled. \ Use --activate-zkvm flag to enable proof verification." + .to_string(), + )); + }; + + let subnet_id = proof.proof_id; + let verifier = verifier_registry.get_verifier(subnet_id).ok_or_else(|| { + warn!(?subnet_id, "No verifier registered for subnet"); + AvailabilityCheckError::UnsupportedProofID(subnet_id) + })?; + + verifier.verify(proof).map_err(|e| { + AvailabilityCheckError::ProofVerificationError(format!( + "Proof verification failed: {:?}", + e + )) + }) + } + /// Get a blob from the availability cache. pub fn get_blob( &self, @@ -269,6 +382,117 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, verified_custody_columns) } + /// Put a list of execution proofs received via RPC into the availability cache. + /// This performs cryptographic verification on the proofs.
+ #[instrument(skip_all, level = "trace")] + pub fn put_rpc_execution_proofs( + &self, + block_root: Hash256, + proofs: Vec>, + ) -> Result, AvailabilityCheckError> { + debug!( + ?block_root, + num_proofs = proofs.len(), + "Verifying and storing execution proofs in DA checker" + ); + + // If no verifier registry is configured, skip verification + let Some(verifier_registry) = &self.verifier_registry else { + debug!( + ?block_root, + "No verifier registry configured, storing proofs without verification" + ); + let owned_proofs = proofs.iter().map(|p| (**p).clone()); + return self + .availability_cache + .put_verified_execution_proofs(block_root, owned_proofs); + }; + + // Get the execution payload hash from the block + let execution_payload_hash = self + .availability_cache + .peek_pending_components(&block_root, |components| { + components.and_then(|c| c.block.as_ref().and_then(|b| b.execution_payload_hash())) + }) + .ok_or_else(|| { + warn!( + ?block_root, + "Cannot verify proofs: block not in cache or has no execution payload" + ); + AvailabilityCheckError::MissingExecutionPayload + })?; + + debug!( + ?block_root, + ?execution_payload_hash, + "Got execution payload hash for proof verification" + ); + + let mut verified_proofs = Vec::new(); + for proof in proofs { + let proof_id = proof.proof_id; + + // Check that the proof's block_hash matches the execution payload hash + if proof.block_hash != execution_payload_hash { + warn!( + ?block_root, + ?proof_id, + proof_hash = ?proof.block_hash, + ?execution_payload_hash, + "Proof execution payload hash mismatch" + ); + return Err(AvailabilityCheckError::ExecutionPayloadHashMismatch { + proof_hash: proof.block_hash, + block_hash: execution_payload_hash, + }); + } + + let verifier = verifier_registry.get_verifier(proof_id).ok_or_else(|| { + warn!(?proof_id, "No verifier registered for proof ID"); + AvailabilityCheckError::UnsupportedProofID(proof_id) + })?; + + // Verify the proof (proof contains block_hash internally) + match verifier.verify(&proof) { + Ok(true) => { + debug!(?proof_id, ?block_root, "Proof verification succeeded"); + verified_proofs.push((*proof).clone()); + } + Ok(false) => { + warn!( + ?proof_id, + ?block_root, + "Proof verification failed: proof is invalid" + ); + return Err(AvailabilityCheckError::InvalidProof { + proof_id, + reason: "Proof verification returns false".to_string(), + }); + } + Err(e) => { + warn!( + ?proof_id, + ?block_root, + error = ?e, + "Proof verification error" + ); + return Err(AvailabilityCheckError::ProofVerificationError( + e.to_string(), + )); + } + } + } + + debug!( + ?block_root, + verified_count = verified_proofs.len(), + "All proofs verified successfully" + ); + + self.availability_cache + .put_verified_execution_proofs(block_root, verified_proofs) + } + /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -338,6 +562,20 @@ impl DataAvailabilityChecker { .put_kzg_verified_data_columns(block_root, custody_columns) } + /// Put execution proofs into the availability cache as pending components. + /// + /// Returns `Availability` which has information about whether all components have been + /// received or more are required. 
+ #[instrument(skip_all, level = "trace")] + pub fn put_verified_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + self.availability_cache + .put_verified_execution_proofs(block_root, execution_proofs) + } + /// Check if we have all the blobs for a block. Returns `Availability` which has information /// about whether all components have been received or more are required. pub fn put_executed_block( @@ -566,6 +804,44 @@ impl DataAvailabilityChecker { }) } + /// The epoch at which we require execution proofs for block processing. + /// + /// Note: This follows the same pattern as blob retention: proofs are required starting from + /// the zkvm_fork epoch, but only retained for a configured number of epochs. + /// + /// TODO(zkproofs): We don't store proofs forever and we also don't store + /// blobs forever, perhaps we should because when the blob disappears, we may not + /// be able to remake the proof when we put blobs in blocks. + /// We don't for now because proofs are quite large at the moment. + /// + /// Returns `None` if ZK-VM mode is disabled. + pub fn execution_proof_boundary(&self) -> Option { + let zkvm_fork_epoch = self.spec.zkvm_fork_epoch()?; + + let current_epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch()); + + // Calculate retention boundary + let proof_retention_epoch = + current_epoch.saturating_sub(self.spec.min_epochs_for_execution_proof_requests); + + // Return max of fork epoch and retention boundary + // This ensures: + // 1. Proofs are never required before the zkvm fork + // 2. Proofs are only retained for the configured number of epochs + Some(std::cmp::max(zkvm_fork_epoch, proof_retention_epoch)) + } + + /// Returns true if the given epoch lies within the proof retention boundary. + pub fn execution_proof_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { + self.execution_proof_boundary() + .is_some_and(|boundary_epoch| block_epoch >= boundary_epoch) + } + + /// Returns the minimum number of execution proofs required for ZK-VM mode. + pub fn min_execution_proofs_required(&self) -> Option { + self.spec.zkvm_min_proofs_required() + } + /// Collects metrics from the data availability checker. 
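The retention arithmetic in `execution_proof_boundary` above is easiest to check with concrete numbers. The sketch below mirrors its max-of-fork-and-window logic; the fork epoch and 64-epoch window are made up for illustration, while the real window is `min_epochs_for_execution_proof_requests`:

```rust
/// Mirrors execution_proof_boundary: max(fork epoch, head - retention window).
/// None means zkVM mode is disabled, so no proofs are ever required.
fn proof_boundary(zkvm_fork_epoch: Option<u64>, current_epoch: u64, retention: u64) -> Option<u64> {
    let fork_epoch = zkvm_fork_epoch?;
    Some(fork_epoch.max(current_epoch.saturating_sub(retention)))
}

fn main() {
    // Shortly after the fork, the boundary stays pinned at the fork epoch.
    assert_eq!(proof_boundary(Some(100), 110, 64), Some(100));
    // Once the chain outruns the window, the boundary trails the head.
    assert_eq!(proof_boundary(Some(100), 1000, 64), Some(936));
    // zkVM mode disabled: no proof requirement at any epoch.
    assert_eq!(proof_boundary(None, 1000, 64), None);
}
```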
pub fn metrics(&self) -> DataAvailabilityCheckerMetrics { DataAvailabilityCheckerMetrics { @@ -866,11 +1142,11 @@ mod test { use crate::CustodyContext; use crate::custody_context::NodeCustodyType; use crate::test_utils::{ - EphemeralHarnessType, NumBlobs, generate_rand_block_and_data_columns, get_kzg, + EphemeralHarnessType, NumBlobs, generate_data_column_indices_rand_order, + generate_rand_block_and_data_columns, get_kzg, }; use rand::SeedableRng; use rand::prelude::StdRng; - use rand::seq::SliceRandom; use slot_clock::{SlotClock, TestingSlotClock}; use std::collections::HashSet; use std::sync::Arc; @@ -892,8 +1168,6 @@ mod test { let da_checker = new_da_checker(spec.clone()); let custody_context = &da_checker.custody_context; - let all_column_indices_ordered = - init_custody_context_with_ordered_columns(custody_context, &mut rng, &spec); // GIVEN a single 32 ETH validator is attached slot 0 let epoch = Epoch::new(0); @@ -926,7 +1200,8 @@ mod test { &spec, ); let block_root = Hash256::random(); - let requested_columns = &all_column_indices_ordered[..10]; + let custody_columns = custody_context.custody_columns_for_epoch(None, &spec); + let requested_columns = &custody_columns[..10]; da_checker .put_rpc_custody_columns( block_root, @@ -971,8 +1246,6 @@ mod test { let da_checker = new_da_checker(spec.clone()); let custody_context = &da_checker.custody_context; - let all_column_indices_ordered = - init_custody_context_with_ordered_columns(custody_context, &mut rng, &spec); // GIVEN a single 32 ETH validator is attached slot 0 let epoch = Epoch::new(0); @@ -1006,7 +1279,8 @@ mod test { &spec, ); let block_root = Hash256::random(); - let requested_columns = &all_column_indices_ordered[..10]; + let custody_columns = custody_context.custody_columns_for_epoch(None, &spec); + let requested_columns = &custody_columns[..10]; let gossip_columns = data_columns .into_iter() .filter(|d| requested_columns.contains(&d.index)) @@ -1096,8 +1370,6 @@ mod test { let da_checker = new_da_checker(spec.clone()); let custody_context = &da_checker.custody_context; - let all_column_indices_ordered = - init_custody_context_with_ordered_columns(custody_context, &mut rng, &spec); // Set custody requirement to 65 columns (enough to trigger reconstruction) let epoch = Epoch::new(1); @@ -1127,7 +1399,8 @@ mod test { // Add 64 columns to the da checker (enough to be able to reconstruct) // Order by all_column_indices_ordered, then take first 64 - let custody_columns = all_column_indices_ordered + let custody_columns = custody_context.custody_columns_for_epoch(None, &spec); + let custody_columns = custody_columns .iter() .filter_map(|&col_idx| data_columns.iter().find(|d| d.index == col_idx).cloned()) .take(64) @@ -1177,19 +1450,6 @@ mod test { ); } - fn init_custody_context_with_ordered_columns( - custody_context: &Arc>, - mut rng: &mut StdRng, - spec: &ChainSpec, - ) -> Vec { - let mut all_data_columns = (0..spec.number_of_custody_groups).collect::>(); - all_data_columns.shuffle(&mut rng); - custody_context - .init_ordered_data_columns_from_custody_groups(all_data_columns.clone(), spec) - .expect("should initialise ordered custody columns"); - all_data_columns - } - fn new_da_checker(spec: Arc) -> DataAvailabilityChecker { let slot_clock = TestingSlotClock::new( Slot::new(0), @@ -1198,7 +1458,12 @@ mod test { ); let kzg = get_kzg(&spec); let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); - let custody_context = Arc::new(CustodyContext::new(NodeCustodyType::Fullnode, &spec)); + let 
ordered_custody_column_indices = generate_data_column_indices_rand_order::(); + let custody_context = Arc::new(CustodyContext::new( + NodeCustodyType::Fullnode, + ordered_custody_column_indices, + &spec, + )); let complete_blob_backfill = false; DataAvailabilityChecker::new( complete_blob_backfill, @@ -1207,6 +1472,8 @@ mod test { store, custody_context, spec, + None, + false, ) .expect("should initialise data availability checker") } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index c9efb7a4149..e5158827479 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -1,5 +1,5 @@ use kzg::{Error as KzgError, KzgCommitment}; -use types::{BeaconStateError, ColumnIndex, Hash256}; +use types::{BeaconStateError, ColumnIndex, ExecutionProofId, Hash256}; #[derive(Debug)] pub enum Error { @@ -22,6 +22,27 @@ pub enum Error { BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), SlotClockError, + /// Execution proof verification failed - the proof is invalid. + /// Penalize the peer; a peer should not forward invalid proofs. + InvalidProof { + proof_id: ExecutionProofId, + reason: String, + }, + /// No verifier registered for this proof ID. + /// Internal error; no peer penalization. + UnsupportedProofID(ExecutionProofId), + /// Error during proof verification process. + /// Internal error; no peer penalization. + ProofVerificationError(String), + /// Could not extract execution payload from block. + /// Internal error; no peer penalization. + MissingExecutionPayload, + /// Execution payload hash mismatch between proof and block. + /// Penalize the peer, as for an invalid proof. + ExecutionPayloadHashMismatch { + proof_hash: types::ExecutionBlockHash, + block_hash: types::ExecutionBlockHash, + }, } #[derive(PartialEq, Eq)] @@ -44,13 +65,18 @@ impl Error { | Error::ParentStateMissing(_) | Error::BlockReplayError(_) | Error::RebuildingStateCaches(_) - | Error::SlotClockError => ErrorCategory::Internal, + | Error::SlotClockError + | Error::UnsupportedProofID(_) + | Error::ProofVerificationError(_) + | Error::MissingExecutionPayload => ErrorCategory::Internal, Error::InvalidBlobs { .. } | Error::InvalidColumn { .. } | Error::ReconstructColumnsError { .. } | Error::BlobIndexInvalid(_) | Error::DataColumnIndexInvalid(_) - | Error::KzgCommitmentMismatch { .. } => ErrorCategory::Malicious, + | Error::KzgCommitmentMismatch { .. } + | Error::InvalidProof { .. } + | Error::ExecutionPayloadHashMismatch { ..
} => ErrorCategory::Malicious, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 5e6322ae95a..ca32744bafb 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -55,6 +55,16 @@ impl CachedBlock { .blob_kzg_commitments() .map_or(0, |commitments| commitments.len()) } + + /// Get the execution payload hash if this block has an execution payload + pub fn execution_payload_hash(&self) -> Option { + self.as_block() + .message() + .body() + .execution_payload() + .ok() + .map(|payload| payload.execution_payload_ref().block_hash()) + } } /// This represents the components of a partially available block @@ -74,6 +84,7 @@ pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, + pub verified_execution_proofs: Vec, pub block: Option>, pub reconstruction_started: bool, span: Span, @@ -199,6 +210,50 @@ impl PendingComponents { Ok(()) } + /// Returns an immutable reference to the cached execution proofs. + pub fn get_cached_execution_proofs(&self) -> &[types::ExecutionProof] { + &self.verified_execution_proofs + } + + /// Check if we have a specific proof + pub fn has_proof_with_id(&self, proof_id: types::ExecutionProofId) -> bool { + self.verified_execution_proofs + .iter() + .any(|proof| proof.proof_id == proof_id) + } + + /// Get the number of unique subnet proofs we have + pub fn execution_proof_subnet_count(&self) -> usize { + self.verified_execution_proofs.len() + } + + /// Merges a single execution proof into the cache. + /// + /// Proofs are only inserted if: + /// 1. We don't already have a proof from this subnet for this block + /// 2. The proof's block_hash matches the cached block_root (if block exists) + pub fn merge_execution_proof(&mut self, proof: types::ExecutionProof) { + // Verify the proof is for the correct block + // ExecutionBlockHash is a wrapper around Hash256, so we need to convert + + // Don't insert duplicate proofs + if self.has_proof_with_id(proof.proof_id) { + return; + } + + self.verified_execution_proofs.push(proof); + } + + /// Merges a given set of execution proofs into the cache. + pub fn merge_execution_proofs>( + &mut self, + execution_proofs: I, + ) { + for proof in execution_proofs { + self.merge_execution_proof(proof); + } + } + /// Inserts a new block and revalidates the existing blobs against it. /// /// Blobs that don't match the new block's commitments are evicted. @@ -213,10 +268,11 @@ impl PendingComponents { /// /// WARNING: This function can potentially take a lot of time if the state needs to be /// reconstructed from disk. Ensure you are not holding any write locks while calling this. - pub fn make_available( + fn make_available( &self, spec: &Arc, num_expected_columns_opt: Option, + has_execution_layer_and_proof_gen: bool, recover: R, ) -> Result>, AvailabilityCheckError> where @@ -294,6 +350,23 @@ impl PendingComponents { return Ok(None); }; + // Check if this node needs execution proofs to validate blocks. + // Nodes that have EL and generate proofs validate via EL execution. + // Nodes that have EL but DON'T generate proofs are lightweight verifiers and wait for proofs. 
+ // TODO(zkproofs): This is a technicality mainly because we cannot remove the EL on kurtosis + // ie each CL is coupled with an EL + let needs_execution_proofs = + spec.zkvm_min_proofs_required().is_some() && !has_execution_layer_and_proof_gen; + + if needs_execution_proofs { + let min_proofs = spec.zkvm_min_proofs_required().unwrap(); + let num_proofs = self.execution_proof_subnet_count(); + if num_proofs < min_proofs { + // Not enough execution proofs yet + return Ok(None); + } + } + // Block is available, construct `AvailableExecutedBlock` let blobs_available_timestamp = match blob_data { @@ -340,6 +413,7 @@ impl PendingComponents { block_root, verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], + verified_execution_proofs: vec![], block: None, reconstruction_started: false, span, @@ -372,7 +446,9 @@ impl PendingComponents { pub fn status_str(&self, num_expected_columns_opt: Option) -> String { let block_count = if self.block.is_some() { 1 } else { 0 }; - if let Some(num_expected_columns) = num_expected_columns_opt { + let proof_count = self.execution_proof_subnet_count(); + + let base_status = if let Some(num_expected_columns) = num_expected_columns_opt { format!( "block {} data_columns {}/{}", block_count, @@ -391,6 +467,13 @@ impl PendingComponents { self.verified_blobs.iter().flatten().count(), num_expected_blobs ) + }; + + // Append execution proof count if we have any + if proof_count > 0 { + format!("{} proofs {}", base_status, proof_count) + } else { + base_status } } } @@ -405,6 +488,10 @@ pub struct DataAvailabilityCheckerInner { state_cache: StateLRUCache, custody_context: Arc>, spec: Arc, + /// Whether this node has an execution layer AND generates proofs. + /// - true: Node has EL and generates proofs → validates via EL execution + /// - false: Node either has no EL, or has EL but doesn't generate → waits for proofs (lightweight verifier) + has_execution_layer_and_proof_gen: bool, } // This enum is only used internally within the crate in the reconstruction function to improve @@ -422,12 +509,14 @@ impl DataAvailabilityCheckerInner { beacon_store: BeaconStore, custody_context: Arc>, spec: Arc, + has_execution_layer_and_proof_gen: bool, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, + has_execution_layer_and_proof_gen, }) } @@ -575,6 +664,53 @@ impl DataAvailabilityCheckerInner { ) } + /// Puts execution proofs into the availability cache as pending components. 
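The proof-count gate added to `make_available` above reduces to a small predicate: only nodes without both an EL and proof generation must wait for proofs. A self-contained model of that rule (the function shape is illustrative, not the real signature):

```rust
/// None => zkVM mode disabled (mirrors zkvm_min_proofs_required returning None).
fn proofs_satisfied(
    min_proofs_required: Option<usize>,
    has_el_and_proof_gen: bool,
    proofs_seen: usize,
) -> bool {
    match min_proofs_required {
        // Lightweight verifiers must gather enough proofs before import.
        Some(min) if !has_el_and_proof_gen => proofs_seen >= min,
        // Proof-generating nodes (and non-zkVM networks) validate via the EL.
        _ => true,
    }
}

fn main() {
    assert!(proofs_satisfied(None, false, 0)); // zkVM disabled
    assert!(proofs_satisfied(Some(2), true, 0)); // validates via EL execution
    assert!(!proofs_satisfied(Some(2), false, 1)); // still waiting for proofs
    assert!(proofs_satisfied(Some(2), false, 2)); // enough proofs: available
}
```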
+ pub fn put_verified_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + let mut execution_proofs = execution_proofs.into_iter().peekable(); + + if execution_proofs.peek().is_none() { + // No proofs to process + return Ok(Availability::MissingComponents(block_root)); + } + + // Try to get epoch from existing pending components (if block already arrived) + // Otherwise use Epoch::new(0) as placeholder (will be corrected when block arrives) + // Also the component cannot be marked as available, if the block is missing + let epoch = self + .critical + .read() + .peek(&block_root) + .and_then(|pending| pending.epoch()) + .unwrap_or_else(|| types::Epoch::new(0)); + + let pending_components = + self.update_or_insert_pending_components(block_root, epoch, |pending_components| { + pending_components.merge_execution_proofs(execution_proofs); + Ok(()) + })?; + + let num_expected_columns_opt = self.get_num_expected_columns(epoch); + + pending_components.span.in_scope(|| { + debug!( + component = "execution_proofs", + status = pending_components.status_str(num_expected_columns_opt), + num_proofs = pending_components.execution_proof_subnet_count(), + "Component added to data availability checker" + ); + }); + + self.check_availability_and_cache_components( + block_root, + pending_components, + num_expected_columns_opt, + ) + } + fn check_availability_and_cache_components( &self, block_root: Hash256, @@ -584,6 +720,7 @@ impl DataAvailabilityCheckerInner { if let Some(available_block) = pending_components.make_available( &self.spec, num_expected_columns_opt, + self.has_execution_layer_and_proof_gen, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? { // Explicitly drop read lock before acquiring write lock @@ -823,6 +960,7 @@ impl DataAvailabilityCheckerInner { mod test { use super::*; + use crate::test_utils::generate_data_column_indices_rand_order; use crate::{ blob_verification::GossipVerifiedBlob, block_verification::PayloadVerificationOutcome, @@ -1023,13 +1161,18 @@ mod test { let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); - let custody_context = Arc::new(CustodyContext::new(NodeCustodyType::Fullnode, &spec)); + let custody_context = Arc::new(CustodyContext::new( + NodeCustodyType::Fullnode, + generate_data_column_indices_rand_order::(), + &spec, + )); let cache = Arc::new( DataAvailabilityCheckerInner::::new( capacity_non_zero, test_store, custody_context, spec.clone(), + false, ) .expect("should create cache"), ); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 9dc6e897fb1..0effe0ec9d2 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -98,6 +98,7 @@ pub enum BeaconChainError { ObservedAttestersError(ObservedAttestersError), ObservedBlockProducersError(ObservedBlockProducersError), ObservedDataSidecarsError(ObservedDataSidecarsError), + ObservedExecutionProofError(String), AttesterCacheError(AttesterCacheError), PruningError(PruningError), ArithError(ArithError), diff --git a/beacon_node/beacon_chain/src/execution_proof_verification.rs b/beacon_node/beacon_chain/src/execution_proof_verification.rs new file mode 100644 index 00000000000..f20d0494dda --- /dev/null +++ b/beacon_node/beacon_chain/src/execution_proof_verification.rs @@ -0,0 +1,625 @@ +use crate::observed_data_sidecars::{ObservationStrategy, Observe}; +use 
crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use slot_clock::SlotClock; +use std::marker::PhantomData; +use std::sync::Arc; +use tracing::{debug, error, warn}; +use types::{ChainSpec, EthSpec, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// An error occurred while validating a gossip execution proof. +#[derive(Debug)] +pub enum GossipExecutionProofError { + /// There was an error whilst processing the execution proof. It is not known if it is + /// valid or invalid. + /// + /// ## Peer scoring + /// + /// We were unable to process this proof due to an internal error. It's unclear if the proof + /// is valid. + BeaconChainError(Box), + + /// The execution proof is from a slot that is later than the current slot (with respect to + /// the gossip clock disparity). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + + /// The proof corresponds to a slot older than the finalized head slot. + /// + /// ## Peer scoring + /// + /// It's unclear if this proof is valid, but this proof is for a finalized slot and is + /// therefore useless to us. + PastFinalizedSlot { + proof_slot: Slot, + finalized_slot: Slot, + }, + + /// The proof's parent block is unknown. + /// + /// ## Peer scoring + /// + /// We cannot process the proof without validating its parent; the peer isn't necessarily + /// faulty. + ParentUnknown { parent_root: Hash256 }, + + /// The proof conflicts with finalization; no need to propagate. + /// + /// ## Peer scoring + /// + /// It's unclear if this proof is valid, but it conflicts with finality and shouldn't be + /// imported. + NotFinalizedDescendant { block_parent_root: Hash256 }, + + /// An execution proof has already been seen for the given `(proof.block_root, + /// proof_id)` tuple over gossip or non-gossip sources. + /// + /// ## Peer scoring + /// + /// The peer isn't faulty, but we do not forward it over gossip. + PriorKnown { + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + }, + + /// An execution proof has already been processed from a non-gossip source and has not yet been + /// seen on the gossip network. This proof should be accepted and forwarded over gossip. + PriorKnownUnpublished, + + /// The proof verification failed (invalid zkVM proof). + /// + /// ## Peer scoring + /// + /// The proof is invalid and the peer is faulty. + ProofVerificationFailed(String), + + /// The proof size exceeds the maximum allowed size. + /// + /// ## Peer scoring + /// + /// The proof is invalid and the peer is faulty. + ProofTooLarge { size: usize, max_size: usize }, + + /// The block for this proof is not yet available. + /// + /// ## Peer scoring + /// + /// The peer may have sent a proof before we've seen the block. Not necessarily faulty. + BlockNotAvailable { block_root: Hash256 }, +} + +impl From for GossipExecutionProofError { + fn from(e: BeaconChainError) -> Self { + GossipExecutionProofError::BeaconChainError(Box::new(e)) + } +} + +/// A wrapper around an `ExecutionProof` that has been verified for propagation on the gossip +/// network.
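The `O: ObservationStrategy` parameter threaded through the wrapper defined next is a compile-time switch over whether validation records the proof as seen. A minimal model of the marker pattern (the real trait and `Observe` type come from `observed_data_sidecars`; everything else here is illustrative):

```rust
use std::marker::PhantomData;

trait ObservationStrategy {
    fn observe() -> bool;
}
struct Observe;
struct DoNotObserve;
impl ObservationStrategy for Observe {
    fn observe() -> bool { true }
}
impl ObservationStrategy for DoNotObserve {
    fn observe() -> bool { false }
}

/// Proof of validation; the marker records which path produced it.
struct Verified<O: ObservationStrategy>(PhantomData<O>);

fn validate<O: ObservationStrategy>(proof: &str) -> Verified<O> {
    if O::observe() {
        println!("recording {proof} in the observation cache");
    }
    Verified(PhantomData)
}

fn main() {
    let _from_gossip = validate::<Observe>("proof-a");
    let _from_api = validate::<DoNotObserve>("proof-a");
}
```

The same validation logic is reused on both paths; only the monomorphised marker decides whether the observation cache is updated.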
+pub struct GossipVerifiedExecutionProof { + block_root: Hash256, + execution_proof: Arc, + _phantom: PhantomData<(T, O)>, +} + +impl std::fmt::Debug + for GossipVerifiedExecutionProof +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("GossipVerifiedExecutionProof") + .field("block_root", &self.block_root) + .field("execution_proof", &self.execution_proof) + .finish() + } +} + +impl Clone for GossipVerifiedExecutionProof { + fn clone(&self) -> Self { + Self { + block_root: self.block_root, + execution_proof: self.execution_proof.clone(), + _phantom: PhantomData, + } + } +} + +impl GossipVerifiedExecutionProof { + pub fn new( + execution_proof: Arc, + chain: &BeaconChain, + ) -> Result { + validate_execution_proof_for_gossip::(execution_proof, chain) + } + + pub fn slot(&self) -> Slot { + self.execution_proof.slot + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn execution_proof(&self) -> &Arc { + &self.execution_proof + } + + pub fn subnet_id(&self) -> ExecutionProofId { + self.execution_proof.proof_id + } + + /// Consume the wrapper, returning the inner execution proof. + pub fn into_inner(self) -> Arc { + self.execution_proof + } +} + +/// Validate an execution proof for gossip. +pub fn validate_execution_proof_for_gossip( + execution_proof: Arc, + chain: &BeaconChain, +) -> Result, GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_slot = execution_proof.slot; + + // 1. Verify proof is not from the future + verify_proof_not_from_future_slot(chain, proof_slot)?; + + // 2. Verify proof slot is greater than finalized slot + verify_slot_greater_than_latest_finalized_slot(chain, proof_slot)?; + + // 3. Check if proof is already known via gossip + verify_is_unknown_execution_proof(chain, &execution_proof)?; + + // 4. Check if the proof is already in the DA checker cache + // If it exists in the cache, we know it has already passed validation. + if chain + .data_availability_checker + .is_execution_proof_cached(&block_root, &execution_proof) + { + if O::observe() { + observe_gossip_execution_proof(&execution_proof, chain)?; + } + return Err(GossipExecutionProofError::PriorKnownUnpublished); + } + + // 5. Verify proof size limits + verify_proof_size(&execution_proof, &chain.spec)?; + + // Note: We intentionally do NOT verify the block exists yet. + // Execution proofs can arrive via gossip before their corresponding blocks, + // so we cache them in the DA checker and match them up when the block arrives. + // This is similar to how blob sidecars are handled. + + // 6. Run zkVM proof verification + verify_zkvm_proof(&execution_proof, chain)?; + + // 7. Observe the proof to prevent reprocessing + if O::observe() { + observe_gossip_execution_proof(&execution_proof, chain)?; + } + + Ok(GossipVerifiedExecutionProof { + block_root, + execution_proof, + _phantom: PhantomData, + }) +} + +/// Verify that this execution proof has not been seen before via gossip +fn verify_is_unknown_execution_proof( + chain: &BeaconChain, + execution_proof: &ExecutionProof, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + let slot = execution_proof.slot; + + if chain + .observed_execution_proofs + .read() + .is_known(slot, block_root, proof_id) + .map_err(|e| { + GossipExecutionProofError::BeaconChainError(Box::new( + BeaconChainError::ObservedExecutionProofError(format!("{:?}", e)), + )) + })?
+ { + return Err(GossipExecutionProofError::PriorKnown { + slot, + block_root, + proof_id, + }); + } + + Ok(()) +} + +/// Verify that the proof size is within acceptable limits. +fn verify_proof_size( + execution_proof: &ExecutionProof, + _spec: &ChainSpec, +) -> Result<(), GossipExecutionProofError> { + use types::MAX_PROOF_DATA_BYTES; + + let proof_size = execution_proof.proof_data.len(); + if proof_size > MAX_PROOF_DATA_BYTES { + return Err(GossipExecutionProofError::ProofTooLarge { + size: proof_size, + max_size: MAX_PROOF_DATA_BYTES, + }); + } + + Ok(()) +} + +/// Mark this execution proof as observed in gossip, to prevent reprocessing +fn observe_gossip_execution_proof( + execution_proof: &ExecutionProof, + chain: &BeaconChain, +) -> Result<(), GossipExecutionProofError> { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + let slot = execution_proof.slot; + + chain + .observed_execution_proofs + .write() + .observe_proof(slot, block_root, proof_id) + .map_err(|e| { + GossipExecutionProofError::BeaconChainError(Box::new( + BeaconChainError::ObservedExecutionProofError(format!("{:?}", e)), + )) + })?; + + debug!( + %block_root, + %proof_id, + %slot, + "Marked execution proof as observed" + ); + + Ok(()) +} + +/// Verify that the execution proof is not from a future slot. +fn verify_proof_not_from_future_slot( + chain: &BeaconChain, + proof_slot: Slot, +) -> Result<(), GossipExecutionProofError> { + let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity()) + .ok_or(BeaconChainError::UnableToReadSlot)?; + + if proof_slot > latest_permissible_slot { + return Err(GossipExecutionProofError::FutureSlot { + message_slot: proof_slot, + latest_permissible_slot, + }); + } + + Ok(()) +} + +/// Verify that the execution proof slot is greater than the latest finalized slot. +fn verify_slot_greater_than_latest_finalized_slot( + chain: &BeaconChain, + proof_slot: Slot, +) -> Result<(), GossipExecutionProofError> { + let latest_finalized_slot = chain + .head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + if proof_slot <= latest_finalized_slot { + return Err(GossipExecutionProofError::PastFinalizedSlot { + proof_slot, + finalized_slot: latest_finalized_slot, + }); + } + + Ok(()) +} + +/// Verify the zkVM proof.
+/// Verify the zkVM proof.
+///
+/// Note: this is expensive.
+fn verify_zkvm_proof<T: BeaconChainTypes>(
+    execution_proof: &ExecutionProof,
+    chain: &BeaconChain<T>,
+) -> Result<(), GossipExecutionProofError> {
+    let block_root = execution_proof.block_root;
+    let subnet_id = execution_proof.proof_id;
+
+    match chain
+        .data_availability_checker
+        .verify_execution_proof_for_gossip(execution_proof)
+    {
+        Ok(true) => {
+            debug!(%block_root, %subnet_id, "Proof verification succeeded");
+            Ok(())
+        }
+        Ok(false) => {
+            warn!(%block_root, %subnet_id, "Proof verification failed: proof is invalid");
+            Err(GossipExecutionProofError::ProofVerificationFailed(format!(
+                "zkVM proof verification failed for block_root={}, subnet_id={}",
+                block_root, subnet_id
+            )))
+        }
+        Err(e) => {
+            error!(%block_root, %subnet_id, ?e, "Proof verification error");
+            Err(GossipExecutionProofError::BeaconChainError(Box::new(
+                e.into(),
+            )))
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
+    use types::{ExecutionBlockHash, ForkName, MainnetEthSpec};
+
+    type E = MainnetEthSpec;
+
+    /// Helper to create a test execution proof.
+    fn create_test_execution_proof(
+        subnet_id: ExecutionProofId,
+        slot: Slot,
+        block_root: Hash256,
+    ) -> ExecutionProof {
+        let block_hash = ExecutionBlockHash::zero();
+        let proof_data = vec![0u8; 32]; // Dummy proof data
+        ExecutionProof::new(subnet_id, slot, block_hash, block_root, proof_data)
+            .expect("Valid test proof")
+    }
+
+    #[tokio::test]
+    async fn test_reject_future_slot() {
+        let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
+        let harness = BeaconChainHarness::builder(E::default())
+            .spec(spec.into())
+            .deterministic_keypairs(64)
+            .fresh_ephemeral_store()
+            .mock_execution_layer()
+            .build();
+
+        let current_slot = harness.get_current_slot();
+        let future_slot = current_slot + 100;
+        let proof_id = ExecutionProofId::new(0).expect("Valid proof id");
+        let proof = create_test_execution_proof(proof_id, future_slot, Hash256::random());
+
+        let result =
+            validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain);
+
+        assert!(matches!(
+            result.err(),
+            Some(GossipExecutionProofError::FutureSlot { .. })
+        ));
+    }
+
+    #[tokio::test]
+    async fn test_reject_past_finalized_slot() {
+        let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
+        let harness = BeaconChainHarness::builder(E::default())
+            .spec(spec.into())
+            .deterministic_keypairs(64)
+            .fresh_ephemeral_store()
+            .mock_execution_layer()
+            .build();
+
+        // Advance to slot 1 first
+        harness.advance_slot();
+
+        // Advance chain to create finalized slot
+        harness
+            .extend_chain(
+                32,
+                BlockStrategy::OnCanonicalHead,
+                AttestationStrategy::AllValidators,
+            )
+            .await;
+
+        let finalized_slot = harness
+            .finalized_checkpoint()
+            .epoch
+            .start_slot(E::slots_per_epoch());
+        // Create proof for slot before finalized
+        let old_slot = finalized_slot.saturating_sub(1u64);
+        let proof_id = ExecutionProofId::new(0).expect("Valid proof id");
+        let proof = create_test_execution_proof(proof_id, old_slot, Hash256::random());
+
+        let result =
+            validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain);
+
+        assert!(matches!(
+            result.err(),
+            Some(GossipExecutionProofError::PastFinalizedSlot { ..
}) + )); + } + + #[tokio::test] + async fn test_successful_validation() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + let current_slot = harness.get_current_slot(); + let proof_id = ExecutionProofId::new(0).expect("Valid subnet id"); + + // Use a realistic block root from the chain + let block_root = harness.chain.head_beacon_block_root(); + let proof = create_test_execution_proof(proof_id, current_slot, block_root); + + let result = + validate_execution_proof_for_gossip::<_, Observe>(Arc::new(proof), &harness.chain); + + match result { + Ok(_) => {} + Err(GossipExecutionProofError::FutureSlot { .. }) + | Err(GossipExecutionProofError::PastFinalizedSlot { .. }) => { + panic!("Should not fail basic validation checks"); + } + Err(_) => {} + } + } + + /// This test verifies that: + /// 1. First gossip proof is accepted and marked as observed + /// 2. Duplicate gossip proof is rejected with PriorKnown + /// 3. DoS protection: Expensive verification only happens once + #[tokio::test] + async fn test_gossip_duplicate_proof_rejected() { + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.into()) + .deterministic_keypairs(64) + .fresh_ephemeral_store() + .mock_execution_layer() + .zkvm_with_dummy_verifiers() + .build(); + + harness.advance_slot(); + let current_slot = harness.get_current_slot(); + let proof_id = ExecutionProofId::new(0).expect("Valid proof id"); + let block_root = Hash256::random(); + let proof = Arc::new(create_test_execution_proof( + proof_id, + current_slot, + block_root, + )); + + let result1 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); + assert!(result1.is_ok()); + + // Should now be rejected as duplicate + let result2 = + validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain); + + assert!( + matches!( + result2.err(), + Some(GossipExecutionProofError::PriorKnown { slot, block_root: br, proof_id: sid }) + if slot == current_slot && br == block_root && sid == proof_id + ), + "Duplicate proof must be rejected with PriorKnown error" + ); + + assert!( + harness + .chain + .observed_execution_proofs + .read() + .is_known(current_slot, block_root, proof_id) + .unwrap(), + "Proof should be marked as observed" + ); + } + + /// Test that proofs in the DA checker cache are detected and marked as observed. + /// + /// When a proof arrives via gossip but is already in the DA checker cache (from RPC), + /// we should: + /// 1. Accept it for gossip propagation + /// 2. Mark it as observed to prevent reprocessing + /// 3. 
Return PriorKnownUnpublished.
+    #[tokio::test]
+    async fn test_da_cached_proof_accepted_and_observed() {
+        let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
+        let harness = BeaconChainHarness::builder(E::default())
+            .spec(spec.into())
+            .deterministic_keypairs(64)
+            .fresh_ephemeral_store()
+            .mock_execution_layer()
+            .build();
+
+        harness.advance_slot();
+        let subnet_id = ExecutionProofId::new(0).expect("Valid subnet id");
+        let current_slot = harness.get_current_slot();
+        let block_root = Hash256::random();
+
+        let proof = Arc::new(create_test_execution_proof(
+            subnet_id,
+            current_slot,
+            block_root,
+        ));
+
+        // Put the proof directly into the DA checker cache (this can happen if it arrives via RPC)
+        harness
+            .chain
+            .data_availability_checker
+            .put_rpc_execution_proofs(block_root, vec![proof.clone()])
+            .expect("Should put proof in DA cache");
+
+        // Verify it's in the cache
+        assert!(
+            harness
+                .chain
+                .data_availability_checker
+                .is_execution_proof_cached(&block_root, &proof),
+            "Proof should be in DA cache"
+        );
+
+        // Verify it's NOT in the observed cache yet
+        assert!(
+            !harness
+                .chain
+                .observed_execution_proofs
+                .read()
+                .is_known(current_slot, block_root, subnet_id)
+                .unwrap(),
+            "Proof should not be in observed cache initially"
+        );
+
+        // Now it arrives via gossip
+        let result =
+            validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain);
+
+        // Should be rejected with PriorKnownUnpublished (safe to propagate)
+        assert!(
+            matches!(
+                result.as_ref().err(),
+                Some(GossipExecutionProofError::PriorKnownUnpublished)
+            ),
+            "DA cached proof should return PriorKnownUnpublished, got: {:?}",
+            result
+        );
+
+        // Should now be marked as observed
+        assert!(
+            harness
+                .chain
+                .observed_execution_proofs
+                .read()
+                .is_known(current_slot, block_root, subnet_id)
+                .unwrap(),
+            "Proof should be marked as observed after DA cache check"
+        );
+
+        // Second gossip attempt should be rejected as PriorKnown (not PriorKnownUnpublished)
+        let result2 =
+            validate_execution_proof_for_gossip::<_, Observe>(proof.clone(), &harness.chain);
+
+        assert!(
+            matches!(
+                result2.err(),
+                Some(GossipExecutionProofError::PriorKnown { .. })
+            ),
+            "Second gossip should be rejected as PriorKnown (already observed)"
+        );
+    }
+}
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs
index 15e0a55cf5a..e4040eea6b0 100644
--- a/beacon_node/beacon_chain/src/historical_blocks.rs
+++ b/beacon_node/beacon_chain/src/historical_blocks.rs
@@ -1,5 +1,5 @@
 use crate::data_availability_checker::{AvailableBlock, AvailableBlockData};
-use crate::{BeaconChain, BeaconChainTypes, metrics};
+use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics};
 use itertools::Itertools;
 use state_processing::{
     per_block_processing::ParallelSignatureSets,
@@ -34,6 +34,8 @@ pub enum HistoricalBlockError {
     ValidatorPubkeyCacheTimeout,
     /// Logic error: should never occur.
     IndexOutOfBounds,
+    /// Logic error: should never occur.
+    MissingOldestBlockRoot { slot: Slot },
     /// Internal store error
     StoreError(StoreError),
 }
@@ -56,7 +58,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// `SignatureSetError` or `InvalidSignature` will be returned.
     ///
     /// To align with sync we allow some excess blocks with slots greater than or equal to
-    /// `oldest_block_slot` to be provided. They will be ignored without being checked.
+    /// `oldest_block_slot` to be provided. They will be re-imported to fill the columns of the
+    /// checkpoint sync block.
/// /// This function should not be called concurrently with any other function that mutates /// the anchor info (including this function itself). If a concurrent mutation occurs that @@ -72,9 +75,12 @@ impl BeaconChain { let blob_info = self.store.get_blob_info(); let data_column_info = self.store.get_data_column_info(); - // Take all blocks with slots less than the oldest block slot. + // Take all blocks with slots less than or equal to the oldest block slot. + // + // This allows for reimport of the blobs/columns for the finalized block after checkpoint + // sync. let num_relevant = blocks.partition_point(|available_block| { - available_block.block().slot() < anchor_info.oldest_block_slot + available_block.block().slot() <= anchor_info.oldest_block_slot }); let total_blocks = blocks.len(); @@ -95,6 +101,7 @@ impl BeaconChain { } let mut expected_block_root = anchor_info.oldest_block_parent; + let mut last_block_root = expected_block_root; let mut prev_block_slot = anchor_info.oldest_block_slot; let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot; @@ -107,7 +114,27 @@ impl BeaconChain { for available_block in blocks_to_import.into_iter().rev() { let (block_root, block, block_data) = available_block.deconstruct(); - if block_root != expected_block_root { + if block.slot() == anchor_info.oldest_block_slot { + // When reimporting, verify that this is actually the same block (same block root). + let oldest_block_root = self + .block_root_at_slot(block.slot(), WhenSlotSkipped::None) + .ok() + .flatten() + .ok_or(HistoricalBlockError::MissingOldestBlockRoot { slot: block.slot() })?; + if block_root != oldest_block_root { + return Err(HistoricalBlockError::MismatchedBlockRoot { + block_root, + expected_block_root: oldest_block_root, + }); + } + + debug!( + ?block_root, + slot = %block.slot(), + "Re-importing historic block" + ); + last_block_root = block_root; + } else if block_root != expected_block_root { return Err(HistoricalBlockError::MismatchedBlockRoot { block_root, expected_block_root, @@ -198,7 +225,7 @@ impl BeaconChain { .ok_or(HistoricalBlockError::IndexOutOfBounds)? 
.iter() .map(|block| block.parent_root()) - .chain(iter::once(anchor_info.oldest_block_parent)); + .chain(iter::once(last_block_root)); let signature_set = signed_blocks .iter() .zip_eq(block_roots) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4ac3e54742d..686b56e63eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -24,6 +24,7 @@ mod early_attester_cache; mod errors; pub mod events; pub mod execution_payload; +pub mod execution_proof_verification; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; @@ -41,6 +42,7 @@ pub mod observed_aggregates; mod observed_attesters; pub mod observed_block_producers; pub mod observed_data_sidecars; +pub mod observed_execution_proofs; pub mod observed_operations; mod observed_slashable; pub mod persisted_beacon_chain; @@ -64,7 +66,7 @@ pub use self::beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, ChainSegmentResult, ForkChoiceError, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, LightClientProducerEvent, OverrideForkchoiceUpdate, - ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + ProduceBlockVerification, ProofGenerationEvent, StateSkipConfig, WhenSlotSkipped, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; diff --git a/beacon_node/beacon_chain/src/observed_execution_proofs.rs b/beacon_node/beacon_chain/src/observed_execution_proofs.rs new file mode 100644 index 00000000000..e927ecad68f --- /dev/null +++ b/beacon_node/beacon_chain/src/observed_execution_proofs.rs @@ -0,0 +1,402 @@ +//! Provides the `ObservedExecutionProofs` struct which allows for rejecting execution proofs +//! that we have already seen over the gossip network. +//! +//! This cache prevents DoS attacks where an attacker repeatedly gossips the same execution proof, +//! forcing expensive zkVM verification operations. Only proofs that have passed basic gossip +//! validation and proof verification should be added to this cache. +//! +//! TODO(zkproofs): we want the proofs to be signed and then we can just add them to the cache +//! once the signature has been verified like `observed_data_sidecars` + +use std::collections::{HashMap, HashSet}; +use types::{ExecutionProofId, Hash256, Slot}; + +#[derive(Debug, PartialEq)] +pub enum Error { + /// The slot of the provided execution proof is prior to finalization. + FinalizedExecutionProof { slot: Slot, finalized_slot: Slot }, +} + +/// Key for tracking observed execution proofs. +/// We track by (slot, block_root) to efficiently prune old entries. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +struct ProofKey { + slot: Slot, + block_root: Hash256, +} + +impl ProofKey { + fn new(slot: Slot, block_root: Hash256) -> Self { + Self { slot, block_root } + } +} + +/// Maintains a cache of seen execution proofs that were received over gossip. +/// +/// The cache tracks (slot, block_root, proof_id) tuples and prunes entries from finalized slots. +/// +/// ## DoS Resistance +/// +/// This cache is critical for preventing DoS attacks where an attacker repeatedly gossips +/// the same execution proof. zkVM verification is expensive (50-100ms), so we must avoid +/// re-verifying proofs we've already seen. +/// +/// ## Pruning +/// +/// Call `prune` on finalization to remove entries from finalized slots. 
This matches the
+/// pruning pattern used for observed blobs and data columns.
+pub struct ObservedExecutionProofs {
+    /// The finalized slot. Proofs at or below this slot are rejected.
+    finalized_slot: Slot,
+    /// Map from (slot, block_root) to the set of proof IDs we've seen for that block.
+    items: HashMap<ProofKey, HashSet<ExecutionProofId>>,
+}
+
+impl ObservedExecutionProofs {
+    /// Create a new cache with the given finalized slot.
+    ///
+    /// Proofs at or below `finalized_slot` will be rejected.
+    pub fn new(finalized_slot: Slot) -> Self {
+        Self {
+            finalized_slot,
+            items: HashMap::new(),
+        }
+    }
+
+    /// Observe an execution proof from gossip.
+    ///
+    /// Returns `true` if the proof was already observed (duplicate), `false` if it's new.
+    ///
+    /// Returns an error if the proof's slot is at or below the finalized slot. This shouldn't
+    /// happen in practice, since it would mean we received a proof for a finalized block.
+    pub fn observe_proof(
+        &mut self,
+        slot: Slot,
+        block_root: Hash256,
+        proof_id: ExecutionProofId,
+    ) -> Result<bool, Error> {
+        // Reject finalized proofs
+        if self.finalized_slot > 0 && slot <= self.finalized_slot {
+            return Err(Error::FinalizedExecutionProof {
+                slot,
+                finalized_slot: self.finalized_slot,
+            });
+        }
+
+        let key = ProofKey::new(slot, block_root);
+        let proof_ids = self.items.entry(key).or_default();
+
+        let was_duplicate = !proof_ids.insert(proof_id);
+
+        Ok(was_duplicate)
+    }
+
+    /// Check if we have already observed this proof.
+    ///
+    /// Returns `true` if the proof has been seen, `false` if it's new.
+    ///
+    /// Returns an error if the proof's slot is at or below the finalized slot.
+    pub fn is_known(
+        &self,
+        slot: Slot,
+        block_root: Hash256,
+        proof_id: ExecutionProofId,
+    ) -> Result<bool, Error> {
+        // Reject finalized proofs
+        if self.finalized_slot > 0 && slot <= self.finalized_slot {
+            return Err(Error::FinalizedExecutionProof {
+                slot,
+                finalized_slot: self.finalized_slot,
+            });
+        }
+
+        let key = ProofKey::new(slot, block_root);
+        let is_known = self
+            .items
+            .get(&key)
+            .is_some_and(|proof_ids| proof_ids.contains(&proof_id));
+
+        Ok(is_known)
+    }
+
+    /// Prune execution proof observations for slots less than or equal to the given slot.
+    ///
+    /// This matches the pruning behavior of observed blobs and data columns.
+    pub fn prune(&mut self, finalized_slot: Slot) {
+        if finalized_slot == 0 {
+            return;
+        }
+
+        self.finalized_slot = finalized_slot;
+        self.items.retain(|key, _| key.slot > finalized_slot);
+    }
+
+    /// Get the current finalized slot boundary.
+    ///
+    /// Proofs at or below this slot will be rejected.
+    pub fn finalized_slot(&self) -> Slot {
+        self.finalized_slot
+    }
+
+    /// Get the number of unique (slot, block_root) keys being tracked.
+    pub fn len(&self) -> usize {
+        self.items.len()
+    }
+
+    /// Check if the cache is empty.
+    pub fn is_empty(&self) -> bool {
+        self.items.is_empty()
+    }
+
+    /// Clear all entries from the cache.
+ #[cfg(test)] + pub fn clear(&mut self) { + self.items.clear(); + } +} + +impl Default for ObservedExecutionProofs { + fn default() -> Self { + Self::new(Slot::new(0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::FixedBytesExtended; + + fn test_proof_key(slot: u64) -> (Slot, Hash256, ExecutionProofId) { + ( + Slot::new(slot), + Hash256::from_low_u64_be(slot), + ExecutionProofId::new(0).unwrap(), + ) + } + + #[test] + fn test_observe_new_proof() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let (slot, block_root, subnet_id) = test_proof_key(10); + + // First observation should return false (not a duplicate) + assert_eq!( + cache.observe_proof(slot, block_root, subnet_id), + Ok(false), + "first observation should not be duplicate" + ); + + // Second observation should return true (is a duplicate) + assert_eq!( + cache.observe_proof(slot, block_root, subnet_id), + Ok(true), + "second observation should be duplicate" + ); + } + + #[test] + fn test_observe_different_subnets() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root = Hash256::from_low_u64_be(10); + let proof_0 = ExecutionProofId::new(0).unwrap(); + let proof_1 = ExecutionProofId::new(1).unwrap(); + + assert_eq!( + cache.observe_proof(slot, block_root, proof_0), + Ok(false), + "proof 0 is new" + ); + + // Observe proof from subnet 1 (same block, different proofID) + assert_eq!( + cache.observe_proof(slot, block_root, proof_1), + Ok(false), + "proof 1 is new" + ); + + // Re-observe proof 0 + assert_eq!( + cache.observe_proof(slot, block_root, proof_0), + Ok(true), + "proof 0 is duplicate" + ); + + assert!(cache.is_known(slot, block_root, proof_0).unwrap()); + assert!(cache.is_known(slot, block_root, proof_1).unwrap()); + } + + #[test] + fn test_is_known() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let (slot, block_root, proof_id) = test_proof_key(10); + + // Before observation + assert_eq!( + cache.is_known(slot, block_root, proof_id), + Ok(false), + "not yet observed" + ); + + // After observation + cache.observe_proof(slot, block_root, proof_id).unwrap(); + assert_eq!( + cache.is_known(slot, block_root, proof_id), + Ok(true), + "now observed" + ); + } + + #[test] + fn test_reject_finalized_proofs() { + let finalized_slot = Slot::new(100); + let mut cache = ObservedExecutionProofs::new(finalized_slot); + + let old_slot = Slot::new(100); + let block_root = Hash256::from_low_u64_be(100); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Observing finalized proof should error + assert_eq!( + cache.observe_proof(old_slot, block_root, proof_id), + Err(Error::FinalizedExecutionProof { + slot: old_slot, + finalized_slot, + }), + "finalized proofs should be rejected" + ); + + // Checking finalized proof should error + assert_eq!( + cache.is_known(old_slot, block_root, proof_id), + Err(Error::FinalizedExecutionProof { + slot: old_slot, + finalized_slot, + }), + "finalized proofs should be rejected in is_known" + ); + } + + #[test] + fn test_pruning() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + + // Add proofs at different slots + for slot in 0..100 { + let (s, br, pid) = test_proof_key(slot); + cache.observe_proof(s, br, pid).unwrap(); + } + + assert_eq!(cache.len(), 100, "should have 100 entries"); + + // Prune at finalized_slot = 50 + // Should remove slots <= 50, keep slots > 50 + let finalized_slot = Slot::new(50); + cache.prune(finalized_slot); + + assert_eq!( + cache.finalized_slot(), + 
finalized_slot, + "finalized slot should be updated" + ); + + // Check that finalized entries were removed + let old_slot = Slot::new(50); + let old_block_root = Hash256::from_low_u64_be(50); + let proof_id = ExecutionProofId::new(0).unwrap(); + + assert!( + cache.is_known(old_slot, old_block_root, proof_id).is_err(), + "finalized entries should be rejected after pruning" + ); + + // Check that non-finalized entries are still present + let recent_slot = Slot::new(51); + let recent_block_root = Hash256::from_low_u64_be(51); + assert!( + cache + .is_known(recent_slot, recent_block_root, proof_id) + .unwrap(), + "non-finalized entries should still be present" + ); + } + + #[test] + fn test_prune_removes_exact_boundary() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + + // Add proofs at slots 50, 51, 52 + for slot in 50..=52 { + let (s, br, pid) = test_proof_key(slot); + cache.observe_proof(s, br, pid).unwrap(); + } + + // Prune at finalized_slot = 50 + // Should remove slots <= 50, keep slots > 50 + cache.prune(Slot::new(50)); + + assert_eq!(cache.finalized_slot(), Slot::new(50)); + + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Slot 50 should be rejected (finalized) + assert!( + cache + .is_known(Slot::new(50), Hash256::from_low_u64_be(50), proof_id) + .is_err() + ); + + // Slot 51 should still be present (> finalized) + assert!( + cache + .is_known(Slot::new(51), Hash256::from_low_u64_be(51), proof_id) + .unwrap() + ); + + // Slot 52 should still be present + assert!( + cache + .is_known(Slot::new(52), Hash256::from_low_u64_be(52), proof_id) + .unwrap() + ); + } + + #[test] + fn test_different_blocks_same_slot() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root_a = Hash256::from_low_u64_be(100); + let block_root_b = Hash256::from_low_u64_be(200); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Observe proof for block A + cache.observe_proof(slot, block_root_a, proof_id).unwrap(); + + // Proof for block B should be new (different block_root) + assert_eq!( + cache.observe_proof(slot, block_root_b, proof_id), + Ok(false), + "different block_root should not be duplicate" + ); + + assert!(cache.is_known(slot, block_root_a, proof_id).unwrap()); + assert!(cache.is_known(slot, block_root_b, proof_id).unwrap()); + } + + #[test] + fn test_len_counts_blocks_not_subnets() { + let mut cache = ObservedExecutionProofs::new(Slot::new(0)); + let slot = Slot::new(10); + let block_root = Hash256::from_low_u64_be(10); + + // Add multiple proof IDs for same block + for i in 0..8 { + let proof_id = ExecutionProofId::new(i).unwrap(); + cache.observe_proof(slot, block_root, proof_id).unwrap(); + } + + // Length should be 1 (one unique (slot, block_root) key) + assert_eq!(cache.len(), 1, "len counts unique keys, not proofIDs"); + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs index e8bd526e19f..e238e1efb6c 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -122,7 +122,7 @@ pub fn downgrade_from_v23( let heads = fork_choice .proto_array() - .heads_descended_from_finalization::(); + .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()); let head_roots = heads.iter().map(|node| node.root).collect(); let head_slots = heads.iter().map(|node| node.slot).collect(); diff --git 
a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index b10edf23369..a070dc350b5 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -363,24 +363,30 @@ fn advance_head(beacon_chain: &Arc>) -> Resu // For epochs *greater than* the Fulu fork epoch, we have also determined the proposer // shuffling for the next epoch. let next_epoch = state.next_epoch()?; - let next_epoch_decision_root = state.proposer_shuffling_decision_root_at_epoch( - next_epoch, - head_block_root, - &beacon_chain.spec, - )?; - beacon_chain.with_proposer_cache( - next_epoch_decision_root, - next_epoch, - |_| Ok(()), - || { - debug!( - shuffling_decision_root = ?next_epoch_decision_root, - epoch = %next_epoch, - "Computing next epoch proposer shuffling in state advance" - ); - Ok::<_, Error>((advanced_state_root, state.clone())) - }, - )?; + let next_epoch_decision_slot = beacon_chain + .spec + .proposer_shuffling_decision_slot::(next_epoch); + + if state.slot() > next_epoch_decision_slot { + let next_epoch_decision_root = state.proposer_shuffling_decision_root_at_epoch( + next_epoch, + head_block_root, + &beacon_chain.spec, + )?; + beacon_chain.with_proposer_cache( + next_epoch_decision_root, + next_epoch, + |_| Ok(()), + || { + debug!( + shuffling_decision_root = ?next_epoch_decision_root, + epoch = %next_epoch, + "Computing next epoch proposer shuffling in state advance" + ); + Ok::<_, Error>((advanced_state_root, state.clone())) + }, + )?; + } // Update the attester cache. let shuffling_id = diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 9601618e927..52f486e1105 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -42,6 +42,7 @@ use parking_lot::{Mutex, RwLockWriteGuard}; use rand::Rng; use rand::SeedableRng; use rand::rngs::StdRng; +use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; @@ -59,6 +60,7 @@ use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; use tree_hash::TreeHash; +use types::data_column_custody_group::CustodyIndex; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; use types::test_utils::TestRandom; @@ -220,6 +222,7 @@ pub struct Builder { testing_slot_clock: Option, validator_monitor_config: Option, genesis_state_builder: Option>, + zkvm_execution_layer_config: Option, node_custody_type: NodeCustodyType, runtime: TestRuntime, } @@ -366,6 +369,7 @@ where testing_slot_clock: None, validator_monitor_config: None, genesis_state_builder: None, + zkvm_execution_layer_config: None, node_custody_type: NodeCustodyType::Fullnode, runtime, } @@ -540,6 +544,13 @@ where self } + /// Enable zkVM execution proof verification with dummy verifiers for testing. 
+    pub fn zkvm_with_dummy_verifiers(mut self) -> Self {
+        self.zkvm_execution_layer_config =
+            Some(zkvm_execution_layer::ZKVMExecutionLayerConfig::default());
+        self
+    }
+
     pub fn with_genesis_state_builder(
         mut self,
         f: impl FnOnce(InteropGenesisBuilder<E>) -> InteropGenesisBuilder<E>,
@@ -576,10 +587,17 @@
             .shutdown_sender(shutdown_tx)
             .chain_config(chain_config)
             .node_custody_type(self.node_custody_type)
+            .ordered_custody_column_indices(generate_data_column_indices_rand_order::<E>())
             .event_handler(Some(ServerSentEventHandler::new_with_capacity(5)))
             .validator_monitor_config(validator_monitor_config)
             .rng(Box::new(StdRng::seed_from_u64(42)));
 
+        builder = if let Some(zkvm_config) = self.zkvm_execution_layer_config {
+            builder.zkvm_execution_layer_config(Some(zkvm_config))
+        } else {
+            builder
+        };
+
         builder = if let Some(mutator) = self.initial_mutator {
             mutator(builder)
         } else {
@@ -605,15 +623,6 @@
         let chain = builder.build().expect("should build");
 
-        chain
-            .data_availability_checker
-            .custody_context()
-            .init_ordered_data_columns_from_custody_groups(
-                (0..spec.number_of_custody_groups).collect(),
-                &spec,
-            )
-            .expect("should initialise custody context");
-
         BeaconChainHarness {
             spec: chain.spec.clone(),
             chain: Arc::new(chain),
@@ -3393,3 +3402,9 @@
     )
     .unwrap()
 }
+
+pub fn generate_data_column_indices_rand_order<E: EthSpec>() -> Vec<CustodyIndex> {
+    let mut indices = (0..E::number_of_columns() as u64).collect::<Vec<_>>();
+    indices.shuffle(&mut StdRng::seed_from_u64(42));
+    indices
+}
diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
index 39d2c2c2d76..a346a649f02 100644
--- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
+++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
@@ -1,12 +1,14 @@
 use crate::errors::BeaconChainError;
 use crate::{BeaconChainTypes, BeaconStore};
 use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN;
+use rayon::prelude::*;
 use smallvec::SmallVec;
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use store::{DBColumn, Error as StoreError, StoreItem, StoreOp};
+use tracing::instrument;
 use types::{BeaconState, FixedBytesExtended, Hash256, PublicKey, PublicKeyBytes};
 
 /// Provides a mapping of `validator_index -> validator_publickey`.
@@ -28,6 +30,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
     /// Create a new public key cache using the keys in `state.validators`.
     ///
     /// The new cache will be updated with the keys from `state` and immediately written to disk.
+    #[instrument(name = "validator_pubkey_cache_new", skip_all)]
     pub fn new(
         state: &BeaconState<T::EthSpec>,
         store: BeaconStore<T>,
@@ -46,6 +49,7 @@
     }
 
     /// Load the pubkey cache from the given on-disk database.
+    #[instrument(name = "validator_pubkey_cache_load_from_store", skip_all)]
     pub fn load_from_store(store: BeaconStore<T>) -> Result<Self, BeaconChainError> {
         let mut pubkeys = vec![];
         let mut indices = HashMap::new();
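The next hunk splits `import_new_pubkeys` into a parallel first-startup path and a sequential incremental path. BLS point decompression is CPU-bound and each key is independent, which is what makes rayon pay off on the initial import. A minimal, self-contained sketch of the pattern follows; `decompress` and the byte/key types are stand-ins, not the PR's code.

use rayon::prelude::*;

#[derive(Debug)]
struct DecodeError;

// Stand-in for BLS decompression, which is expensive in the real code.
fn decompress(bytes: &[u8; 4]) -> Result<u32, DecodeError> {
    Ok(u32::from_be_bytes(*bytes))
}

// Decompress all keys in parallel while preserving input order, so the index
// assigned to each key afterwards still matches its validator index.
fn decompress_all(keys: Vec<[u8; 4]>) -> Result<Vec<([u8; 4], u32)>, DecodeError> {
    keys.into_par_iter()
        .map(|bytes| decompress(&bytes).map(|key| (bytes, key)))
        // Collecting a `Result<Vec<_>, _>` from an indexed parallel iterator
        // keeps the original order and stops at the first error.
        .collect()
}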
@@ -77,6 +81,7 @@
     /// Does not delete any keys from `self` if they don't appear in `state`.
     ///
     /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process.
+    #[instrument(skip_all)]
     pub fn import_new_pubkeys(
         &mut self,
         state: &BeaconState<T::EthSpec>,
@@ -106,29 +111,58 @@
         self.indices.reserve(validator_keys.len());
 
         let mut store_ops = Vec::with_capacity(validator_keys.len());
-        for pubkey_bytes in validator_keys {
-            let i = self.pubkeys.len();
-            if self.indices.contains_key(&pubkey_bytes) {
-                return Err(BeaconChainError::DuplicateValidatorPublicKey);
+        let is_initial_import = self.pubkeys.is_empty();
+
+        // Helper to insert a decompressed key
+        let mut insert_key =
+            |pubkey_bytes: PublicKeyBytes, pubkey: PublicKey| -> Result<(), BeaconChainError> {
+                let i = self.pubkeys.len();
+
+                if self.indices.contains_key(&pubkey_bytes) {
+                    return Err(BeaconChainError::DuplicateValidatorPublicKey);
+                }
+
+                // Stage the new validator key for writing to disk.
+                // It will be committed atomically when the block that introduced it is written to disk.
+                // Notably it is NOT written while the write lock on the cache is held.
+                // See: https://github.com/sigp/lighthouse/issues/2327
+                store_ops.push(StoreOp::KeyValueOp(
+                    DatabasePubkey::from_pubkey(&pubkey)
+                        .as_kv_store_op(DatabasePubkey::key_for_index(i)),
+                ));
+
+                self.pubkeys.push(pubkey);
+                self.pubkey_bytes.push(pubkey_bytes);
+                self.indices.insert(pubkey_bytes, i);
+                Ok(())
+            };
+
+        if is_initial_import {
+            // On first startup, decompress keys in parallel for better performance
+            let validator_keys_vec: Vec<PublicKeyBytes> = validator_keys.collect();
+
+            let decompressed: Vec<(PublicKeyBytes, PublicKey)> = validator_keys_vec
+                .into_par_iter()
+                .map(|pubkey_bytes| {
+                    let pubkey = (&pubkey_bytes)
+                        .try_into()
+                        .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?;
+                    Ok((pubkey_bytes, pubkey))
+                })
+                .collect::<Result<Vec<_>, BeaconChainError>>()?;
+
+            for (pubkey_bytes, pubkey) in decompressed {
+                insert_key(pubkey_bytes, pubkey)?;
+            }
+        } else {
+            // Sequential path for incremental updates
+            for pubkey_bytes in validator_keys {
+                let pubkey = (&pubkey_bytes)
+                    .try_into()
+                    .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?;
+                insert_key(pubkey_bytes, pubkey)?;
+            }
-
-            let pubkey = (&pubkey_bytes)
-                .try_into()
-                .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?;
-
-            // Stage the new validator key for writing to disk.
-            // It will be committed atomically when the block that introduced it is written to disk.
-            // Notably it is NOT written while the write lock on the cache is held.
- // See: https://github.com/sigp/lighthouse/issues/2327 - store_ops.push(StoreOp::KeyValueOp( - DatabasePubkey::from_pubkey(&pubkey) - .as_kv_store_op(DatabasePubkey::key_for_index(i)), - )); - - self.pubkeys.push(pubkey); - self.pubkey_bytes.push(pubkey_bytes); - self.indices.insert(pubkey_bytes, i); } Ok(store_ops) @@ -324,4 +358,39 @@ mod test { let cache = ValidatorPubkeyCache::load_from_store(store).expect("should open cache"); check_cache_get(&cache, &keypairs[..]); } + + #[test] + fn parallel_import_maintains_order() { + // Test that parallel decompression on first startup maintains correct order and indices + let (state, keypairs) = get_state(100); + let store = get_store(); + + // Create cache from empty state (triggers parallel path) + let cache: ValidatorPubkeyCache = + ValidatorPubkeyCache::new(&state, store).expect("should create cache"); + + check_cache_get(&cache, &keypairs[..]); + } + + #[test] + fn incremental_import_maintains_order() { + // Test that incremental imports maintain correct order (triggers sequential path) + let store = get_store(); + + // Start with 50 validators + let (state1, keypairs1) = get_state(50); + let mut cache = + ValidatorPubkeyCache::new(&state1, store.clone()).expect("should create cache"); + check_cache_get(&cache, &keypairs1[..]); + + // Add 50 more validators + let (state2, keypairs2) = get_state(100); + let ops = cache + .import_new_pubkeys(&state2) + .expect("should import pubkeys"); + store.do_atomically_with_block_and_blobs_cache(ops).unwrap(); + + // Verify all 100 validators are correctly indexed + check_cache_get(&cache, &keypairs2[..]); + } } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 638c221a7fa..0733d901fc3 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -11,7 +11,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg, mock_execution_layer_from_parts, test_spec, }; -use beacon_chain::test_utils::{SyncCommitteeStrategy, fork_name_from_env}; +use beacon_chain::test_utils::{ + SyncCommitteeStrategy, fork_name_from_env, generate_data_column_indices_rand_order, +}; use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, @@ -2703,7 +2705,7 @@ async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let slots = (1..num_initial_slots).map(Slot::new).collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } #[tokio::test] @@ -2711,7 +2713,7 @@ async fn weak_subjectivity_sync_single_block_batches() { let num_initial_slots = E::slots_per_epoch() * 11; let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let slots = (1..num_initial_slots).map(Slot::new).collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, Some(1)).await + weak_subjectivity_sync_test(slots, checkpoint_slot, Some(1), true).await } #[tokio::test] @@ -2725,7 +2727,7 @@ async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { slot <= checkpoint_slot - 3 || slot > checkpoint_slot }) .collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } #[tokio::test] @@ -2739,7 
+2741,7 @@ async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { slot <= checkpoint_slot || slot > checkpoint_slot + 3 }) .collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } // Regression test for https://github.com/sigp/lighthouse/issues/4817 @@ -2751,7 +2753,7 @@ async fn weak_subjectivity_sync_skips_at_genesis() { let end_slot = E::slots_per_epoch() * 4; let slots = (start_slot..end_slot).map(Slot::new).collect(); let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } // Checkpoint sync from the genesis state. @@ -2764,13 +2766,24 @@ async fn weak_subjectivity_sync_from_genesis() { let end_slot = E::slots_per_epoch() * 2; let slots = (start_slot..end_slot).map(Slot::new).collect(); let checkpoint_slot = Slot::new(0); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await +} + +// Test checkpoint sync without providing blobs - backfill should fetch them. +#[tokio::test] +async fn weak_subjectivity_sync_without_blobs() { + let start_slot = 4; + let end_slot = E::slots_per_epoch() * 4; + let slots = (start_slot..end_slot).map(Slot::new).collect(); + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); + weak_subjectivity_sync_test(slots, checkpoint_slot, None, false).await } async fn weak_subjectivity_sync_test( slots: Vec, checkpoint_slot: Slot, backfill_batch_size: Option, + provide_blobs: bool, ) { // Build an initial chain on one harness, representing a synced node with full history. let num_final_blocks = E::slots_per_epoch() * 2; @@ -2872,7 +2885,11 @@ async fn weak_subjectivity_sync_test( .weak_subjectivity_state( wss_state, wss_block.clone(), - wss_blobs_opt.clone(), + if provide_blobs { + wss_blobs_opt.clone() + } else { + None + }, genesis_state, ) .unwrap() @@ -2881,17 +2898,10 @@ async fn weak_subjectivity_sync_test( .shutdown_sender(shutdown_tx) .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) .execution_layer(Some(mock.el)) + .ordered_custody_column_indices(generate_data_column_indices_rand_order::()) .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); - beacon_chain - .data_availability_checker - .custody_context() - .init_ordered_data_columns_from_custody_groups( - (0..spec.number_of_custody_groups).collect(), - &spec, - ) - .unwrap(); let beacon_chain = Arc::new(beacon_chain); let wss_block_root = wss_block.canonical_root(); diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 1cdf3693ff2..6f5170be300 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -114,6 +114,7 @@ pub struct BeaconProcessorQueueLengths { unknown_light_client_update_queue: usize, rpc_block_queue: usize, rpc_blob_queue: usize, + rpc_execution_proof_queue: usize, rpc_custody_column_queue: usize, column_reconstruction_queue: usize, chain_segment_queue: usize, @@ -121,11 +122,13 @@ pub struct BeaconProcessorQueueLengths { gossip_block_queue: usize, gossip_blob_queue: usize, gossip_data_column_queue: usize, + gossip_execution_proof_queue: usize, delayed_block_queue: usize, status_queue: usize, block_brange_queue: usize, block_broots_queue: usize, blob_broots_queue: usize, + execution_proof_broots_queue: usize, blob_brange_queue: 
usize, dcbroots_queue: usize, dcbrange_queue: usize, @@ -178,6 +181,7 @@ impl BeaconProcessorQueueLengths { unknown_light_client_update_queue: 128, rpc_block_queue: 1024, rpc_blob_queue: 1024, + rpc_execution_proof_queue: 1024, // We don't request more than `PARENT_DEPTH_TOLERANCE` (32) lookups, so we can limit // this queue size. With 48 max blobs per block, each column sidecar list could be up to 12MB. rpc_custody_column_queue: 64, @@ -187,11 +191,13 @@ impl BeaconProcessorQueueLengths { gossip_block_queue: 1024, gossip_blob_queue: 1024, gossip_data_column_queue: 1024, + gossip_execution_proof_queue: 1024, delayed_block_queue: 1024, status_queue: 1024, block_brange_queue: 1024, block_broots_queue: 1024, blob_broots_queue: 1024, + execution_proof_broots_queue: 1024, blob_brange_queue: 1024, dcbroots_queue: 1024, dcbrange_queue: 1024, @@ -579,6 +585,7 @@ pub enum Work { GossipBlock(AsyncFn), GossipBlobSidecar(AsyncFn), GossipDataColumnSidecar(AsyncFn), + GossipExecutionProof(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -597,6 +604,9 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcExecutionProofs { + process_fn: AsyncFn, + }, RpcCustodyColumn(AsyncFn), ColumnReconstruction(AsyncFn), IgnoredRpcBlock { @@ -609,6 +619,7 @@ pub enum Work { BlocksByRootsRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), + ExecutionProofsByRootsRequest(BlockingFn), DataColumnsByRootsRequest(BlockingFn), DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), @@ -641,6 +652,7 @@ pub enum WorkType { GossipBlock, GossipBlobSidecar, GossipDataColumnSidecar, + GossipExecutionProof, DelayedImportBlock, GossipVoluntaryExit, GossipProposerSlashing, @@ -651,6 +663,7 @@ pub enum WorkType { GossipLightClientOptimisticUpdate, RpcBlock, RpcBlobs, + RpcExecutionProofs, RpcCustodyColumn, ColumnReconstruction, IgnoredRpcBlock, @@ -661,6 +674,7 @@ pub enum WorkType { BlocksByRootsRequest, BlobsByRangeRequest, BlobsByRootsRequest, + ExecutionProofsByRootsRequest, DataColumnsByRootsRequest, DataColumnsByRangeRequest, GossipBlsToExecutionChange, @@ -688,6 +702,7 @@ impl Work { Work::GossipBlock(_) => WorkType::GossipBlock, Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::GossipExecutionProof(_) => WorkType::GossipExecutionProof, Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, @@ -701,6 +716,7 @@ impl Work { Work::GossipBlsToExecutionChange(_) => WorkType::GossipBlsToExecutionChange, Work::RpcBlock { .. } => WorkType::RpcBlock, Work::RpcBlobs { .. } => WorkType::RpcBlobs, + Work::RpcExecutionProofs { .. } => WorkType::RpcExecutionProofs, Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, Work::ColumnReconstruction(_) => WorkType::ColumnReconstruction, Work::IgnoredRpcBlock { .. 
} => WorkType::IgnoredRpcBlock, @@ -711,6 +727,7 @@ impl Work { Work::BlocksByRootsRequest(_) => WorkType::BlocksByRootsRequest, Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, + Work::ExecutionProofsByRootsRequest(_) => WorkType::ExecutionProofsByRootsRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, @@ -865,6 +882,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut rpc_execution_proof_queue = FifoQueue::new(queue_lengths.rpc_execution_proof_queue); let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); let mut column_reconstruction_queue = LifoQueue::new(queue_lengths.column_reconstruction_queue); @@ -873,12 +891,16 @@ impl BeaconProcessor { let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let mut gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); + let mut gossip_execution_proof_queue = + FifoQueue::new(queue_lengths.gossip_execution_proof_queue); let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); let mut block_brange_queue = FifoQueue::new(queue_lengths.block_brange_queue); let mut block_broots_queue = FifoQueue::new(queue_lengths.block_broots_queue); let mut blob_broots_queue = FifoQueue::new(queue_lengths.blob_broots_queue); + let mut execution_proof_broots_queue = + FifoQueue::new(queue_lengths.execution_proof_broots_queue); let mut blob_brange_queue = FifoQueue::new(queue_lengths.blob_brange_queue); let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); @@ -1039,6 +1061,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = rpc_blob_queue.pop() { Some(item) + } else if let Some(item) = rpc_execution_proof_queue.pop() { + Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { @@ -1055,6 +1079,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = gossip_data_column_queue.pop() { Some(item) + } else if let Some(item) = gossip_execution_proof_queue.pop() { + Some(item) } else if let Some(item) = column_reconstruction_queue.pop() { Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. @@ -1198,6 +1224,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = blob_broots_queue.pop() { Some(item) + } else if let Some(item) = execution_proof_broots_queue.pop() { + Some(item) } else if let Some(item) = dcbroots_queue.pop() { Some(item) } else if let Some(item) = dcbrange_queue.pop() { @@ -1325,6 +1353,9 @@ impl BeaconProcessor { Work::GossipDataColumnSidecar { .. } => { gossip_data_column_queue.push(work, work_id) } + Work::GossipExecutionProof { .. } => { + gossip_execution_proof_queue.push(work, work_id) + } Work::DelayedImportBlock { .. 
} => { delayed_block_queue.push(work, work_id) } @@ -1351,6 +1382,9 @@ impl BeaconProcessor { rpc_block_queue.push(work, work_id) } Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id), + Work::RpcExecutionProofs { .. } => { + rpc_execution_proof_queue.push(work, work_id) + } Work::RpcCustodyColumn { .. } => { rpc_custody_column_queue.push(work, work_id) } @@ -1393,6 +1427,9 @@ impl BeaconProcessor { Work::BlobsByRootsRequest { .. } => { blob_broots_queue.push(work, work_id) } + Work::ExecutionProofsByRootsRequest { .. } => { + execution_proof_broots_queue.push(work, work_id) + } Work::DataColumnsByRootsRequest { .. } => { dcbroots_queue.push(work, work_id) } @@ -1424,6 +1461,7 @@ impl BeaconProcessor { WorkType::GossipBlock => gossip_block_queue.len(), WorkType::GossipBlobSidecar => gossip_blob_queue.len(), WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::GossipExecutionProof => gossip_execution_proof_queue.len(), WorkType::DelayedImportBlock => delayed_block_queue.len(), WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), @@ -1438,6 +1476,7 @@ impl BeaconProcessor { } WorkType::RpcBlock => rpc_block_queue.len(), WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), + WorkType::RpcExecutionProofs => rpc_execution_proof_queue.len(), WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), WorkType::ColumnReconstruction => column_reconstruction_queue.len(), WorkType::ChainSegment => chain_segment_queue.len(), @@ -1447,6 +1486,9 @@ impl BeaconProcessor { WorkType::BlocksByRootsRequest => block_broots_queue.len(), WorkType::BlobsByRangeRequest => blob_brange_queue.len(), WorkType::BlobsByRootsRequest => blob_broots_queue.len(), + WorkType::ExecutionProofsByRootsRequest => { + execution_proof_broots_queue.len() + } WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), WorkType::GossipBlsToExecutionChange => { @@ -1594,16 +1636,19 @@ impl BeaconProcessor { } => task_spawner.spawn_async(process_fn), Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } + | Work::RpcExecutionProofs { process_fn } | Work::RpcCustodyColumn(process_fn) | Work::ColumnReconstruction(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) - | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { + | Work::GossipDataColumnSidecar(work) + | Work::GossipExecutionProof(work) => task_spawner.spawn_async(async move { work.await; }), Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) + | Work::ExecutionProofsByRootsRequest(process_fn) | Work::DataColumnsByRootsRequest(process_fn) | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3c4b2572c9a..c5de5a4f839 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -14,6 +14,9 @@ eth2 = { workspace = true } eth2_config = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +# TODO(zkproofs): add as a workspace dependency +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } +proof_generation_service = { path = "../proof_generation_service" } futures = { workspace = true } genesis = 
{ workspace = true } http_api = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c3c827f0aae..52ceca991df 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -4,6 +4,7 @@ use crate::compute_light_client_updates::{ }; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; +use beacon_chain::ProofGenerationEvent; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; @@ -28,9 +29,11 @@ use execution_layer::ExecutionLayer; use execution_layer::test_utils::generate_genesis_header; use futures::channel::mpsc::Receiver; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; +use lighthouse_network::identity::Keypair; use lighthouse_network::{NetworkGlobals, prometheus_client::registry::Registry}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; +use proof_generation_service; use rand::SeedableRng; use rand::rngs::{OsRng, StdRng}; use slasher::Slasher; @@ -41,12 +44,13 @@ use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; -use tracing::{debug, info, warn}; -use types::data_column_custody_group::get_custody_groups_ordered; +use tracing::{debug, info, instrument, warn}; +use types::data_column_custody_group::compute_ordered_custody_column_indices; use types::{ BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, }; +use zkvm_execution_layer; /// Interval between polling the eth1 node for genesis information. pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000; @@ -88,6 +92,8 @@ pub struct ClientBuilder { beacon_processor_config: Option, beacon_processor_channels: Option>, light_client_server_rv: Option>>, + proof_generation_rx: + Option>>, eth_spec_instance: T::EthSpec, } @@ -122,6 +128,7 @@ where beacon_processor_config: None, beacon_processor_channels: None, light_client_server_rv: None, + proof_generation_rx: None, } } @@ -150,10 +157,12 @@ where /// Initializes the `BeaconChainBuilder`. The `build_beacon_chain` method will need to be /// called later in order to actually instantiate the `BeaconChain`. + #[instrument(skip_all)] pub async fn beacon_chain_builder( mut self, client_genesis: ClientGenesis, config: ClientConfig, + node_id: [u8; 32], ) -> Result { let store = self.store.clone(); let chain_spec = self.chain_spec.clone(); @@ -191,6 +200,23 @@ where Kzg::new_from_trusted_setup_no_precomp(&config.trusted_setup).map_err(kzg_err_msg)? 
}; + // Modify spec if zkvm mode is enabled via CLI + let spec = if let Some(zkvm_config) = &config.zkvm_execution_layer { + let mut modified_spec = (*spec).clone(); + + modified_spec.zkvm_enabled = true; + modified_spec.zkvm_min_proofs_required = zkvm_config.min_proofs_required; + + Arc::new(modified_spec) + } else { + spec + }; + + let ordered_custody_column_indices = + compute_ordered_custody_column_indices::(node_id, &spec).map_err(|e| { + format!("Failed to compute ordered custody column indices: {:?}", e) + })?; + let builder = BeaconChainBuilder::new(eth_spec_instance, Arc::new(kzg)) .store(store) .task_executor(context.executor.clone()) @@ -202,7 +228,9 @@ where .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) + .zkvm_execution_layer_config(config.zkvm_execution_layer.clone()) .node_custody_type(config.chain.node_custody_type) + .ordered_custody_column_indices(ordered_custody_column_indices) .validator_monitor_config(config.validator_monitor.clone()) .rng(Box::new( StdRng::try_from_rng(&mut OsRng) @@ -225,6 +253,44 @@ where builder }; + // Set up proof generation service if zkVM is configured with generation proof types + let builder = if let Some(ref zkvm_config) = config.zkvm_execution_layer { + if !zkvm_config.generation_proof_types.is_empty() { + // Validate that proof generation requires an execution layer + // Proof-generating nodes will validate blocks via EL execution, not proofs + if config.execution_layer.is_none() { + return Err( + "Proof generation requires an EL. \ + Nodes generating proofs must validate blocks via an execution layer. \ + To run a lightweight verifier node (without EL), omit --zkvm-generation-proof-types." + .into(), + ); + } + + // Create channel for proof generation events + let (proof_gen_tx, proof_gen_rx) = + tokio::sync::mpsc::unbounded_channel::>(); + + // Create generator registry with enabled proof types + let registry = Arc::new( + zkvm_execution_layer::GeneratorRegistry::new_with_dummy_generators( + zkvm_config.generation_proof_types.clone(), + ), + ); + + // Store receiver for later when we spawn the service + self.proof_generation_rx = Some(proof_gen_rx); + + builder + .zkvm_generator_registry(registry) + .proof_generation_tx(proof_gen_tx) + } else { + builder + } + } else { + builder + }; + let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false); // If the client is expect to resume but there's no beacon chain in the database, @@ -345,10 +411,11 @@ where .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; - let anchor_blobs = if anchor_block.message().body().has_blobs() { + + // Providing blobs is optional now and not providing them is recommended. + // Backfill can handle downloading the blobs or columns for the checkpoint block. 
+ let anchor_blobs = if let Some(anchor_blobs_bytes) = anchor_blobs_bytes { let max_blobs_len = spec.max_blobs_per_block(anchor_block.epoch()) as usize; - let anchor_blobs_bytes = anchor_blobs_bytes - .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; Some( BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes, max_blobs_len) .map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?, @@ -409,7 +476,11 @@ where debug!("Downloaded finalized block"); - let blobs = if block.message().body().has_blobs() { + // `get_blob_sidecars` API is deprecated from Fulu and may not be supported by all servers + let is_before_fulu = !spec + .fork_name_at_slot::(finalized_block_slot) + .fulu_enabled(); + let blobs = if is_before_fulu && block.message().body().has_blobs() { debug!("Downloading finalized blobs"); if let Some(response) = remote .get_blob_sidecars::(BlockId::Root(block_root), None, &spec) @@ -453,7 +524,11 @@ where } /// Starts the networking stack. - pub async fn network(mut self, config: Arc) -> Result { + pub async fn network( + mut self, + config: Arc, + local_keypair: Keypair, + ) -> Result { let beacon_chain = self .beacon_chain .clone() @@ -481,12 +556,11 @@ where context.executor, libp2p_registry.as_mut(), beacon_processor_channels.beacon_processor_tx.clone(), + local_keypair, ) .await .map_err(|e| format!("Failed to start network: {:?}", e))?; - init_custody_context(beacon_chain, &network_globals)?; - self.network_globals = Some(network_globals); self.network_senders = Some(network_senders); self.libp2p_registry = libp2p_registry; @@ -597,6 +671,7 @@ where /// /// If type inference errors are being raised, see the comment on the definition of `Self`. #[allow(clippy::type_complexity)] + #[instrument(name = "build_client", skip_all)] pub fn build( mut self, ) -> Result>, String> { @@ -777,6 +852,26 @@ where beacon_chain.task_executor.clone(), beacon_chain.clone(), ); + + // Start proof generation service if configured + if let Some(proof_gen_rx) = self.proof_generation_rx { + let network_tx = self + .network_senders + .as_ref() + .ok_or("proof_generation_service requires network_senders")? + .network_send(); + + let service = proof_generation_service::ProofGenerationService::new( + beacon_chain.clone(), + proof_gen_rx, + network_tx, + ); + + runtime_context.executor.spawn( + async move { service.run().await }, + "proof_generation_service", + ); + } } Ok(Client { @@ -788,21 +883,6 @@ where } } -fn init_custody_context( - chain: Arc>, - network_globals: &NetworkGlobals, -) -> Result<(), String> { - let node_id = network_globals.local_enr().node_id().raw(); - let spec = &chain.spec; - let custody_groups_ordered = - get_custody_groups_ordered(node_id, spec.number_of_custody_groups, spec) - .map_err(|e| format!("Failed to compute custody groups: {:?}", e))?; - chain - .data_availability_checker - .custody_context() - .init_ordered_data_columns_from_custody_groups(custody_groups_ordered, spec) -} - impl ClientBuilder> where @@ -812,6 +892,7 @@ where TColdStore: ItemStore + 'static, { /// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self. 
+ #[instrument(skip_all)] pub fn build_beacon_chain(mut self) -> Result { let context = self .runtime_context diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index aeaa196df86..c62e3afb2e2 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -69,6 +69,7 @@ pub struct Config { pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, pub execution_layer: Option, + pub zkvm_execution_layer: Option, pub trusted_setup: Vec, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, @@ -94,6 +95,7 @@ impl Default for Config { network: NetworkConfig::default(), chain: <_>::default(), execution_layer: None, + zkvm_execution_layer: None, trusted_setup: get_trusted_setup(), beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index 605a7346886..6ff3eb6a70f 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -15,6 +15,13 @@ pub static IS_SYNCED: LazyLock> = LazyLock::new(|| { ) }); +pub static IS_OPTIMISTIC_SYNC: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "optimistic_sync", + "Whether the beacon chain is in optimistic sync mode. 0 if verified, 1 if optimistic", + ) +}); + pub static NOTIFIER_HEAD_SLOT: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "notifier_head_slot", diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index b1cf1bd7f55..52a3b92cb60 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -369,8 +369,12 @@ pub fn spawn_notifier( let block_hash = match beacon_chain.canonical_head.head_execution_status() { Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(), - Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), + Ok(ExecutionStatus::Valid(hash)) => { + metrics::set_gauge(&metrics::IS_OPTIMISTIC_SYNC, 0); + format!("{} (verified)", hash) + } Ok(ExecutionStatus::Optimistic(hash)) => { + metrics::set_gauge(&metrics::IS_OPTIMISTIC_SYNC, 1); warn!( info = "chain not fully verified, \ block and attestation production disabled until execution engine syncs", diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index dbf393706cd..9add1369194 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -842,7 +842,7 @@ impl MockBuilder { .beacon_client .get_beacon_blocks::(BlockId::Finalized) .await - .map_err(|_| "couldn't get finalized block".to_string())? + .map_err(|e| format!("couldn't get finalized block: {e:?}"))? .ok_or_else(|| "missing finalized block".to_string())? .data() .message() @@ -855,7 +855,7 @@ impl MockBuilder { .beacon_client .get_beacon_blocks::(BlockId::Justified) .await - .map_err(|_| "couldn't get justified block".to_string())? + .map_err(|e| format!("couldn't get justified block: {e:?}"))? .ok_or_else(|| "missing justified block".to_string())?
.data() .message() diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 9026792b911..6389b34961a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -46,11 +46,12 @@ pub use block_id::BlockId; use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; +use eth2::StatusCode; use eth2::types::{ self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, - ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, StateId as CoreStateId, - ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, ValidatorStatus, - ValidatorsRequestBody, + ForkChoiceExtraData, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, + StateId as CoreStateId, ValidatorBalancesRequestBody, ValidatorId, + ValidatorIdentitiesRequestBody, ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; @@ -103,7 +104,6 @@ use version::{ unsupported_version_rejection, }; use warp::Reply; -use warp::http::StatusCode; use warp::hyper::Body; use warp::sse::Event; use warp::{Filter, Rejection, http::Response}; @@ -3033,12 +3033,38 @@ pub fn serve( .execution_status .block_hash() .map(|block_hash| block_hash.into_root()), + extra_data: ForkChoiceExtraData { + target_root: node.target_root, + justified_root: node.justified_checkpoint.root, + finalized_root: node.finalized_checkpoint.root, + unrealized_justified_root: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_finalized_root: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_justified_epoch: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + unrealized_finalized_epoch: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + execution_status: node.execution_status.to_string(), + best_child: node + .best_child + .and_then(|index| proto_array.nodes.get(index)) + .map(|child| child.root), + best_descendant: node + .best_descendant + .and_then(|index| proto_array.nodes.get(index)) + .map(|descendant| descendant.root), + }, } }) .collect::>(); Ok(ForkChoice { - justified_checkpoint: proto_array.justified_checkpoint, - finalized_checkpoint: proto_array.finalized_checkpoint, + justified_checkpoint: beacon_fork_choice.justified_checkpoint(), + finalized_checkpoint: beacon_fork_choice.finalized_checkpoint(), fork_choice_nodes, }) }) @@ -4071,7 +4097,7 @@ pub fn serve( convert_rejection(rx.await.unwrap_or_else(|_| { Ok(warp::reply::with_status( warp::reply::json(&"No response from channel"), - eth2::StatusCode::INTERNAL_SERVER_ERROR, + warp::http::StatusCode::INTERNAL_SERVER_ERROR, ) .into_response()) })) diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index bfe41c8706c..9671a72da26 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -9,9 +9,12 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, build_blob_data_column_sidecars, }; -use eth2::types::{ - BlobsBundle, BroadcastValidation, ErrorMessage, ExecutionPayloadAndBlobs, FullPayloadContents, - PublishBlockRequest, SignedBlockContents, +use eth2::{ + StatusCode, + types::{ + BlobsBundle, BroadcastValidation, ErrorMessage, 
ExecutionPayloadAndBlobs, + FullPayloadContents, PublishBlockRequest, SignedBlockContents, + }, }; use execution_layer::{ProvenancedPayload, SubmitBlindedBlockResponse}; use futures::TryFutureExt; @@ -32,7 +35,6 @@ use types::{ DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, SignedBlindedBeaconBlock, }; -use warp::http::StatusCode; use warp::{Rejection, Reply, reply::Response}; pub type UnverifiedBlobs = Option<( @@ -302,7 +304,7 @@ pub async fn publish_block>( message: "duplicate block".to_string(), stacktraces: vec![], }), - duplicate_status_code, + warp_utils::status_code::convert(duplicate_status_code)?, ) .into_response()) } diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6fb5a8ed8aa..8d99e696cf7 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3057,11 +3057,11 @@ impl ApiTester { assert_eq!( result.justified_checkpoint, - expected_proto_array.justified_checkpoint + beacon_fork_choice.justified_checkpoint() ); assert_eq!( result.finalized_checkpoint, - expected_proto_array.finalized_checkpoint + beacon_fork_choice.finalized_checkpoint() ); let expected_fork_choice_nodes: Vec = expected_proto_array @@ -3088,6 +3088,32 @@ impl ApiTester { .execution_status .block_hash() .map(|block_hash| block_hash.into_root()), + extra_data: ForkChoiceExtraData { + target_root: node.target_root, + justified_root: node.justified_checkpoint.root, + finalized_root: node.finalized_checkpoint.root, + unrealized_justified_root: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_finalized_root: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_justified_epoch: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + unrealized_finalized_epoch: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + execution_status: node.execution_status.to_string(), + best_child: node + .best_child + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|child| child.root), + best_descendant: node + .best_descendant + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|descendant| descendant.root), + }, } }) .collect(); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 035452e4b2f..a6dd276c197 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -3,6 +3,7 @@ name = "lighthouse_network" version = "0.2.0" authors = ["Sigma Prime "] edition = { workspace = true } +autotests = false [features] libp2p-websocket = [] @@ -72,6 +73,9 @@ features = [ [dev-dependencies] async-channel = { workspace = true } logging = { workspace = true } -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } +proptest = { workspace = true } tempfile = { workspace = true } + +[[test]] +name = "lighthouse_network_tests" +path = "tests/main.rs" diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 4c285ea86c8..f2268b39b26 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -29,6 +29,8 @@ pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// The ENR field specifying the peerdas 
custody group count. pub const PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY: &str = "cgc"; +/// The ENR field specifying whether zkVM execution proofs are enabled. +pub const ZKVM_ENABLED_ENR_KEY: &str = "zkvm"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -43,6 +45,9 @@ pub trait Eth2Enr { /// The peerdas custody group count associated with the ENR. fn custody_group_count(&self, spec: &ChainSpec) -> Result; + /// Whether zkVM execution proofs are enabled for this node. + fn zkvm_enabled(&self) -> bool; + /// The next fork digest associated with the ENR. fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>; @@ -85,6 +90,13 @@ impl Eth2Enr for Enr { } } + fn zkvm_enabled(&self) -> bool { + // If the key exists and is true, zkVM is enabled, otherwise false + self.get_decodable::(ZKVM_ENABLED_ENR_KEY) + .and_then(|result| result.ok()) + .unwrap_or(false) + } + fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> { self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY) .ok_or("ENR next fork digest non-existent")? @@ -278,6 +290,10 @@ pub fn build_enr( &bitfield.as_ssz_bytes().into(), ); + if spec.is_zkvm_enabled() { + builder.add_value(ZKVM_ENABLED_ENR_KEY, &true); + } + // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); @@ -308,11 +324,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and - // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will - // likely only be true for non-validating nodes. + // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY and ZKVM_ENABLED_ENR_KEY keys to match, + // otherwise we use a new ENR. This will likely only be true for non-validating nodes. && local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) && local_enr.get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) + && local_enr.get_decodable::(ZKVM_ENABLED_ENR_KEY) == disk_enr.get_decodable(ZKVM_ENABLED_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 3589882ae9b..018bf580504 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -15,7 +15,10 @@ pub use libp2p::identity::{Keypair, PublicKey}; use network_utils::enr_ext::{CombinedKeyExt, EnrExt, peer_id_to_node_id}; use alloy_rlp::bytes::Bytes; -use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; +use enr::{ + ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY, + ZKVM_ENABLED_ENR_KEY, +}; use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::core::transport::PortUse; @@ -560,6 +563,12 @@ impl Discovery { } // Data column subnets are computed from node ID. No subnet bitfield in the ENR.
Subnet::DataColumn(_) => return Ok(()), + // Execution proof uses a simple boolean flag in the ENR + Subnet::ExecutionProof => { + self.discv5 + .enr_insert(ZKVM_ENABLED_ENR_KEY, &value) + .map_err(|e| format!("{:?}", e))?; + } } // replace the global version @@ -904,6 +913,7 @@ impl Discovery { Subnet::Attestation(_) => "attestation", Subnet::SyncCommittee(_) => "sync_committee", Subnet::DataColumn(_) => "data_column", + Subnet::ExecutionProof => "execution_proof", }; if let Some(v) = metrics::get_int_counter( diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 6e841c25a50..dc1ac54e97b 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -41,6 +41,10 @@ where false } } + Subnet::ExecutionProof => { + // Check if ENR advertises zkVM support + enr.zkvm_enabled() + } }); if !predicate { diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index b6be9b52223..3d96a08357d 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -109,7 +109,7 @@ pub use discovery::Eth2Enr; pub use discv5; pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p; -pub use libp2p::{Multiaddr, multiaddr}; +pub use libp2p::{Multiaddr, identity, multiaddr}; pub use libp2p::{PeerId, Swarm, core::ConnectedPoint}; pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index b2ed6524861..57a5fa68a23 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -20,6 +20,8 @@ pub struct Config { pub metrics_enabled: bool, /// Whether quic is enabled. pub quic_enabled: bool, + /// Whether execution proofs are enabled. + pub execution_proof_enabled: bool, /// Target number of peers to connect to. pub target_peer_count: usize, @@ -40,6 +42,7 @@ impl Default for Config { discovery_enabled: true, metrics_enabled: false, quic_enabled: true, + execution_proof_enabled: false, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ad16bb0421c..d8dde408846 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,7 +1,7 @@ //! Implementation of Lighthouse's peer management system. use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RpcErrorResponse}; -use crate::service::TARGET_SUBNET_PEERS; +use crate::service::{TARGET_EXECUTION_PROOF_PEERS, TARGET_SUBNET_PEERS}; use crate::{Gossipsub, NetworkGlobals, PeerId, Subnet, SubnetDiscovery, metrics}; use delay_map::HashSetDelay; use discv5::Enr; @@ -113,6 +113,8 @@ pub struct PeerManager { /// discovery queries for subnet peers if we disconnect from existing sync /// committee subnet peers. sync_committee_subnets: HashMap, + /// Keeps track of whether this node has zkVM execution proof support enabled. 
+ execution_proof_enabled: bool, /// A mapping of all custody groups to column subnets to avoid re-computation. subnets_by_custody_group: HashMap>, /// The heartbeat interval to perform routine maintenance. @@ -162,6 +164,7 @@ impl PeerManager { let config::Config { discovery_enabled, metrics_enabled, + execution_proof_enabled, target_peer_count, status_interval, ping_interval_inbound, @@ -199,6 +202,7 @@ impl PeerManager { target_peers: target_peer_count, temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), + execution_proof_enabled, subnets_by_custody_group, heartbeat, discovery_enabled, @@ -601,6 +605,7 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -621,6 +626,7 @@ impl PeerManager { Protocol::BlobsByRoot => return, Protocol::DataColumnsByRoot => return, Protocol::DataColumnsByRange => return, + Protocol::ExecutionProofsByRoot => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -644,6 +650,7 @@ impl PeerManager { Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, @@ -1004,6 +1011,46 @@ impl PeerManager { } } + /// Run discovery query for zkVM-enabled peers if we fall below `TARGET_EXECUTION_PROOF_PEERS`. + fn maintain_execution_proof_peers(&mut self) { + // Only maintain peers if zkVM is enabled + if !self.execution_proof_enabled { + return; + } + + // Check if we have enough zkVM-enabled peers + // Count peers subscribed to the execution_proof gossip topic + // TODO(zkproofs): Peers do not advertise whether they generate proofs, + // so we cannot favour proof generators here. This is fine for optional + // proofs; for mandatory proofs, a well-connected builder will propagate + // them to most of the network.
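+ // The count below only covers currently connected peers; the check re-runs
+ // on every heartbeat, so peers lost later trigger a fresh discovery query.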
+ let zkvm_peer_count = self + .network_globals + .peers + .read() + .connected_peers() + .filter(|(_, info)| { + // Check if peer is subscribed to ExecutionProof gossip topic + info.on_subnet_gossipsub(&Subnet::ExecutionProof) + }) + .count(); + + if zkvm_peer_count < TARGET_EXECUTION_PROOF_PEERS { + debug!( + current_peers = zkvm_peer_count, + target = TARGET_EXECUTION_PROOF_PEERS, + "Making discovery query for zkVM-enabled peers" + ); + self.events.push(PeerManagerEvent::DiscoverSubnetPeers(vec![ + SubnetDiscovery { + subnet: Subnet::ExecutionProof, + min_ttl: None, + }, + ])); + } + } + fn maintain_trusted_peers(&mut self) { let trusted_peers = self.trusted_peers.clone(); for trusted_peer in trusted_peers { @@ -1081,6 +1128,10 @@ impl PeerManager { Subnet::DataColumn(id) => { peer_info.custody_subnets.insert(id); } + Subnet::ExecutionProof => { + // ExecutionProof uses a single topic, not subnet-based + // So there is no subnet assignment to track + } } } @@ -1449,6 +1500,9 @@ impl PeerManager { // Maintain minimum count for sync committee peers. self.maintain_sync_committee_peers(); + // Maintain minimum count for zkVM-enabled peers (if zkVM is enabled). + self.maintain_execution_proof_peers(); + // Prune any excess peers back to our target in such a way that incentivises good scores and // a uniform distribution of subnets. self.prune_excess_peers(); @@ -2975,8 +3029,7 @@ mod tests { use crate::peer_manager::tests::build_peer_manager_with_trusted_peers; use crate::rpc::{MetaData, MetaDataV3}; use libp2p::PeerId; - use quickcheck::{Arbitrary, Gen, TestResult}; - use quickcheck_macros::quickcheck; + use proptest::prelude::*; use std::collections::HashSet; use tokio::runtime::Runtime; use types::{DataColumnSubnetId, Unsigned}; @@ -2994,159 +3047,202 @@ mod tests { custody_subnets: HashSet, } - impl Arbitrary for PeerCondition { - fn arbitrary(g: &mut Gen) -> Self { - let attestation_net_bitfield = { - let len = ::SubnetBitfieldLength::to_usize(); - let mut bitfield = Vec::with_capacity(len); - for _ in 0..len { - bitfield.push(bool::arbitrary(g)); - } - bitfield - }; - - let sync_committee_net_bitfield = { - let len = ::SyncCommitteeSubnetCount::to_usize(); - let mut bitfield = Vec::with_capacity(len); - for _ in 0..len { - bitfield.push(bool::arbitrary(g)); - } - bitfield - }; - - let spec = E::default_spec(); - let custody_subnets = { - let total_subnet_count = spec.data_column_sidecar_subnet_count; - let custody_subnet_count = u64::arbitrary(g) % (total_subnet_count + 1); // 0 to 128 - (spec.custody_requirement..total_subnet_count) - .filter(|_| bool::arbitrary(g)) - .map(DataColumnSubnetId::new) - .take(custody_subnet_count as usize) - .collect() - }; - - PeerCondition { - peer_id: PeerId::random(), - outgoing: bool::arbitrary(g), - attestation_net_bitfield, - sync_committee_net_bitfield, - score: f64::arbitrary(g), - trusted: bool::arbitrary(g), - gossipsub_score: f64::arbitrary(g), - custody_subnets, - } - } - } - - #[quickcheck] - fn prune_excess_peers(peer_conditions: Vec) -> TestResult { - let target_peer_count = DEFAULT_TARGET_PEERS; + fn peer_condition_strategy() -> impl Strategy { + let attestation_len = ::SubnetBitfieldLength::to_usize(); + let sync_committee_len = ::SyncCommitteeSubnetCount::to_usize(); let spec = E::default_spec(); - if peer_conditions.len() < target_peer_count { - return TestResult::discard(); - } - let trusted_peers: Vec<_> = peer_conditions - .iter() - .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) - .collect(); - // If we have a high 
percentage of trusted peers, it is very difficult to reason about - // the expected results of the pruning. - if trusted_peers.len() > peer_conditions.len() / 3_usize { - return TestResult::discard(); - } - let rt = Runtime::new().unwrap(); - - rt.block_on(async move { - // Collect all the trusted peers - let mut peer_manager = - build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; + let total_subnet_count = spec.data_column_sidecar_subnet_count; + let custody_requirement = spec.custody_requirement; + + // Create the pool of available subnet IDs + let available_subnets: Vec = (custody_requirement..total_subnet_count).collect(); + let max_custody_subnets = available_subnets.len(); + + // Trusted peer probability constants - 1 in 5 peers should be trusted (20%) + const TRUSTED_PEER_WEIGHT_FALSE: u32 = 4; + const TRUSTED_PEER_WEIGHT_TRUE: u32 = 1; + + ( + proptest::collection::vec(any::(), attestation_len), + proptest::collection::vec(any::(), sync_committee_len), + any::(), + any::(), + any::(), + // Weight trusted peers to avoid test rejection due to too many trusted peers + prop_oneof![ + TRUSTED_PEER_WEIGHT_FALSE => Just(false), + TRUSTED_PEER_WEIGHT_TRUE => Just(true), + ], + 0..=max_custody_subnets, + ) + .prop_flat_map( + move |( + attestation_net_bitfield, + sync_committee_net_bitfield, + score, + outgoing, + gossipsub_score, + trusted, + custody_subnet_count, + )| { + // Use proptest's subsequence to select a random subset of subnets + let custody_subnets_strategy = proptest::sample::subsequence( + available_subnets.clone(), + custody_subnet_count, + ); - // Create peers based on the randomly generated conditions. - for condition in &peer_conditions { - let mut attnets = crate::types::EnrAttestationBitfield::::new(); - let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + ( + Just(attestation_net_bitfield), + Just(sync_committee_net_bitfield), + Just(score), + Just(outgoing), + Just(gossipsub_score), + Just(trusted), + custody_subnets_strategy, + ) + }, + ) + .prop_map( + |( + attestation_net_bitfield, + sync_committee_net_bitfield, + score, + outgoing, + gossipsub_score, + trusted, + custody_subnets_vec, + )| { + let custody_subnets: HashSet = custody_subnets_vec + .into_iter() + .map(DataColumnSubnetId::new) + .collect(); + + PeerCondition { + peer_id: PeerId::random(), + outgoing, + attestation_net_bitfield, + sync_committee_net_bitfield, + score, + trusted, + gossipsub_score, + custody_subnets, + } + }, + ) + } - if condition.outgoing { - peer_manager.inject_connect_outgoing( - &condition.peer_id, - "/ip4/0.0.0.0".parse().unwrap(), - None, - ); - } else { - peer_manager.inject_connect_ingoing( - &condition.peer_id, - "/ip4/0.0.0.0".parse().unwrap(), - None, - ); - } + // Upper bound for testing peer pruning - we test with at least the target number + // and up to 50% more than the target to verify pruning behavior. + const MAX_TEST_PEERS: usize = 300; - for (i, value) in condition.attestation_net_bitfield.iter().enumerate() { - attnets.set(i, *value).unwrap(); - } + proptest! 
{ + #[test] + fn prune_excess_peers(peer_conditions in proptest::collection::vec(peer_condition_strategy(), DEFAULT_TARGET_PEERS..=MAX_TEST_PEERS)) { + let target_peer_count = DEFAULT_TARGET_PEERS; + let spec = E::default_spec(); - for (i, value) in condition.sync_committee_net_bitfield.iter().enumerate() { - syncnets.set(i, *value).unwrap(); - } + let trusted_peers: Vec<_> = peer_conditions + .iter() + .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) + .collect(); + // If we have a high percentage of trusted peers, it is very difficult to reason about + // the expected results of the pruning. + prop_assume!(trusted_peers.len() <= peer_conditions.len() / 3_usize); + + let rt = Runtime::new().unwrap(); + + let result = rt.block_on(async move { + // Collect all the trusted peers + let mut peer_manager = + build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; + + // Create peers based on the randomly generated conditions. + for condition in &peer_conditions { + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + if condition.outgoing { + peer_manager.inject_connect_outgoing( + &condition.peer_id, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } else { + peer_manager.inject_connect_ingoing( + &condition.peer_id, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } - let subnets_per_custody_group = - spec.data_column_sidecar_subnet_count / spec.number_of_custody_groups; - let metadata = MetaDataV3 { - seq_number: 0, - attnets, - syncnets, - custody_group_count: condition.custody_subnets.len() as u64 - / subnets_per_custody_group, - }; + for (i, value) in condition.attestation_net_bitfield.iter().enumerate() { + attnets.set(i, *value).unwrap(); + } - let mut peer_db = peer_manager.network_globals.peers.write(); - let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); - peer_info.set_meta_data(MetaData::V3(metadata)); - peer_info.set_gossipsub_score(condition.gossipsub_score); - peer_info.add_to_score(condition.score); - peer_info.set_custody_subnets(condition.custody_subnets.clone()); + for (i, value) in condition.sync_committee_net_bitfield.iter().enumerate() { + syncnets.set(i, *value).unwrap(); + } - for subnet in peer_info.long_lived_subnets() { - peer_db.add_subscription(&condition.peer_id, subnet); + let subnets_per_custody_group = + spec.data_column_sidecar_subnet_count / spec.number_of_custody_groups; + let metadata = MetaDataV3 { + seq_number: 0, + attnets, + syncnets, + custody_group_count: condition.custody_subnets.len() as u64 + / subnets_per_custody_group, + }; + + let mut peer_db = peer_manager.network_globals.peers.write(); + let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); + peer_info.set_meta_data(MetaData::V3(metadata)); + peer_info.set_gossipsub_score(condition.gossipsub_score); + peer_info.add_to_score(condition.score); + peer_info.set_custody_subnets(condition.custody_subnets.clone()); + + for subnet in peer_info.long_lived_subnets() { + peer_db.add_subscription(&condition.peer_id, subnet); + } } - } - - // Perform the heartbeat. - peer_manager.heartbeat(); - // The minimum number of connected peers cannot be less than the target peer count - // or submitted peers. + // Perform the heartbeat. + peer_manager.heartbeat(); + + // The minimum number of connected peers cannot be less than the target peer count + // or submitted peers. 
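+ // e.g. if the target is 100 and only 80 peers were generated, we expect
+ // min(100, 80) = 80 connected peers, raised to the trusted-peer count
+ // when that happens to be larger.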
+ + let expected_peer_count = target_peer_count.min(peer_conditions.len()); + // Trusted peers could make this larger however. + let no_of_trusted_peers = peer_conditions + .iter() + .filter(|condition| condition.trusted) + .count(); + let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); + + let target_peer_condition = + peer_manager.network_globals.connected_or_dialing_peers() + == expected_peer_count; + + // It could be that we reach our target outbound limit and are unable to prune any + // extra, which violates the target_peer_condition. + let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); + let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); + + // No trusted peers should be disconnected + let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { + condition.trusted + && !peer_manager + .network_globals + .peers + .read() + .is_connected(&condition.peer_id) + }); - let expected_peer_count = target_peer_count.min(peer_conditions.len()); - // Trusted peers could make this larger however. - let no_of_trusted_peers = peer_conditions - .iter() - .filter(|condition| condition.trusted) - .count(); - let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); - - let target_peer_condition = - peer_manager.network_globals.connected_or_dialing_peers() - == expected_peer_count; - - // It could be that we reach our target outbound limit and are unable to prune any - // extra, which violates the target_peer_condition. - let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); - let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); - - // No trusted peers should be disconnected - let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { - condition.trusted - && !peer_manager - .network_globals - .peers - .read() - .is_connected(&condition.peer_id + (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected }); - TestResult::from_bool( - (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected, - ) - }) + prop_assert!(result); + } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index c289cb9a69c..483da11be0b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -105,6 +105,14 @@ impl PeerInfo { Subnet::DataColumn(subnet_id) => { return self.is_assigned_to_custody_subnet(subnet_id); } + Subnet::ExecutionProof => { + // ExecutionProof capability is advertised via the ENR zkvm flag, not metadata. + // A node cannot dynamically change what it supports.
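+ // With no ENR on record for the peer, conservatively report no zkVM support.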
+ if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled(); + } + return false; + } } } false @@ -272,6 +280,11 @@ impl PeerInfo { return true; } + // Check if the peer has zkVM enabled (execution proof support) + if let Some(enr) = self.enr.as_ref() { + return enr.zkvm_enabled(); + } + false } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 77d2a34e16e..34c42fdd041 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -16,10 +16,10 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, - ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, + ExecutionProof, ForkContext, ForkName, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, SignedBeaconBlockGloas, }; @@ -80,6 +80,7 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::BlobsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -360,6 +361,7 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), + RequestType::ExecutionProofsByRoot(req) => req.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), @@ -568,6 +570,16 @@ fn handle_rpc_request( )?, }, ))), + SupportedProtocol::ExecutionProofsByRootV1 => { + let request = ExecutionProofsByRootRequest::from_ssz_bytes(decoded_buffer) + .map_err(RPCError::SSZDecodeError)?; + + request + .validate(spec) + .map_err(RPCError::InvalidData)?; + + Ok(Some(RequestType::ExecutionProofsByRoot(request))) + } SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -731,6 +743,11 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::ExecutionProofsByRootV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRoot(Arc::new( + ExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -910,8 +927,8 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - 
DataColumnsByRootIdentifier, EmptyBlock, Epoch, FixedBytesExtended, FullPayload, - KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, ExecutionProofId, FixedBytesExtended, + FullPayload, KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, }; @@ -1109,6 +1126,18 @@ mod tests { .unwrap() } + fn execution_proofs_by_root_request( + _fork_name: ForkName, + _spec: &ChainSpec, + ) -> ExecutionProofsByRootRequest { + ExecutionProofsByRootRequest::new( + Hash256::zero(), + vec![ExecutionProofId::new(0).unwrap()], + 2, + ) + .unwrap() + } + fn ping_message() -> Ping { Ping { data: 1 } } @@ -1263,6 +1292,9 @@ mod tests { RequestType::DataColumnsByRange(dcbrange) => { assert_eq!(decoded, RequestType::DataColumnsByRange(dcbrange)) } + RequestType::ExecutionProofsByRoot(exec_proofs) => { + assert_eq!(decoded, RequestType::ExecutionProofsByRoot(exec_proofs)) + } RequestType::Ping(ping) => { assert_eq!(decoded, RequestType::Ping(ping)) } @@ -2004,6 +2036,10 @@ mod tests { RequestType::BlocksByRoot(bbroot_request_v1(fork_name, &chain_spec)), RequestType::BlocksByRoot(bbroot_request_v2(fork_name, &chain_spec)), RequestType::DataColumnsByRoot(dcbroot_request(fork_name, &chain_spec)), + RequestType::ExecutionProofsByRoot(execution_proofs_by_root_request( + fork_name, + &chain_spec, + )), ] }; for fork_name in ForkName::list_all() { diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index b0ee6fea64b..d23c16f8fa1 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -93,6 +93,7 @@ pub struct RateLimiterConfig { pub(super) blobs_by_root_quota: Quota, pub(super) data_columns_by_root_quota: Quota, pub(super) data_columns_by_range_quota: Quota, + pub(super) execution_proofs_by_root_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -122,6 +123,9 @@ impl RateLimiterConfig { Quota::n_every(NonZeroU64::new(16384).unwrap(), 10); pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(NonZeroU64::new(16384).unwrap(), 10); + // TODO(zkproofs): Configure this to be less arbitrary + pub const DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -141,6 +145,7 @@ impl Default for RateLimiterConfig { blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, + execution_proofs_by_root_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -201,6 +206,7 @@ impl FromStr for RateLimiterConfig { let mut blobs_by_root_quota = None; let mut data_columns_by_root_quota = None; let mut data_columns_by_range_quota = None; + let mut execution_proofs_by_root_quota = None; let mut light_client_bootstrap_quota = None; let mut 
light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -222,6 +228,9 @@ impl FromStr for RateLimiterConfig { Protocol::DataColumnsByRange => { data_columns_by_range_quota = data_columns_by_range_quota.or(quota) } + Protocol::ExecutionProofsByRoot => { + execution_proofs_by_root_quota = execution_proofs_by_root_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -257,6 +266,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA), data_columns_by_range_quota: data_columns_by_range_quota .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), + execution_proofs_by_root_quota: execution_proofs_by_root_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9aab0799521..b297ce8f08f 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -16,9 +16,9 @@ use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, - ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, - blob_sidecar::BlobSidecar, + ExecutionProof, ExecutionProofId, ForkContext, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, + SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; /// Maximum length of error message. @@ -546,6 +546,63 @@ impl DataColumnsByRootRequest { } } +/// Request execution proofs by block root and proof IDs. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRootRequest { + /// The block root we need proofs for + pub block_root: Hash256, + /// How many additional proofs we need + /// TODO(zkproofs): Remove. 
This can be inferred since `MIN_PROOFS_REQUIRED` + /// is a global value + pub count_needed: u64, + /// Proof IDs we already have (responder should exclude these) + pub already_have: Vec, +} + +impl ExecutionProofsByRootRequest { + pub fn new( + block_root: Hash256, + already_have: Vec, + count_needed: usize, + ) -> Result { + if already_have.len() > types::execution_proof::MAX_PROOFS { + return Err("Too many proof IDs in already_have"); + } + if count_needed == 0 { + return Err("count_needed must be > 0"); + } + if count_needed > types::execution_proof::MAX_PROOFS { + return Err("count_needed too large"); + } + Ok(Self { + block_root, + count_needed: count_needed as u64, + already_have, + }) + } + + pub fn validate(&self, _spec: &ChainSpec) -> Result<(), String> { + if self.already_have.len() > types::execution_proof::MAX_PROOFS { + return Err("Too many proof IDs in already_have".to_string()); + } + if self.count_needed == 0 { + return Err("count_needed must be > 0".to_string()); + } + if self.count_needed > types::execution_proof::MAX_PROOFS as u64 { + return Err(format!( + "count_needed too large: {} > {}", + self.count_needed, + types::execution_proof::MAX_PROOFS + )); + } + Ok(()) + } + + pub fn max_requested(&self) -> usize { + self.count_needed as usize + } +} + /// Request a range of light client updates from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientUpdatesByRangeRequest { @@ -613,6 +670,9 @@ pub enum RpcSuccessResponse { /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. DataColumnsByRange(Arc>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Arc), + /// A PONG response to a PING request. Pong(Ping), @@ -641,6 +701,9 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + /// Execution proofs by root stream termination. + ExecutionProofsByRoot, + /// Light client updates by range stream termination.
LightClientUpdatesByRange, } @@ -654,6 +717,7 @@ impl ResponseTermination { ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::ExecutionProofsByRoot => Protocol::ExecutionProofsByRoot, ResponseTermination::LightClientUpdatesByRange => Protocol::LightClientUpdatesByRange, } } @@ -749,6 +813,7 @@ impl RpcSuccessResponse { RpcSuccessResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, + RpcSuccessResponse::ExecutionProofsByRoot(_) => Protocol::ExecutionProofsByRoot, RpcSuccessResponse::Pong(_) => Protocol::Ping, RpcSuccessResponse::MetaData(_) => Protocol::MetaData, RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -773,7 +838,11 @@ impl RpcSuccessResponse { Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), - Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + // TODO(zkproofs): Change this when we add Slot to ExecutionProof + Self::ExecutionProofsByRoot(_) + | Self::MetaData(_) + | Self::Status(_) + | Self::Pong(_) => None, } } } @@ -833,6 +902,9 @@ impl std::fmt::Display for RpcSuccessResponse { sidecar.slot() ) } + RpcSuccessResponse::ExecutionProofsByRoot(proof) => { + write!(f, "ExecutionProofsByRoot: Block root: {}", proof.block_root) + } RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RpcSuccessResponse::MetaData(metadata) => { write!(f, "Metadata: {}", metadata.seq_number()) @@ -943,3 +1015,15 @@ impl std::fmt::Display for DataColumnsByRootRequest { ) } } + +impl std::fmt::Display for ExecutionProofsByRootRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRoot: Block Root: {}, Already Have: {}, Count Needed: {}", + self.block_root, + self.already_have.len(), + self.count_needed + ) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 08085f3c271..0428f8787a3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -17,10 +17,11 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, - EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, + EmptyBlock, Epoch, EthSpec, EthSpecId, ExecutionProof, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, + Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -249,6 +250,9 @@ pub enum Protocol { /// The `DataColumnSidecarsByRange` protocol name. 
#[strum(serialize = "data_column_sidecars_by_range")] DataColumnsByRange, + /// The `ExecutionProofsByRoot` protocol name. + #[strum(serialize = "execution_proofs_by_root")] + ExecutionProofsByRoot, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. @@ -279,6 +283,7 @@ impl Protocol { Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), + Protocol::ExecutionProofsByRoot => Some(ResponseTermination::ExecutionProofsByRoot), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -309,6 +314,7 @@ pub enum SupportedProtocol { BlobsByRootV1, DataColumnsByRootV1, DataColumnsByRangeV1, + ExecutionProofsByRootV1, PingV1, MetaDataV1, MetaDataV2, @@ -333,6 +339,7 @@ impl SupportedProtocol { SupportedProtocol::BlobsByRootV1 => "1", SupportedProtocol::DataColumnsByRootV1 => "1", SupportedProtocol::DataColumnsByRangeV1 => "1", + SupportedProtocol::ExecutionProofsByRootV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -357,6 +364,7 @@ impl SupportedProtocol { SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, + SupportedProtocol::ExecutionProofsByRootV1 => Protocol::ExecutionProofsByRoot, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -407,6 +415,12 @@ impl SupportedProtocol { ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), ]); } + if fork_context.spec.is_zkvm_enabled() { + supported.push(ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )); + } supported } } @@ -519,6 +533,7 @@ impl ProtocolId { DataColumnsByRangeRequest::ssz_min_len(), DataColumnsByRangeRequest::ssz_max_len::(), ), + Protocol::ExecutionProofsByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -555,6 +570,7 @@ impl ProtocolId { Protocol::DataColumnsByRange => { rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } + Protocol::ExecutionProofsByRoot => rpc_execution_proof_limits(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -596,6 +612,7 @@ impl ProtocolId { | SupportedProtocol::StatusV2 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 + | SupportedProtocol::ExecutionProofsByRootV1 | SupportedProtocol::PingV1 | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 @@ -645,6 +662,11 @@ pub fn rpc_data_column_limits( ) } +pub fn rpc_execution_proof_limits() -> RpcLimits { + // TODO(zkproofs): Can max proof size change over hardforks? 
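+ // For now the limits are static, derived from the `ExecutionProof` type itself rather than from the fork context.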
+ RpcLimits::new(ExecutionProof::min_size(), ExecutionProof::max_size()) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -724,6 +746,7 @@ pub enum RequestType { BlobsByRoot(BlobsByRootRequest), DataColumnsByRoot(DataColumnsByRootRequest), DataColumnsByRange(DataColumnsByRangeRequest), + ExecutionProofsByRoot(ExecutionProofsByRootRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -747,6 +770,7 @@ impl RequestType { RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), + RequestType::ExecutionProofsByRoot(req) => req.max_requested() as u64, RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -776,6 +800,7 @@ impl RequestType { RequestType::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, + RequestType::ExecutionProofsByRoot(_) => SupportedProtocol::ExecutionProofsByRootV1, RequestType::Ping(_) => SupportedProtocol::PingV1, RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -807,6 +832,7 @@ impl RequestType { RequestType::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, + RequestType::ExecutionProofsByRoot(_) => ResponseTermination::ExecutionProofsByRoot, RequestType::Status(_) => unreachable!(), RequestType::Goodbye(_) => unreachable!(), RequestType::Ping(_) => unreachable!(), @@ -853,6 +879,10 @@ impl RequestType { SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy, )], + RequestType::ExecutionProofsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + )], RequestType::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -891,6 +921,7 @@ impl RequestType { RequestType::BlobsByRoot(_) => false, RequestType::DataColumnsByRoot(_) => false, RequestType::DataColumnsByRange(_) => false, + RequestType::ExecutionProofsByRoot(_) => false, RequestType::Ping(_) => true, RequestType::MetaData(_) => true, RequestType::LightClientBootstrap(_) => true, @@ -1004,6 +1035,9 @@ impl std::fmt::Display for RequestType { RequestType::DataColumnsByRange(req) => { write!(f, "Data columns by range: {:?}", req) } + RequestType::ExecutionProofsByRoot(req) => { + write!(f, "Execution proofs by root: {:?}", req) + } RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), RequestType::MetaData(_) => write!(f, "MetaData request"), RequestType::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 8b364f506cc..f70b29cfe45 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -105,6 +105,8 @@ pub struct RPCRateLimiter { dcbroot_rl: Limiter, /// DataColumnsByRange rate limiter. dcbrange_rl: Limiter, + /// ExecutionProofsByRoot rate limiter. + execution_proofs_by_root_rl: Limiter, /// LightClientBootstrap rate limiter. 
lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -148,6 +150,8 @@ pub struct RPCRateLimiterBuilder { dcbroot_quota: Option, /// Quota for the DataColumnsByRange protocol. dcbrange_quota: Option, + /// Quota for the ExecutionProofsByRoot protocol. + execution_proofs_by_root_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. @@ -173,6 +177,7 @@ impl RPCRateLimiterBuilder { Protocol::BlobsByRoot => self.blbroot_quota = q, Protocol::DataColumnsByRoot => self.dcbroot_quota = q, Protocol::DataColumnsByRange => self.dcbrange_quota = q, + Protocol::ExecutionProofsByRoot => self.execution_proofs_by_root_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -221,6 +226,10 @@ impl RPCRateLimiterBuilder { .dcbrange_quota .ok_or("DataColumnsByRange quota not specified")?; + let execution_proofs_by_root_quota = self + .execution_proofs_by_root_quota + .ok_or("ExecutionProofsByRoot quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -232,6 +241,7 @@ impl RPCRateLimiterBuilder { let blbroot_rl = Limiter::from_quota(blbroots_quota)?; let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; + let execution_proofs_by_root_rl = Limiter::from_quota(execution_proofs_by_root_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -255,6 +265,7 @@ impl RPCRateLimiterBuilder { blbroot_rl, dcbroot_rl, dcbrange_rl, + execution_proofs_by_root_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -308,6 +319,7 @@ impl RPCRateLimiter { blobs_by_root_quota, data_columns_by_root_quota, data_columns_by_range_quota, + execution_proofs_by_root_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -325,6 +337,10 @@ impl RPCRateLimiter { .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) + .set_quota( + Protocol::ExecutionProofsByRoot, + execution_proofs_by_root_quota, + ) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -372,6 +388,7 @@ impl RPCRateLimiter { Protocol::BlobsByRoot => &mut self.blbroot_rl, Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, Protocol::DataColumnsByRange => &mut self.dcbrange_rl, + Protocol::ExecutionProofsByRoot => &mut self.execution_proofs_by_root_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, @@ -396,6 +413,7 @@ impl RPCRateLimiter { blbroot_rl, dcbroot_rl, dcbrange_rl, + execution_proofs_by_root_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -413,6 +431,7 @@ impl RPCRateLimiter { blbroot_rl.prune(time_since_start); dcbrange_rl.prune(time_since_start); dcbroot_rl.prune(time_since_start); + 
execution_proofs_by_root_rl.prune(time_since_start); lc_bootstrap_rl.prune(time_since_start); lc_optimistic_update_rl.prune(time_since_start); lc_finality_update_rl.prune(time_since_start); diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index f1a4d87de76..d97506653b5 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,7 +3,7 @@ use libp2p::PeerId; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ - BlobSidecar, DataColumnSidecar, Epoch, EthSpec, LightClientBootstrap, + BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ExecutionProof, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; @@ -22,6 +22,8 @@ pub enum SyncRequestId { SingleBlock { id: SingleLookupReqId }, /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, + /// Request searching for execution proofs given a block hash and proof IDs. + SingleExecutionProof { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. DataColumnsByRoot(DataColumnsByRootRequestId), /// Blocks by range request @@ -164,6 +166,8 @@ pub enum Response { BlobsByRoot(Option>>), /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. DataColumnsByRoot(Option>>), + /// A response to a get EXECUTION_PROOFS_BY_ROOT request. + ExecutionProofsByRoot(Option>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. @@ -201,6 +205,10 @@ impl std::convert::From> for RpcResponse { Some(d) => RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange(d)), None => RpcResponse::StreamTermination(ResponseTermination::DataColumnsByRange), }, + Response::ExecutionProofsByRoot(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRoot(p)), + None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRoot), + }, Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 120b9e6c245..227317f79ea 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -44,6 +44,8 @@ pub struct GossipCache { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// Timeout for execution proofs. + execution_proof: Option, } #[derive(Default)] @@ -75,6 +77,8 @@ pub struct GossipCacheBuilder { light_client_finality_update: Option, /// Timeout for light client optimistic updates. light_client_optimistic_update: Option, + /// Timeout for execution proofs. + execution_proof: Option, } #[allow(dead_code)] @@ -151,6 +155,12 @@ impl GossipCacheBuilder { self } + /// Timeout for execution proof messages. 
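+ /// Falls back to the builder's default timeout when unset (see `build` below).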
+ pub fn execution_proof_timeout(mut self, timeout: Duration) -> Self { + self.execution_proof = Some(timeout); + self + } + pub fn build(self) -> GossipCache { let GossipCacheBuilder { default_timeout, @@ -167,6 +177,7 @@ impl GossipCacheBuilder { bls_to_execution_change, light_client_finality_update, light_client_optimistic_update, + execution_proof, } = self; GossipCache { expirations: DelayQueue::default(), @@ -184,6 +195,7 @@ impl GossipCacheBuilder { bls_to_execution_change: bls_to_execution_change.or(default_timeout), light_client_finality_update: light_client_finality_update.or(default_timeout), light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), + execution_proof: execution_proof.or(default_timeout), } } } @@ -211,6 +223,7 @@ impl GossipCache { GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, + GossipKind::ExecutionProof => self.execution_proof, }; let Some(expire_timeout) = expire_timeout else { return; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 1df17dffbaf..9f1530ec732 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -26,6 +26,7 @@ use gossipsub::{ TopicScoreParams, }; use gossipsub_scoring_parameters::{PeerScoreSettings, lighthouse_gossip_thresholds}; +use libp2p::identity::Keypair; use libp2p::multiaddr::{self, Multiaddr, Protocol as MProtocol}; use libp2p::swarm::behaviour::toggle::Toggle; use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; @@ -52,6 +53,10 @@ pub mod utils; /// The number of peers we target per subnet for discovery queries. pub const TARGET_SUBNET_PEERS: usize = 3; +/// The number of peers we target for execution proof peer discovery. +/// Set to 1 since we don't expect many nodes to run it +pub const TARGET_EXECUTION_PROOF_PEERS: usize = 1; + const MAX_IDENTIFY_ADDRESSES: usize = 10; /// The types of events than can be obtained from polling the behaviour. @@ -171,11 +176,10 @@ impl Network { executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, custody_group_count: u64, + local_keypair: Keypair, ) -> Result<(Self, Arc>), String> { let config = ctx.config.clone(); trace!("Libp2p Service starting"); - // initialise the node's ID - let local_keypair = utils::load_private_key(&config); // Trusted peers will also be marked as explicit in GossipSub. // Cfr. https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#explicit-peering-agreements @@ -255,6 +259,7 @@ impl Network { // .signed_contribution_and_proof_timeout(timeout) // Do not retry // .sync_committee_message_timeout(timeout) // Do not retry .bls_to_execution_change_timeout(half_epoch * 2) + .execution_proof_timeout(slot_duration) .build() }; @@ -411,6 +416,7 @@ impl Network { quic_enabled: !config.disable_quic_support, metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, + execution_proof_enabled: ctx.chain_spec.is_zkvm_enabled(), ..Default::default() }; PeerManager::new(peer_manager_cfg, network_globals.clone())? 
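Note on the signature change above: `Network::new` no longer loads the node's private key itself; callers now inject the libp2p `Keypair`. A minimal sketch of the new call shape, assuming an executor and `ServiceContext` are already in hand; `start_network` is a hypothetical wrapper and the ephemeral secp256k1 key mirrors the updated tests further down (production callers would load a persisted key instead, as `utils::load_private_key` previously did internally):

    use libp2p::identity::{Keypair, secp256k1};

    // Hedged sketch: generics follow the surrounding `impl Network` and the
    // return shape of `Network::new` shown in this diff.
    async fn start_network<E: types::EthSpec>(
        executor: task_executor::TaskExecutor,
        ctx: ServiceContext<'_>,
        custody_group_count: u64,
    ) -> Result<(), String> {
        // Ephemeral identity for illustration; real nodes persist their key.
        let local_keypair: Keypair = secp256k1::Keypair::generate().into();
        let (_network, _network_globals) =
            Network::<E>::new(executor, ctx, custody_group_count, local_keypair).await?;
        Ok(())
    }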
@@ -1563,6 +1569,17 @@ impl Network { request_type, }) } + RequestType::ExecutionProofsByRoot(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_root"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } RequestType::LightClientBootstrap(_) => { metrics::inc_counter_vec( &metrics::TOTAL_RPC_REQUESTS, @@ -1648,6 +1665,11 @@ impl Network { RpcSuccessResponse::DataColumnsByRange(resp) => { self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) } + RpcSuccessResponse::ExecutionProofsByRoot(resp) => self.build_response( + id, + peer_id, + Response::ExecutionProofsByRoot(Some(resp)), + ), // Should never be reached RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1677,6 +1699,9 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::ExecutionProofsByRoot => { + Response::ExecutionProofsByRoot(None) + } ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index a0026837e37..4f331d0e16d 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -41,7 +41,7 @@ pub fn build_transport( quic_support: bool, ) -> std::io::Result { // mplex config - let mut mplex_config = libp2p_mplex::MplexConfig::new(); + let mut mplex_config = libp2p_mplex::Config::new(); mplex_config.set_max_buffer_size(256); mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); @@ -288,6 +288,8 @@ pub(crate) fn create_whitelist_filter( for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } + // Add ExecutionProof topic + add(ExecutionProof); } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 72f2873def9..1cd46a2a723 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -8,7 +8,7 @@ use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, @@ -26,6 +26,8 @@ pub enum PubsubMessage { BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a [`DataColumnSidecar`] along with the subnet id where it was received. DataColumnSidecar(Box<(DataColumnSubnetId, Arc>)>), + /// Gossipsub message providing notification of an [`ExecutionProof`]. + ExecutionProof(Arc), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. 
 AggregateAndProofAttestation(Box<SignedAggregateAndProof<E>>),
 /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id.
@@ -135,6 +137,7 @@ impl<E: EthSpec> PubsubMessage<E> {
             PubsubMessage::DataColumnSidecar(column_sidecar_data) => {
                 GossipKind::DataColumnSidecar(column_sidecar_data.0)
             }
+            PubsubMessage::ExecutionProof(_) => GossipKind::ExecutionProof,
             PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof,
             PubsubMessage::Attestation(attestation_data) => {
                 GossipKind::Attestation(attestation_data.0)
@@ -290,6 +293,23 @@ impl<E: EthSpec> PubsubMessage<E> {
                         )),
                     }
                 }
+                GossipKind::ExecutionProof => {
+                    match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
+                        // TODO(zkproofs): we don't have the ChainSpec here; if zkVM is ever
+                        // enabled alongside gloas instead, this check must change with it
+                        Some(fork) if fork.fulu_enabled() => {
+                            let execution_proof = Arc::new(
+                                ExecutionProof::from_ssz_bytes(data)
+                                    .map_err(|e| format!("{:?}", e))?,
+                            );
+                            Ok(PubsubMessage::ExecutionProof(execution_proof))
+                        }
+                        Some(_) | None => Err(format!(
+                            "execution_proof topic invalid for given fork digest {:?}",
+                            gossip_topic.fork_digest
+                        )),
+                    }
+                }
                 GossipKind::VoluntaryExit => {
                     let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
                         .map_err(|e| format!("{:?}", e))?;
@@ -403,6 +423,7 @@ impl<E: EthSpec> PubsubMessage<E> {
             PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
             PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(),
             PubsubMessage::DataColumnSidecar(data) => data.1.as_ssz_bytes(),
+            PubsubMessage::ExecutionProof(data) => data.as_ssz_bytes(),
             PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
             PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
             PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
@@ -438,6 +459,12 @@ impl<E: EthSpec> std::fmt::Display for PubsubMessage<E> {
                 data.1.slot(),
                 data.1.index,
             ),
+            PubsubMessage::ExecutionProof(data) => write!(
+                f,
+                "ExecutionProof: block_root: {}, proof_id: {}",
+                data.block_root,
+                data.proof_id.as_u8(),
+            ),
             PubsubMessage::AggregateAndProofAttestation(att) => write!(
                 f,
                 "Aggregate and Proof: slot: {}, index: {:?}, aggregator_index: {}",
diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs
index 1892dcc83af..2d5ca95bf50 100644
--- a/beacon_node/lighthouse_network/src/types/subnet.rs
+++ b/beacon_node/lighthouse_network/src/types/subnet.rs
@@ -14,6 +14,13 @@ pub enum Subnet {
     SyncCommittee(SyncSubnetId),
     /// Represents a gossipsub data column subnet.
     DataColumn(DataColumnSubnetId),
+    /// Represents execution proof support.
+    ///
+    /// Note: ExecutionProof uses a single gossip topic (not multiple topics),
+    /// but we track it here for ENR-based peer discovery to find zkVM-enabled peers.
+    /// TODO(zkproofs): Is there a way to have peer discovery without adding the global topic
+    /// into Subnet?
+    ExecutionProof,
 }

 /// A subnet to discover peers on along with the instant after which it's no longer useful.
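Aside on the wire format above: the publish arm serialises with `as_ssz_bytes`, and the decode arm only runs once the topic's fork digest maps to a `fulu_enabled` fork. A minimal round-trip sketch, assuming nothing beyond the `ssz` `Encode`/`Decode` impls this diff already relies on (`roundtrip` is a hypothetical helper):

    use ssz::{Decode, Encode};
    use types::ExecutionProof;

    /// Publish side: `PubsubMessage::ExecutionProof(data) => data.as_ssz_bytes()`.
    /// Receive side: bytes are decoded only after the fork-digest gate passes.
    fn roundtrip(proof: &ExecutionProof) -> Result<ExecutionProof, String> {
        let bytes = proof.as_ssz_bytes();
        ExecutionProof::from_ssz_bytes(&bytes).map_err(|e| format!("{:?}", e))
    }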
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs
index cfdee907b9a..cdb572ea7bf 100644
--- a/beacon_node/lighthouse_network/src/types/topics.rs
+++ b/beacon_node/lighthouse_network/src/types/topics.rs
@@ -16,6 +16,7 @@ pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof";
 pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_";
 pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_";
 pub const DATA_COLUMN_SIDECAR_PREFIX: &str = "data_column_sidecar_";
+pub const EXECUTION_PROOF_TOPIC: &str = "execution_proof";
 pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
 pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
 pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
@@ -84,6 +85,13 @@ pub fn core_topics_to_subscribe(
         }
     }

+    // Subscribe to the execution proof topic if zkVM mode is enabled for this fork.
+    // TODO(zkproofs): this check differs from the fork checks above because there
+    // is no dedicated zkvm fork; zkVM is enabled alongside an existing fork.
+    if spec.is_zkvm_enabled_for_fork(fork_name) {
+        topics.push(GossipKind::ExecutionProof);
+    }
+
     topics
 }

@@ -102,6 +111,7 @@ pub fn is_fork_non_core_topic(topic: &GossipTopic, _fork_name: ForkName) -> bool
         | GossipKind::BeaconAggregateAndProof
         | GossipKind::BlobSidecar(_)
         | GossipKind::DataColumnSidecar(_)
+        | GossipKind::ExecutionProof
         | GossipKind::VoluntaryExit
         | GossipKind::ProposerSlashing
         | GossipKind::AttesterSlashing
@@ -148,6 +158,8 @@ pub enum GossipKind {
     BlobSidecar(u64),
     /// Topic for publishing DataColumnSidecars.
     DataColumnSidecar(DataColumnSubnetId),
+    /// Topic for publishing ExecutionProofs.
+    ExecutionProof,
     /// Topic for publishing raw attestations on a particular subnet.
#[strum(serialize = "beacon_attestation")] Attestation(SubnetId), @@ -248,6 +260,7 @@ impl GossipTopic { PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, BLS_TO_EXECUTION_CHANGE_TOPIC => GossipKind::BlsToExecutionChange, + EXECUTION_PROOF_TOPIC => GossipKind::ExecutionProof, LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match subnet_topic_index(topic) { @@ -312,6 +325,7 @@ impl std::fmt::Display for GossipTopic { GossipKind::DataColumnSidecar(column_subnet_id) => { format!("{}{}", DATA_COLUMN_SIDECAR_PREFIX, *column_subnet_id) } + GossipKind::ExecutionProof => EXECUTION_PROOF_TOPIC.into(), GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), @@ -333,6 +347,7 @@ impl From for GossipKind { Subnet::Attestation(s) => GossipKind::Attestation(s), Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), Subnet::DataColumn(s) => GossipKind::DataColumnSidecar(s), + Subnet::ExecutionProof => GossipKind::ExecutionProof, } } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 8a3047692f3..59f357454c4 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -16,6 +16,7 @@ use types::{ type E = MinimalEthSpec; +use lighthouse_network::identity::secp256k1; use lighthouse_network::rpc::config::InboundRateLimiterConfig; use tempfile::Builder as TempBuilder; @@ -29,6 +30,8 @@ pub fn spec_with_all_forks_enabled() -> ChainSpec { chain_spec.electra_fork_epoch = Some(Epoch::new(5)); chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); chain_spec.gloas_fork_epoch = Some(Epoch::new(7)); + // Enable zkVM + chain_spec.zkvm_enabled = true; // check that we have all forks covered assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); @@ -108,7 +111,7 @@ pub fn build_config( config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port, port); config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); - config.network_dir = path.into_path(); + config.network_dir = path.keep(); config.disable_peer_scoring = disable_peer_scoring; config.inbound_rate_limiter_config = inbound_rate_limiter; Arc::new(config) @@ -138,10 +141,15 @@ pub async fn build_libp2p_instance( libp2p_registry: None, }; Libp2pInstance( - LibP2PService::new(executor, libp2p_context, custody_group_count) - .await - .expect("should build libp2p instance") - .0, + LibP2PService::new( + executor, + libp2p_context, + custody_group_count, + secp256k1::Keypair::generate().into(), + ) + .await + .expect("should build libp2p instance") + .0, signal, ) } diff --git a/beacon_node/lighthouse_network/tests/main.rs b/beacon_node/lighthouse_network/tests/main.rs new file mode 100644 index 00000000000..2ed0eabaff7 --- /dev/null +++ b/beacon_node/lighthouse_network/tests/main.rs @@ -0,0 +1,2 @@ +mod common; +mod rpc_tests; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 81d08764a5f..cc0a893a3e9 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,9 +1,8 @@ #![cfg(test)] -mod 
common; - +use crate::common; use crate::common::spec_with_all_forks_enabled; -use common::{Protocol, build_tracing_subscriber}; +use crate::common::{Protocol, build_tracing_subscriber}; use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; @@ -17,8 +16,9 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, - RuntimeVariableList, Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + EthSpec, ExecutionBlockHash, ExecutionProof, ExecutionProofId, FixedBytesExtended, ForkName, + Hash256, KzgCommitment, KzgProof, MinimalEthSpec, RuntimeVariableList, Signature, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; @@ -1738,3 +1738,368 @@ fn test_active_requests() { } }) } + +// Tests ExecutionProofsByRoot RPC - basic single proof request +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_single() { + // Set up the logging. + let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Fulu; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from_root(Hash256::random()); + let subnet_id = ExecutionProofId::new(0).unwrap(); + + // ExecutionProofsByRoot Request + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new( + block_root, + vec![], // No proofs already have + 2, // Request 2 proofs + ) + .unwrap(), + ); + + // ExecutionProofsByRoot Response + let proof = Arc::new( + ExecutionProof::new( + subnet_id, + Slot::new(100), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ); + let rpc_response = Response::ExecutionProofsByRoot(Some(proof.clone())); + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(received_proof)) => { + debug!("Proof received"); + assert_eq!(received_proof.block_root, block_root); + assert_eq!(received_proof.block_hash, block_hash); + assert_eq!(received_proof.proof_id, subnet_id); + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated"); + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send the proof + 
receiver.send_response( + peer_id, + inbound_request_id, + rpc_response.clone(), + ); + // Send stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent proof and termination"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! { + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests ExecutionProofsByRoot RPC - multiple proofs chunked response +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_chunked() { + // Set up the logging. + let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Deneb; + + let messages_to_send = 3; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from_root(Hash256::random()); + let proof_ids = [ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ExecutionProofId::new(2).unwrap(), + ]; + assert_eq!(proof_ids.len(), messages_to_send); + + // ExecutionProofsByRoot Request for multiple proofs + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new(block_root, vec![], proof_ids.len()).unwrap(), + ); + + // Create proofs for each proof ID + let proofs: Vec> = proof_ids + .iter() + .map(|subnet_id| { + Arc::new( + ExecutionProof::new( + *subnet_id, + Slot::new(100), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ) + }) + .collect(); + + let mut messages_received = 0; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(received_proof)) => { + debug!("Chunk received"); + assert_eq!(received_proof.block_root, block_root); + assert_eq!(received_proof.block_hash, block_hash); + messages_received += 1; + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated"); + assert_eq!(messages_received, messages_to_send); + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send all proofs + for proof in &proofs { + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(Some(proof.clone())), + ); + debug!("Sent proof chunk"); + } + // Send stream termination + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent termination"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests ExecutionProofsByRoot RPC - empty response (peer has no proofs) +#[test] +#[allow(clippy::single_match)] +fn test_tcp_execution_proofs_by_root_empty_response() { + // Set up the logging. + let log_level = "debug"; + let enable_logging = true; + let _subscriber = build_tracing_subscriber(log_level, enable_logging); + + let spec = Arc::new(spec_with_all_forks_enabled()); + let current_fork_name = ForkName::Fulu; + + let rt = Arc::new(Runtime::new().unwrap()); + rt.block_on(async { + let (mut sender, mut receiver) = common::build_node_pair( + Arc::downgrade(&rt), + current_fork_name, + spec.clone(), + Protocol::Tcp, + false, + None, + ) + .await; + + let block_root = Hash256::random(); + + let rpc_request = RequestType::ExecutionProofsByRoot( + ExecutionProofsByRootRequest::new(block_root, vec![], 2).unwrap(), + ); + + let mut received_termination = false; + + // Build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + debug!("Sending RPC"); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); + } + NetworkEvent::ResponseReceived { + peer_id: _, + app_request_id: AppRequestId::Router, + response, + } => match response { + Response::ExecutionProofsByRoot(Some(_)) => { + panic!("Should not receive any proofs in empty response test"); + } + Response::ExecutionProofsByRoot(None) => { + debug!("Stream terminated (empty response)"); + received_termination = true; + return; + } + _ => {} + }, + _ => {} + } + } + } + .instrument(info_span!("Sender")); + + // Build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + } => { + if request_type == rpc_request { + debug!("Receiver got request"); + // Send only stream termination (no proofs) + receiver.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(None), + ); + debug!("Sent empty response (termination only)"); + } + } + _ => {} + } + } + } + .instrument(info_span!("Receiver")); + + tokio::select! 
{ + _ = sender_future => { + assert!(received_termination, "Should have received stream termination"); + } + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index 56dccadaa94..dd9e9f1ebb2 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -39,6 +39,8 @@ pub const SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST: &str = "handle_blobs_by_range_requ pub const SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST: &str = "handle_data_columns_by_range_request"; pub const SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST: &str = "handle_blocks_by_root_request"; pub const SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST: &str = "handle_blobs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = + "handle_execution_proofs_by_root_request"; pub const SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST: &str = "handle_data_columns_by_root_request"; pub const SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE: &str = "handle_light_client_updates_by_range"; pub const SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP: &str = "handle_light_client_bootstrap"; @@ -70,6 +72,7 @@ pub const LH_BN_ROOT_SPAN_NAMES: &[&str] = &[ SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index eb70147c6ef..c8440a6bbf4 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -7,6 +7,9 @@ use crate::{ use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use beacon_chain::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use beacon_chain::store::Error; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -37,10 +40,11 @@ use store::hot_cold_store::HotColdDBError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ Attestation, AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar, - DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, + DataColumnSubnetId, EthSpec, ExecutionProof, Hash256, IndexedAttestation, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource, }; use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction; @@ -767,6 +771,221 @@ impl NetworkBeaconProcessor { } } + /// 
Process a gossip execution proof. + /// + /// Validates the execution proof according to the gossip spec and processes it + /// through the DataAvailabilityChecker if valid. + pub async fn process_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Arc, + _seen_timestamp: Duration, + ) { + let block_root = execution_proof.block_root; + let proof_id = execution_proof.proof_id; + + debug!( + %peer_id, + %proof_id, + %block_root, + "Received execution proof via gossip" + ); + + // Verify the execution proof for gossip + match self + .chain + .verify_execution_proof_for_gossip(execution_proof.clone()) + { + Ok(gossip_verified_proof) => { + debug!( + %block_root, + subnet_id = %gossip_verified_proof.subnet_id(), + "Successfully verified gossip execution proof" + ); + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + + // Process the verified proof through DA checker + self.process_gossip_verified_execution_proof( + peer_id, + gossip_verified_proof, + _seen_timestamp, + ) + .await + } + Err(err) => { + match err { + GossipExecutionProofError::PriorKnownUnpublished => { + debug!( + %block_root, + %proof_id, + "Gossip execution proof already processed via the EL. Accepting the proof without re-processing." + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Accept, + ); + } + GossipExecutionProofError::PriorKnown { + block_root, + proof_id, + .. + } => { + // Proof already known via gossip. No penalty, gossip filter should + // filter duplicates. + debug!( + %block_root, + %proof_id, + "Received already known execution proof. Ignoring the proof" + ); + } + GossipExecutionProofError::ParentUnknown { parent_root } => { + debug!( + action = "requesting parent", + %block_root, + %parent_root, + "Unknown parent hash for execution proof" + ); + // TODO(zkproofs): Implement parent lookup for execution proofs + // This might require creating a new SyncMessage variant + // For now, we just ignore the proof + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::BeaconChainError(_) => { + crit!( + error = ?err, + "Internal error when verifying execution proof" + ) + } + GossipExecutionProofError::ProofVerificationFailed(ref reason) => { + warn!( + error = ?err, + %block_root, + %proof_id, + %reason, + "Execution proof verification failed. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_verification_failed", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::ProofTooLarge { size, max_size } => { + warn!( + error = ?err, + %block_root, + %proof_id, + %size, + %max_size, + "Execution proof exceeds maximum size. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_too_large", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::BlockNotAvailable { block_root } => { + debug!( + error = ?err, + %block_root, + %proof_id, + "Block for execution proof not yet available. 
Ignoring the proof" + ); + // Block might arrive later, so don't penalize heavily + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::NotFinalizedDescendant { block_parent_root } => { + debug!( + error = ?err, + %block_root, + %block_parent_root, + %proof_id, + "Execution proof conflicts with finality. Rejecting the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_not_finalized_descendant", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Reject, + ); + } + GossipExecutionProofError::FutureSlot { + message_slot, + latest_permissible_slot, + } => { + debug!( + error = ?err, + %block_root, + %proof_id, + %message_slot, + %latest_permissible_slot, + "Execution proof from future slot. Ignoring the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_execution_proof_future_slot", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + GossipExecutionProofError::PastFinalizedSlot { + proof_slot, + finalized_slot, + } => { + debug!( + error = ?err, + %block_root, + %proof_id, + %proof_slot, + %finalized_slot, + "Execution proof from past finalized slot. Ignoring the proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_execution_proof_past_finalized", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + } + } + } + #[allow(clippy::too_many_arguments)] #[instrument( name = SPAN_PROCESS_GOSSIP_BLOB, @@ -1126,6 +1345,83 @@ impl NetworkBeaconProcessor { } } + async fn process_gossip_verified_execution_proof( + self: &Arc, + peer_id: PeerId, + verified_proof: GossipVerifiedExecutionProof, + _seen_duration: Duration, + ) { + let processing_start_time = Instant::now(); + let block_root = verified_proof.block_root(); + let proof_slot = verified_proof.slot(); + let subnet_id = verified_proof.subnet_id(); + + let result = self + .chain + .process_gossip_execution_proof(verified_proof, || Ok(())) + .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "execution_proof"); + + match &result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + %block_root, + %subnet_id, + "Gossipsub execution proof processed, imported fully available block" + ); + self.chain.recompute_head_at_current_slot().await; + + debug!( + processing_time_ms = processing_start_time.elapsed().as_millis(), + "Execution proof full verification complete" + ); + } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + %slot, + %subnet_id, + "Execution proof cached, block still needs more components" + ); + debug!( + %block_root, + %proof_slot, + %subnet_id, + "Execution proof cached for pending block" + ); + } + }, + Err(BlockError::DuplicateFullyImported(_)) => { + debug!( + ?block_root, + %subnet_id, + "Ignoring gossip execution proof for already imported block" + ); + } + Err(err) => { + debug!( + outcome = ?err, + ?block_root, + block_slot = %proof_slot, + %subnet_id, + "Invalid gossip execution proof" + ); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_execution_proof", + ); + } + } + + if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + 
block_root, + imported: true, + }); + } + } + /// Process the beacon block received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index bebda36d71c..7db2790920e 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -14,7 +14,7 @@ use beacon_processor::{ use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - LightClientUpdatesByRangeRequest, + ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, }; use lighthouse_network::service::api_types::CustodyBackfillBatchId; use lighthouse_network::{ @@ -249,6 +249,32 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proof. + pub fn send_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + execution_proof: Arc, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipExecutionProof(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. pub fn send_gossip_sync_signature( self: &Arc, @@ -469,6 +495,30 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proofs. `process_rpc_execution_proofs` reports + /// the result back to sync. + pub fn send_rpc_execution_proofs( + self: &Arc, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + if proofs.is_empty() { + return Ok(()); + } + let process_fn = self.clone().generate_rpc_execution_proofs_process_fn( + block_root, + proofs, + seen_timestamp, + process_type, + ); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcExecutionProofs { process_fn }, + }) + } + /// Create a new `Work` event for some custody columns. `process_rpc_custody_columns` reports /// the result back to sync. pub fn send_rpc_custody_columns( @@ -631,6 +681,24 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `ExecutionProofsByRootRequest`s from the RPC network. + pub fn send_execution_proofs_by_roots_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_root_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRootsRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. 
pub fn send_data_columns_by_roots_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index ac24b648e05..f063d7e8380 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -7,6 +7,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenS use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + ExecutionProofsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -14,8 +15,9 @@ use lighthouse_tracing::{ SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, - SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, - SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, + SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, + SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, + SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, }; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; @@ -390,6 +392,100 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle an `ExecutionProofsByRoot` request from the peer. + #[instrument( + name = SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + parent = None, + level = "debug", + skip_all, + fields( + peer_id = %peer_id, + client = tracing::field::Empty, + ) + )] + pub fn handle_execution_proofs_by_root_request( + self: Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_root_request_inner( + peer_id, + inbound_request_id, + request, + ), + Response::ExecutionProofsByRoot, + ); + } + + /// Handle an `ExecutionProofsByRoot` request from the peer. 
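+ /// Streams every stored proof for `block_root` that is not in the peer's
+ /// `already_have` set, stopping after `count_needed` proofs; the stream is
+ /// then closed by `terminate_response_stream` in the wrapper above.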
+ fn handle_execution_proofs_by_root_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRootRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + let block_root = request.block_root; + let already_have_set: std::collections::HashSet<_> = + request.already_have.iter().copied().collect(); + let count_needed = request.count_needed as usize; + + // Get all execution proofs we have for this block from the DA checker + let Some(available_proofs) = self + .chain + .data_availability_checker + .get_execution_proofs(&block_root) + else { + // No proofs available for this block + debug!( + %peer_id, + %block_root, + "No execution proofs available for peer" + ); + return Ok(()); + }; + + // Filter out proofs the peer already has and send up to count_needed + let mut sent_count = 0; + for proof in available_proofs { + // Skip proofs the peer already has + if already_have_set.contains(&proof.proof_id) { + continue; + } + + // Send the proof + self.send_response( + peer_id, + inbound_request_id, + Response::ExecutionProofsByRoot(Some(proof)), + ); + + sent_count += 1; + + // Stop when we've sent the requested count + if sent_count >= count_needed { + break; + } + } + + debug!( + %peer_id, + %block_root, + requested = count_needed, + already_have = already_have_set.len(), + sent = sent_count, + "ExecutionProofsByRoot outgoing response processed" + ); + + Ok(()) + } + /// Handle a `DataColumnsByRoot` request from the peer. #[instrument( name = SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 41160fcfe45..6c260c9bb16 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -259,6 +259,21 @@ impl NetworkBeaconProcessor { Box::pin(process_fn) } + pub fn generate_rpc_execution_proofs_process_fn( + self: Arc, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> AsyncFn { + let process_fn = async move { + self.clone() + .process_rpc_execution_proofs(block_root, proofs, seen_timestamp, process_type) + .await; + }; + Box::pin(process_fn) + } + /// Attempt to process a list of blobs received from a direct RPC request. #[instrument( name = SPAN_PROCESS_RPC_BLOBS, @@ -804,6 +819,16 @@ impl NetworkBeaconProcessor { // The peer is faulty if they bad signatures. Some(PeerAction::LowToleranceError) } + HistoricalBlockError::MissingOldestBlockRoot { slot } => { + warn!( + %slot, + error = "missing_oldest_block_root", + "Backfill batch processing error" + ); + // This is an internal error, do not penalize the peer. + None + } + HistoricalBlockError::ValidatorPubkeyCacheTimeout => { warn!( error = "pubkey_cache_timeout", @@ -987,4 +1012,79 @@ impl NetworkBeaconProcessor { } } } + + /// Process execution proofs received via RPC. + pub async fn process_rpc_execution_proofs( + self: Arc>, + block_root: Hash256, + proofs: Vec>, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + // Get slot directly from the first proof. All proofs should be for the same block. 
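+ // `send_rpc_execution_proofs` already drops empty batches, but guard again
+ // here rather than assume every caller has filtered.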
+ let slot = match proofs.first() { + Some(proof) => proof.slot, + None => { + debug!(?block_root, "No execution proofs to process"); + return; + } + }; + + let proof_ids: Vec<_> = proofs.iter().map(|p| p.proof_id).collect(); + + debug!( + ?proof_ids, + %block_root, + %slot, + proof_count = proofs.len(), + "RPC execution proofs received" + ); + + if let Ok(current_slot) = self.chain.slot() + && current_slot == slot + { + // let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); + // TODO(zkproofs): Add dedicated metrics for execution proofs + } + + let result = self + .chain + .process_rpc_execution_proofs(slot, block_root, proofs) + .await; + + // TODO(zkproofs): Add dedicated metrics for execution proof processing + // register_process_result_metrics(&result, metrics::BlockSource::Rpc, "execution_proofs"); + + match &result { + Ok(AvailabilityProcessingStatus::Imported(hash)) => { + debug!( + result = "imported block with execution proofs", + %slot, + block_hash = %hash, + "Block components retrieved" + ); + self.chain.recompute_head_at_current_slot().await; + } + Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { + debug!( + block_hash = %block_root, + %slot, + "Missing components over rpc (still need more proofs or other components)" + ); + } + Err(BlockError::DuplicateFullyImported(_)) => { + debug!( + block_hash = %block_root, + %slot, + "Execution proofs have already been imported" + ); + } + Err(_) => {} + } + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 60fe094bb7c..eb02ddad921 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -24,7 +24,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, trace, warn}; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ExecutionProof, ForkContext, SignedBeaconBlock, +}; /// Handles messages from the network and routes them to the appropriate service to be handled. pub struct Router { @@ -272,6 +274,15 @@ impl Router { request, ), ), + RequestType::ExecutionProofsByRoot(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_roots_request( + peer_id, + inbound_request_id, + request, + ), + ), _ => {} } } @@ -309,6 +320,9 @@ impl Router { Response::DataColumnsByRange(data_column) => { self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } + Response::ExecutionProofsByRoot(execution_proof) => { + self.on_execution_proofs_by_root_response(peer_id, app_request_id, execution_proof); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -384,6 +398,15 @@ impl Router { ), ) } + PubsubMessage::ExecutionProof(execution_proof) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_execution_proof( + message_id, + peer_id, + execution_proof, + timestamp_now(), + ), + ), PubsubMessage::VoluntaryExit(exit) => { debug!(%peer_id, "Received a voluntary exit"); self.handle_beacon_processor_send_result( @@ -670,6 +693,40 @@ impl Router { }); } + /// Handle an `ExecutionProofsByRoot` response from the peer. 
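+ /// Only sync issues these requests, so a response carrying a `Router` id or a
+ /// non-`SingleExecutionProof` sync id indicates a routing bug and is logged as
+ /// critical rather than processed.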
+ pub fn on_execution_proofs_by_root_response( + &mut self, + peer_id: PeerId, + app_request_id: AppRequestId, + execution_proof: Option>, + ) { + let sync_request_id = match app_request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::SingleExecutionProof { .. } => id, + other => { + crit!(request = ?other, "ExecutionProofsByRoot response on incorrect request"); + return; + } + }, + AppRequestId::Router => { + crit!(%peer_id, "All ExecutionProofsByRoot requests belong to sync"); + return; + } + AppRequestId::Internal => unreachable!("Handled internally"), + }; + + trace!( + %peer_id, + "Received ExecutionProofsByRoot Response" + ); + self.send_to_sync(SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + seen_timestamp: timestamp_now(), + }); + } + /// Handle a `DataColumnsByRoot` response from the peer. pub fn on_data_columns_by_root_response( &mut self, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4bd649ba824..a416f5cb123 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -12,6 +12,7 @@ use futures::future::OptionFuture; use futures::prelude::*; use lighthouse_network::Enr; +use lighthouse_network::identity::Keypair; use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::RpcResponse; @@ -212,6 +213,7 @@ impl NetworkService { executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, + local_keypair: Keypair, ) -> Result< ( NetworkService, @@ -284,6 +286,7 @@ impl NetworkService { .data_availability_checker .custody_context() .custody_group_count_at_head(&beacon_chain.spec), + local_keypair, ) .await?; @@ -366,6 +369,7 @@ impl NetworkService { executor: task_executor::TaskExecutor, libp2p_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, + local_keypair: Keypair, ) -> Result<(Arc>, NetworkSenders), String> { let (network_service, network_globals, network_senders) = Self::build( beacon_chain, @@ -373,6 +377,7 @@ impl NetworkService { executor.clone(), libp2p_registry, beacon_processor_send, + local_keypair, ) .await?; diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 64815ab2bb4..8ff1e0488df 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -6,6 +6,7 @@ use beacon_chain::BeaconChainTypes; use beacon_chain::test_utils::BeaconChainHarness; use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; use futures::StreamExt; +use lighthouse_network::identity::secp256k1; use lighthouse_network::types::{GossipEncoding, GossipKind}; use lighthouse_network::{Enr, GossipTopic}; use std::str::FromStr; @@ -66,6 +67,7 @@ fn test_dht_persistence() { executor, None, beacon_processor_tx, + secp256k1::Keypair::generate().into(), ) .await .unwrap(); @@ -134,6 +136,7 @@ fn test_removing_topic_weight_on_old_topics() { executor.clone(), None, beacon_processor_channels.beacon_processor_tx, + secp256k1::Keypair::generate().into(), ) .await .unwrap() diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 0df28cff6b7..bee6569b7b3 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -1,4 +1,5 @@ use super::*; +use 
beacon_chain::test_utils::generate_data_column_indices_rand_order; use beacon_chain::{ BeaconChain, builder::{BeaconChainBuilder, Witness}, @@ -73,6 +74,9 @@ impl TestBeaconChain { Duration::from_secs(recent_genesis_time()), Duration::from_millis(SLOT_DURATION_MILLIS), )) + .ordered_custody_column_indices(generate_data_column_indices_rand_order::< + MainnetEthSpec, + >()) .shutdown_sender(shutdown_tx) .rng(Box::new(StdRng::seed_from_u64(42))) .build() diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index c6b05190871..64da1ae61fc 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -2,7 +2,7 @@ use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; use crate::sync::block_lookups::{ - BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, + BlobRequestState, BlockRequestState, CustodyRequestState, PeerId, ProofRequestState, }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; @@ -12,7 +12,7 @@ use parking_lot::RwLock; use std::collections::HashSet; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, SignedBeaconBlock}; +use types::{DataColumnSidecarList, ExecutionProof, SignedBeaconBlock}; use super::SingleLookupId; use super::single_block_lookup::{ComponentRequests, DownloadResult}; @@ -22,6 +22,7 @@ pub enum ResponseType { Block, Blob, CustodyColumn, + ExecutionProof, } /// This trait unifies common single block lookup functionality across blocks and blobs. This @@ -215,3 +216,57 @@ impl RequestState for CustodyRequestState { &mut self.state } } + +impl RequestState for ProofRequestState { + type VerifiedResponseType = Vec>; + + fn make_request( + &self, + id: Id, + lookup_peers: Arc>>, + _min_proofs: usize, + cx: &mut SyncNetworkContext, + ) -> Result { + cx.execution_proof_lookup_request( + id, + lookup_peers, + self.block_root, + self.min_proofs_required, + ) + .map_err(LookupRequestError::SendFailedNetwork) + } + + fn send_for_processing( + id: Id, + download_result: DownloadResult, + cx: &SyncNetworkContext, + ) -> Result<(), LookupRequestError> { + let DownloadResult { + value, + block_root, + seen_timestamp, + .. 
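+ // (the `..` elides the remaining `DownloadResult` fields, which proof processing does not need)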
+ } = download_result; + cx.send_execution_proofs_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) + } + + fn response_type() -> ResponseType { + ResponseType::ExecutionProof + } + + fn request_state_mut(request: &mut SingleBlockLookup) -> Result<&mut Self, &'static str> { + request + .proof_request + .as_mut() + .ok_or("no active proof request") + } + + fn get_state(&self) -> &SingleLookupRequestState { + &self.state + } + + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + &mut self.state + } +} diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f8ffd298caf..6212c63a119 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -39,7 +39,9 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -pub use single_block_lookup::{BlobRequestState, BlockRequestState, CustodyRequestState}; +pub use single_block_lookup::{ + BlobRequestState, BlockRequestState, CustodyRequestState, ProofRequestState, +}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; @@ -532,6 +534,9 @@ impl BlockLookups { BlockProcessType::SingleCustodyColumn(id) => { self.on_processing_result_inner::>(id, result, cx) } + BlockProcessType::SingleExecutionProof { id } => { + self.on_processing_result_inner::(id, result, cx) + } }; self.on_lookup_result(process_type.id(), lookup_result, "processing_result", cx); } @@ -673,6 +678,9 @@ impl BlockLookups { ResponseType::CustodyColumn => { "lookup_custody_column_processing_failure" } + ResponseType::ExecutionProof => { + "lookup_execution_proof_processing_failure" + } }, ); } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 46897b2283b..46a8deb0ab2 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -17,7 +17,7 @@ use store::Hash256; use strum::IntoStaticStr; use tracing::{Span, debug_span}; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; +use types::{DataColumnSidecarList, EthSpec, ExecutionProof, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -63,6 +63,7 @@ pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, pub component_requests: ComponentRequests, + pub proof_request: Option, /// Peers that claim to have imported this set of block components. This state is shared with /// the custody request to have an updated view of the peers that claim to have imported the /// block associated with this lookup. The peer set of a lookup can change rapidly, and faster @@ -102,6 +103,7 @@ impl SingleBlockLookup { id, block_request_state: BlockRequestState::new(requested_block_root), component_requests: ComponentRequests::WaitingForBlock, + proof_request: None, peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, @@ -168,32 +170,51 @@ impl SingleBlockLookup { /// Returns true if the block has already been downloaded. 
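+ /// With execution proofs enabled this covers three parts: the block itself, the DA
+ /// components (blobs or custody columns), and the optional execution proof request.
+ /// A lookup with no proof request treats that part as trivially processed.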
pub fn all_components_processed(&self) -> bool { - self.block_request_state.state.is_processed() - && match &self.component_requests { - ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), - ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), - ComponentRequests::NotNeeded { .. } => true, - } + let block_processed = self.block_request_state.state.is_processed(); + + let da_component_processed = match &self.component_requests { + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_processed(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_processed(), + ComponentRequests::NotNeeded { .. } => true, + }; + + let proof_processed = self + .proof_request + .as_ref() + .map(|request| request.state.is_processed()) + .unwrap_or(true); // If no proof request, consider it processed + + block_processed && da_component_processed && proof_processed } /// Returns true if this request is expecting some event to make progress pub fn is_awaiting_event(&self) -> bool { - self.awaiting_parent.is_some() - || self.block_request_state.state.is_awaiting_event() - || match &self.component_requests { - // If components are waiting for the block request to complete, here we should - // check if the`block_request_state.state.is_awaiting_event(). However we already - // checked that above, so `WaitingForBlock => false` is equivalent. - ComponentRequests::WaitingForBlock => false, - ComponentRequests::ActiveBlobRequest(request, _) => { - request.state.is_awaiting_event() - } - ComponentRequests::ActiveCustodyRequest(request) => { - request.state.is_awaiting_event() - } - ComponentRequests::NotNeeded { .. } => false, - } + if self.awaiting_parent.is_some() { + return true; + } + + if self.block_request_state.state.is_awaiting_event() { + return true; + } + + let da_awaiting = match &self.component_requests { + // If components are waiting for the block request to complete, here we should + // check `block_request_state.state.is_awaiting_event()`. However we already + // checked that above, so `WaitingForBlock => false` is equivalent. + ComponentRequests::WaitingForBlock => false, + ComponentRequests::ActiveBlobRequest(request, _) => request.state.is_awaiting_event(), + ComponentRequests::ActiveCustodyRequest(request) => request.state.is_awaiting_event(), + ComponentRequests::NotNeeded { .. } => false, + }; + + let proof_awaiting = self + .proof_request + .as_ref() + .map(|request| request.state.is_awaiting_event()) + .unwrap_or(false); + + da_awaiting || proof_awaiting } /// Makes progress on all requests of this lookup. Any error is not recoverable and must result @@ -239,6 +260,11 @@ impl SingleBlockLookup { } else { self.component_requests = ComponentRequests::NotNeeded("outside da window"); } + + if cx.chain.should_fetch_execution_proofs(block_epoch) { + self.proof_request = cx.chain.min_execution_proofs_required() + .map(|min_proofs| ProofRequestState::new(self.block_root, min_proofs)); + } } else { // Wait to download the block before downloading blobs.
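+ // (With zkVM mode enabled, the execution proof request is likewise only created at this point, once the block and its epoch are known.)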
Then we can be sure that the // block has data, so there's no need to do "blind" requests for all possible blobs and @@ -253,6 +279,7 @@ impl SingleBlockLookup { } } + // Progress DA component requests match &self.component_requests { ComponentRequests::WaitingForBlock => {} // do nothing ComponentRequests::ActiveBlobRequest(_, expected_blobs) => { @@ -264,6 +291,11 @@ impl SingleBlockLookup { ComponentRequests::NotNeeded { .. } => {} // do nothing } + // Progress proof request (separate from DA components) + if let Some(request) = &self.proof_request { + self.continue_request::(cx, request.min_proofs_required)?; + } + // If all components of this lookup are already processed, there will be no future events // that can make progress so it must be dropped. Consider the lookup completed. // This case can happen if we receive the components from gossip during a retry. @@ -404,6 +436,26 @@ impl CustodyRequestState { } } +/// The state of the execution proof request component of a `SingleBlockLookup`. +#[derive(Educe)] +#[educe(Debug)] +pub struct ProofRequestState { + #[educe(Debug(ignore))] + pub block_root: Hash256, + pub state: SingleLookupRequestState>>, + pub min_proofs_required: usize, +} + +impl ProofRequestState { + pub fn new(block_root: Hash256, min_proofs_required: usize) -> Self { + Self { + block_root, + state: SingleLookupRequestState::new(), + min_proofs_required, + } + } +} + /// The state of the block request component of a `SingleBlockLookup`. #[derive(Educe)] #[educe(Debug)] diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 338f21ce987..c0af69d7a40 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -45,6 +45,7 @@ use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, + ProofRequestState, }; use crate::sync::custody_backfill_sync::CustodyBackFillSync; use crate::sync::network_context::{PeerGroup, RpcResponseResult}; @@ -73,7 +74,8 @@ use std::time::Duration; use tokio::sync::mpsc; use tracing::{debug, error, info, trace}; use types::{ - BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, SignedBeaconBlock, Slot, + BlobSidecar, DataColumnSidecar, EthSpec, ExecutionProof, ForkContext, Hash256, + SignedBeaconBlock, Slot, }; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -132,6 +134,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// An execution proof has been received from the RPC + RpcExecutionProof { + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. 
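+ /// (Parent lookups proceed independently of `RpcExecutionProof` above: a lookup only creates its proof request once the block body itself has been downloaded.)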
UnknownParentBlock(PeerId, Arc>, Hash256), @@ -183,6 +193,7 @@ pub enum BlockProcessType { SingleBlock { id: Id }, SingleBlob { id: Id }, SingleCustodyColumn(Id), + SingleExecutionProof { id: Id }, } impl BlockProcessType { @@ -190,7 +201,8 @@ impl BlockProcessType { match self { BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } - | BlockProcessType::SingleCustodyColumn(id) => *id, + | BlockProcessType::SingleCustodyColumn(id) + | BlockProcessType::SingleExecutionProof { id } => *id, } } } @@ -491,6 +503,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::SingleExecutionProof { id } => { + self.on_single_execution_proof_response(id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) } @@ -833,6 +848,17 @@ impl SyncManager { } => { self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) } + SyncMessage::RpcExecutionProof { + sync_request_id, + peer_id, + execution_proof, + seen_timestamp, + } => self.rpc_execution_proof_received( + sync_request_id, + peer_id, + execution_proof, + seen_timestamp, + ), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -1186,6 +1212,25 @@ impl SyncManager { } } + fn rpc_execution_proof_received( + &mut self, + sync_request_id: SyncRequestId, + peer_id: PeerId, + execution_proof: Option>, + seen_timestamp: Duration, + ) { + match sync_request_id { + SyncRequestId::SingleExecutionProof { id } => self.on_single_execution_proof_response( + id, + peer_id, + RpcEvent::from_chunk(execution_proof, seen_timestamp), + ), + _ => { + crit!(%peer_id, "bad request id for execution_proof"); + } + } + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, @@ -1204,6 +1249,27 @@ impl SyncManager { } } + fn on_single_execution_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + execution_proof: RpcEvent>, + ) { + if let Some(resp) = + self.network + .on_single_execution_proof_response(id, peer_id, execution_proof) + { + self.block_lookups + .on_download_response::( + id, + resp.map(|(value, seen_timestamp)| { + (value, PeerGroup::from_single(peer_id), seen_timestamp) + }), + &mut self.network, + ) + } + } + fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 2e0c56db23f..73afabe60d2 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -37,6 +37,7 @@ pub use requests::LookupVerifyError; use requests::{ ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, + ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, }; #[cfg(test)] use slot_clock::SlotClock; @@ -52,7 +53,7 @@ use tracing::{Span, debug, debug_span, error, warn}; use types::blob_sidecar::FixedBlobSidecarList; use types::{ BlobSidecar, BlockImportSource, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - ForkContext, Hash256, SignedBeaconBlock, Slot, + ExecutionProof, ForkContext, Hash256, SignedBeaconBlock, Slot, }; pub mod custody; @@ -204,6 +205,9 @@ pub struct 
SyncNetworkContext { /// A mapping of active DataColumnsByRoot requests data_columns_by_root_requests: ActiveRequests>, + /// A mapping of active ExecutionProofsByRoot requests + execution_proofs_by_root_requests: + ActiveRequests>, /// A mapping of active BlocksByRange requests blocks_by_range_requests: ActiveRequests>, @@ -295,6 +299,7 @@ impl SyncNetworkContext { blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), + execution_proofs_by_root_requests: ActiveRequests::new("execution_proofs_by_root"), blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), @@ -323,6 +328,7 @@ impl SyncNetworkContext { blocks_by_root_requests, blobs_by_root_requests, data_columns_by_root_requests, + execution_proofs_by_root_requests, blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, @@ -349,6 +355,10 @@ impl SyncNetworkContext { .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); + let execution_proofs_by_root_ids = execution_proofs_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleExecutionProof { id: *id }); let blocks_by_range_ids = blocks_by_range_requests .active_requests_of_peer(peer_id) .into_iter() @@ -364,6 +374,7 @@ impl SyncNetworkContext { blocks_by_root_ids .chain(blobs_by_root_ids) .chain(data_column_by_root_ids) + .chain(execution_proofs_by_root_ids) .chain(blocks_by_range_ids) .chain(blobs_by_range_ids) .chain(data_column_by_range_ids) @@ -420,6 +431,7 @@ impl SyncNetworkContext { blocks_by_root_requests, blobs_by_root_requests, data_columns_by_root_requests, + execution_proofs_by_root_requests, blocks_by_range_requests, blobs_by_range_requests, data_columns_by_range_requests, @@ -442,6 +454,7 @@ impl SyncNetworkContext { .iter_request_peers() .chain(blobs_by_root_requests.iter_request_peers()) .chain(data_columns_by_root_requests.iter_request_peers()) + .chain(execution_proofs_by_root_requests.iter_request_peers()) .chain(blocks_by_range_requests.iter_request_peers()) .chain(blobs_by_range_requests.iter_request_peers()) .chain(data_columns_by_range_requests.iter_request_peers()) @@ -1026,6 +1039,100 @@ impl SyncNetworkContext { Ok(LookupRequestResult::RequestSent(id.req_id)) } + /// Request execution proofs for `block_root` + pub fn execution_proof_lookup_request( + &mut self, + lookup_id: SingleLookupId, + lookup_peers: Arc>>, + block_root: Hash256, + min_proofs_required: usize, + ) -> Result { + let active_request_count_by_peer = self.active_request_count_by_peer(); + let Some(peer_id) = lookup_peers + .read() + .iter() + .map(|peer| { + ( + // Prefer peers with fewer in-flight requests + active_request_count_by_peer.get(peer).copied().unwrap_or(0), + // Random tie-breaker, so equally-loaded peers are not always ordered by PeerId + rand::random::<u32>(), + peer, + ) + }) + .min() + .map(|(_, _, peer)| *peer) + else { + return Ok(LookupRequestResult::Pending("no peers")); + }; + + // Query DA checker for proofs we already have + let already_have = self + .chain + .data_availability_checker + .get_existing_proof_ids(&block_root) + .unwrap_or_default(); + + let current_count = already_have.len(); + + // Calculate how many more proofs we need + if current_count >= min_proofs_required
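+ // (If we already have at least `min_proofs_required` proofs, no request is needed;
+ // otherwise `count_needed` below is at least 1.)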
{ + // Already have enough proofs, no request needed + return Ok(LookupRequestResult::NoRequestNeeded( + "already have minimum proofs", + )); + } + + let count_needed = min_proofs_required - current_count; + + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; + + let request = ExecutionProofsByRootSingleBlockRequest { + block_root, + already_have: already_have.clone(), + count_needed, + }; + + let network_request = RequestType::ExecutionProofsByRoot( + request + .clone() + .into_request() + .map_err(RpcRequestSendError::InternalError)?, + ); + + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: network_request, + app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), + }) + .map_err(|_| RpcRequestSendError::InternalError("network send error".to_owned()))?; + + debug!( + method = "ExecutionProofsByRoot", + ?block_root, + already_have_count = already_have.len(), + count_needed, + peer = %peer_id, + %id, + "Sync RPC request sent" + ); + + self.execution_proofs_by_root_requests.insert( + id, + peer_id, + // Don't expect max responses since peer might not have all the proofs we need + false, + ExecutionProofsByRootRequestItems::new(request), + Span::none(), + ); + + Ok(LookupRequestResult::RequestSent(id.req_id)) + } + /// Request to send a single `data_columns_by_root` request to the network. pub fn data_column_lookup_request( &mut self, @@ -1460,6 +1567,20 @@ impl SyncNetworkContext { self.on_rpc_response_result(id, "BlobsByRoot", resp, peer_id, |_| 1) } + pub(crate) fn on_single_execution_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + rpc_event: RpcEvent>, + ) -> Option>>> { + let resp = self + .execution_proofs_by_root_requests + .on_response(id, rpc_event); + self.on_rpc_response_result(id, "ExecutionProofsByRoot", resp, peer_id, |proofs| { + proofs.len() + }) + } + #[allow(clippy::type_complexity)] pub(crate) fn on_data_columns_by_root_response( &mut self, @@ -1657,6 +1778,36 @@ impl SyncNetworkContext { }) } + pub fn send_execution_proofs_for_processing( + &self, + id: Id, + block_root: Hash256, + proofs: Vec>, + seen_timestamp: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(?block_root, ?id, "Sending execution proofs for processing"); + // Lookup sync event safety: If `beacon_processor.send_rpc_execution_proofs` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type + beacon_processor + .send_rpc_execution_proofs( + block_root, + proofs, + seen_timestamp, + BlockProcessType::SingleExecutionProof { id }, + ) + .map_err(|e| { + error!( + error = ?e, + "Failed to send sync execution proofs to processor" + ); + SendErrorProcessor::SendError + }) + } + pub fn send_custody_columns_for_processing( &self, _id: Id, diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 3183c06d762..63249ed2a4b 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -5,7 +5,7 @@ use fnv::FnvHashMap; use lighthouse_network::PeerId; use strum::IntoStaticStr; use tracing::Span; -use types::{Hash256, Slot}; +use types::{ExecutionProofId, Hash256, Slot}; pub use blobs_by_range::BlobsByRangeRequestItems; pub use blobs_by_root::{BlobsByRootRequestItems, 
BlobsByRootSingleBlockRequest}; @@ -15,6 +15,9 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +pub use execution_proofs_by_root::{ + ExecutionProofsByRootRequestItems, ExecutionProofsByRootSingleBlockRequest, +}; use crate::metrics; @@ -26,6 +29,7 @@ mod blocks_by_range; mod blocks_by_root; mod data_columns_by_range; mod data_columns_by_root; +mod execution_proofs_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { @@ -34,8 +38,10 @@ pub enum LookupVerifyError { UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), UnrequestedSlot(Slot), + UnrequestedProof(ExecutionProofId), InvalidInclusionProof, DuplicatedData(Slot, u64), + DuplicatedProofIDs(ExecutionProofId), InternalError(String), } diff --git a/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs new file mode 100644 index 00000000000..257d6e1a311 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/execution_proofs_by_root.rs @@ -0,0 +1,68 @@ +use lighthouse_network::rpc::methods::ExecutionProofsByRootRequest; +use std::sync::Arc; +use types::{EthSpec, ExecutionProof, ExecutionProofId, Hash256}; + +use super::{ActiveRequestItems, LookupVerifyError}; + +#[derive(Debug, Clone)] +pub struct ExecutionProofsByRootSingleBlockRequest { + pub block_root: Hash256, + pub already_have: Vec, + pub count_needed: usize, +} + +impl ExecutionProofsByRootSingleBlockRequest { + pub fn into_request(self) -> Result { + ExecutionProofsByRootRequest::new(self.block_root, self.already_have, self.count_needed) + .map_err(|e| e.to_string()) + } +} + +pub struct ExecutionProofsByRootRequestItems { + request: ExecutionProofsByRootSingleBlockRequest, + items: Vec>, + _phantom: std::marker::PhantomData, +} + +impl ExecutionProofsByRootRequestItems { + pub fn new(request: ExecutionProofsByRootSingleBlockRequest) -> Self { + Self { + request, + items: vec![], + _phantom: std::marker::PhantomData, + } + } +} + +impl ActiveRequestItems for ExecutionProofsByRootRequestItems { + type Item = Arc; + + /// Appends a proof to this multi-item request. 
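+ /// Verification order: the proof must be for the requested block root, must not be
+ /// in the `already_have` set we advertised, and must not repeat a proof ID already
+ /// received in this stream. Expected outcomes, as a hypothetical sketch (assuming a
+ /// request for two proofs; the `proof_*` bindings are illustrative):
+ ///
+ /// ```ignore
+ /// let mut items = ExecutionProofsByRootRequestItems::new(request);
+ /// assert!(matches!(items.add(proof_0), Ok(false)));        // accepted, incomplete
+ /// assert!(items.add(proof_0_again).is_err());              // DuplicatedProofIDs
+ /// assert!(items.add(proof_for_other_root).is_err());       // UnrequestedBlockRoot
+ /// assert!(matches!(items.add(proof_1), Ok(true)));         // count_needed reached
+ /// ```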
+ /// Note: This is very similar to `DataColumnsByRootSingleBlockRequest` + fn add(&mut self, proof: Self::Item) -> Result { + let block_root = proof.block_root; + if self.request.block_root != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + + // Verify proof is not in the already_have list + // We should not receive proofs we already have + if self.request.already_have.contains(&proof.proof_id) { + return Err(LookupVerifyError::UnrequestedProof(proof.proof_id)); + } + + // Check for duplicate proof IDs + if self.items.iter().any(|p| p.proof_id == proof.proof_id) { + return Err(LookupVerifyError::DuplicatedProofIDs(proof.proof_id)); + } + + self.items.push(proof); + + // We've received all requested proofs when we have count_needed proofs + Ok(self.items.len() >= self.request.count_needed) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs index 72e2fb2d5b6..b912a6badc9 100644 --- a/beacon_node/network/src/sync/range_data_column_batch_request.rs +++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs @@ -268,8 +268,8 @@ impl RangeDataColumnBatchRequest { let received_columns = columns.iter().map(|c| c.index).collect::>(); - let missing_columns = received_columns - .difference(expected_custody_columns) + let missing_columns = expected_custody_columns + .difference(&received_columns) .collect::>(); // blobs are expected for this slot but there is at least one missing columns diff --git a/beacon_node/network/src/sync/tests/execution_proof_tests.rs b/beacon_node/network/src/sync/tests/execution_proof_tests.rs new file mode 100644 index 00000000000..ce006172187 --- /dev/null +++ b/beacon_node/network/src/sync/tests/execution_proof_tests.rs @@ -0,0 +1,509 @@ +use super::*; +use crate::sync::block_lookups::common::ResponseType; +use lighthouse_network::rpc::{RPCError, RpcErrorResponse}; +use lighthouse_network::service::api_types::SyncRequestId; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// Test successful execution proof fetch and verification +#[test] +fn test_proof_lookup_happy_path() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Get execution payload hash from the block + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger the unknown block (which should trigger proof request) + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + + // Expect block request + let block_id = rig.expect_block_lookup_request(block_root); + + // Send the block + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + // Now expect proof request + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send all requested proofs + // TODO(zkproofs): We should use min_required instead of hardcoding 2 proofs here + let proof_ids = vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ]; + rig.complete_single_lookup_proof_download(proof_id, peer_id, block_root, block_hash, proof_ids); + + // Proofs should be processed + 
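+ // (surfaced as an RpcExecutionProofs work event on the beacon processor)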
rig.expect_block_process(ResponseType::ExecutionProof); + + // Block should be imported + rig.proof_component_processed_imported(block_root); + rig.expect_empty_network(); + rig.expect_no_active_lookups(); +} + +/// Test that empty proof response results in peer penalization +#[test] +fn test_proof_lookup_empty_response() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer sends stream terminator with no proofs + rig.single_lookup_proof_response(proof_id, peer_id, None); + + // Peer should be penalized for not providing proofs + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Should retry with different peer + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test partial proof response (peer doesn't have all requested proofs) +#[test] +fn test_proof_lookup_partial_response() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Requested 2 proofs but peer only sends 1 + let proof_0 = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0)); + rig.single_lookup_proof_response(proof_id, peer_id, None); // End stream early + + // Should penalize peer for not providing all requested proofs + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Should retry with another peer + let new_peer = rig.new_connected_peer(); + let retry_proof_id = rig.expect_proof_lookup_request(block_root); + + // Complete with all proofs + rig.complete_single_lookup_proof_download( + retry_proof_id, + new_peer, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} + +/// Test unrequested proof triggers penalization +#[test] +fn test_proof_lookup_unrequested_proof() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + 
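+ // Fall back to a zero block hash for blocks without an execution payload.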
.unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Requested proofs 0, 1 but peer sends proofs 5 (unrequested) + let unrequested_proof = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(5).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(unrequested_proof)); + + // Should penalize peer for sending unrequested data + rig.expect_penalty(peer_id, "UnrequestedProof"); + + // Should retry + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test duplicate proofs triggers penalization +#[test] +fn test_proof_lookup_duplicate_proof() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proof 0 twice + let proof_0_a = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + // TODO(zkproofs): In this case we have the same proofID but different proof_data + // zkVMs should be deterministic, so if this happens there is likely an issue somewhere + let proof_0_b = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + block_root, + vec![4, 5, 6], // Different data + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_a)); + rig.single_lookup_proof_response(proof_id, peer_id, Some(proof_0_b)); + + // Should penalize peer for duplicate proof + rig.expect_penalty(peer_id, "DuplicatedProof"); + + // Should retry + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test wrong block root in proof triggers penalization +#[test] +fn test_proof_lookup_wrong_block_root() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let wrong_root = Hash256::random(); + let peer_id = rig.new_connected_peer(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proof with wrong 
block_root + let wrong_proof = Arc::new( + ExecutionProof::new( + ExecutionProofId::new(0).unwrap(), + Slot::new(0), + block_hash, + wrong_root, + vec![1, 2, 3], + ) + .unwrap(), + ); + + rig.single_lookup_proof_response(proof_id, peer_id, Some(wrong_proof)); + + // Should penalize peer + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); + + // Should retry + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test proof request timeout +#[test] +fn test_proof_lookup_timeout() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Simulate timeout by sending error + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::ErrorResponse(RpcErrorResponse::ServerError, "timeout".to_string()), + }); + + // Should penalize peer for timeout + rig.expect_penalty(peer_id, "rpc_error"); + + // Should retry with different peer + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test peer disconnection during proof request +#[test] +fn test_proof_lookup_peer_disconnected() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer disconnects + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::Disconnected, + }); + + // Should retry with different peer (no penalty for disconnect) + let _new_peer = rig.new_connected_peer(); + rig.expect_proof_lookup_request(block_root); +} + +/// Test multiple retries on failure +#[test] +fn test_proof_lookup_multiple_retries() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + // First attempt - empty response + let proof_id_1 = rig.expect_proof_lookup_request(block_root); + rig.single_lookup_proof_response(proof_id_1, peer_id, None); + rig.expect_penalty(peer_id, "NotEnoughResponsesReturned"); + + // Second attempt - different 
peer, also fails + let peer_id_2 = rig.new_connected_peer(); + let proof_id_2 = rig.expect_proof_lookup_request(block_root); + rig.single_lookup_proof_response(proof_id_2, peer_id_2, None); + rig.expect_penalty(peer_id_2, "NotEnoughResponsesReturned"); + + // Third attempt - succeeds + let peer_id_3 = rig.new_connected_peer(); + let proof_id_3 = rig.expect_proof_lookup_request(block_root); + rig.complete_single_lookup_proof_download( + proof_id_3, + peer_id_3, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} + +/// Test proof lookup with no peers available +#[test] +fn test_proof_lookup_no_peers() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.into())); + rig.expect_block_process(ResponseType::Block); + + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Peer fails and disconnects + rig.send_sync_message(SyncMessage::RpcError { + sync_request_id: SyncRequestId::SingleExecutionProof { id: proof_id }, + peer_id, + error: RPCError::Disconnected, + }); + + // Disconnect the peer + rig.peer_disconnected(peer_id); + + // Should not be able to find another peer immediately + // The lookup should remain active waiting for peers + assert_eq!(rig.active_single_lookups_count(), 1); +} + +/// Test successful proof verification after block already has blobs +#[test] +fn test_proof_lookup_with_existing_blobs() { + let Some(mut rig) = TestRig::test_setup_after_fulu() else { + return; + }; + + let block = rig.rand_block(); + let block_root = block.canonical_root(); + let block_hash = block + .message() + .body() + .execution_payload() + .ok() + .map(|p| p.execution_payload_ref().block_hash()) + .unwrap_or_else(ExecutionBlockHash::zero); + let peer_id = rig.new_connected_peer(); + + // Trigger lookup + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + + // Get block + let block_id = rig.expect_block_lookup_request(block_root); + rig.single_lookup_block_response(block_id, peer_id, Some(block.clone().into())); + rig.expect_block_process(ResponseType::Block); + + // Block might still be missing proofs even if blobs present + // Proofs are an additional requirement + let proof_id = rig.expect_proof_lookup_request(block_root); + + // Send proofs + rig.complete_single_lookup_proof_download( + proof_id, + peer_id, + block_root, + block_hash, + vec![ + ExecutionProofId::new(0).unwrap(), + ExecutionProofId::new(1).unwrap(), + ], + ); + + rig.expect_block_process(ResponseType::ExecutionProof); + rig.proof_component_processed_imported(block_root); + rig.expect_no_active_lookups(); +} diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 63bcd176f52..0fdc505ab98 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -42,7 +42,8 @@ use tokio::sync::mpsc; use tracing::info; use types::{ BeaconState, BeaconStateBase, BlobSidecar, BlockImportSource, DataColumnSidecar, EthSpec, - ForkContext, 
ForkName, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, + ExecutionBlockHash, ExecutionProof, ExecutionProofId, ForkContext, ForkName, Hash256, + MinimalEthSpec as E, SignedBeaconBlock, Slot, data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, }; @@ -171,7 +172,11 @@ impl TestRig { self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob.into())); } - fn trigger_unknown_block_from_attestation(&mut self, block_root: Hash256, peer_id: PeerId) { + pub(super) fn trigger_unknown_block_from_attestation( + &mut self, + block_root: Hash256, + peer_id: PeerId, + ) { self.send_sync_message(SyncMessage::UnknownBlockHashFromAttestation( peer_id, block_root, )); @@ -184,7 +189,7 @@ impl TestRig { } } - fn rand_block(&mut self) -> SignedBeaconBlock { + pub(super) fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -228,7 +233,7 @@ impl TestRig { self.sync_manager.active_single_lookups() } - fn active_single_lookups_count(&self) -> usize { + pub(super) fn active_single_lookups_count(&self) -> usize { self.sync_manager.active_single_lookups().len() } @@ -321,7 +326,7 @@ impl TestRig { } #[track_caller] - fn expect_no_active_lookups(&self) { + pub(super) fn expect_no_active_lookups(&self) { self.expect_no_active_single_lookups(); } @@ -445,7 +450,7 @@ impl TestRig { }); } - fn single_lookup_block_response( + pub(super) fn single_lookup_block_response( &mut self, id: SingleLookupReqId, peer_id: PeerId, @@ -527,6 +532,69 @@ impl TestRig { ); } + /// Send a single execution proof response + pub(super) fn single_lookup_proof_response( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + proof: Option<Arc<ExecutionProof>>, + ) { + self.send_sync_message(SyncMessage::RpcExecutionProof { + sync_request_id: SyncRequestId::SingleExecutionProof { id }, + peer_id, + execution_proof: proof, + seen_timestamp: D, + }); + } + + /// Complete execution proof download by sending all requested proofs + pub(super) fn complete_single_lookup_proof_download( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + block_root: Hash256, + block_hash: ExecutionBlockHash, + proof_ids: Vec<ExecutionProofId>, + ) { + for proof_id in proof_ids { + let proof = Arc::new( + ExecutionProof::new( + proof_id, + types::Slot::new(0), + block_hash, + block_root, + vec![1, 2, 3, 4], + ) + .unwrap(), + ); + self.single_lookup_proof_response(id, peer_id, Some(proof)); + } + // Send stream terminator + self.single_lookup_proof_response(id, peer_id, None); + } + + /// Expect an execution proof request for a specific block + pub(super) fn expect_proof_lookup_request(&mut self, block_root: Hash256) -> SingleLookupReqId { + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + request: RequestType::ExecutionProofsByRoot(req), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleExecutionProof { id }), + ..
+ } if req.block_root == block_root => Some(*id), + _ => None, + }) + .unwrap_or_else(|_| panic!("Expected proof request for {block_root}")) + } + + /// Send a processing result indicating proofs were processed and block imported + pub(super) fn proof_component_processed_imported(&mut self, block_root: Hash256) { + let id = self.find_single_lookup_for(block_root); + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type: BlockProcessType::SingleBlock { id }, + result: BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), + }); + } + fn complete_lookup_block_download(&mut self, block: SignedBeaconBlock) { let block_root = block.canonical_root(); let id = self.expect_block_lookup_request(block_root); @@ -786,7 +854,7 @@ impl TestRig { } #[track_caller] - fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + pub(super) fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { self.find_block_lookup_request(for_block) .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) } @@ -910,7 +978,7 @@ impl TestRig { } #[track_caller] - fn expect_block_process(&mut self, response_type: ResponseType) { + pub(super) fn expect_block_process(&mut self, response_type: ResponseType) { match response_type { ResponseType::Block => self .pop_received_processor_event(|ev| { @@ -927,6 +995,11 @@ impl TestRig { (ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn).then_some(()) }) .unwrap_or_else(|e| panic!("Expected column work event: {e}")), + ResponseType::ExecutionProof => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::RpcExecutionProofs).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected execution proofs work event: {e}")), } } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 23c14ff63ef..9b82f830bcb 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -18,6 +18,7 @@ use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use types::{ChainSpec, ForkName, MinimalEthSpec as E}; +mod execution_proof_tests; mod lookups; mod range; diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index cc8809c43e6..485f21b5c8b 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -19,7 +19,7 @@ pub enum ReceivedPreCapella { /// /// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, /// and is less-relevant after that. -#[derive(Debug, Default)] +#[derive(Debug, Default, PartialEq, Eq)] pub struct BlsToExecutionChanges { /// Map from validator index to BLS to execution change. 
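+ /// (Deriving `PartialEq`/`Eq` above lets the operation pool equality check in `lib.rs` compare this collection directly.)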
by_validator_index: HashMap>>, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 24e2cfbbb5d..e92d381bacc 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -782,6 +782,7 @@ impl PartialEq for OperationPool { && *self.attester_slashings.read() == *other.attester_slashings.read() && *self.proposer_slashings.read() == *other.proposer_slashings.read() && *self.voluntary_exits.read() == *other.voluntary_exits.read() + && *self.bls_to_execution_changes.read() == *other.bls_to_execution_changes.read() } } diff --git a/beacon_node/proof_generation_service/Cargo.toml b/beacon_node/proof_generation_service/Cargo.toml new file mode 100644 index 00000000000..bbd043e0fdd --- /dev/null +++ b/beacon_node/proof_generation_service/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "proof_generation_service" +version = "0.1.0" +edition = "2021" + +[dependencies] +beacon_chain = { path = "../beacon_chain" } +lighthouse_network = { workspace = true } +network = { workspace = true } +types = { path = "../../consensus/types" } +zkvm_execution_layer = { path = "../../zkvm_execution_layer" } +tokio = { workspace = true } +logging = { workspace = true } +tracing = { workspace = true } + +[dev-dependencies] diff --git a/beacon_node/proof_generation_service/src/lib.rs b/beacon_node/proof_generation_service/src/lib.rs new file mode 100644 index 00000000000..80c2a83e98c --- /dev/null +++ b/beacon_node/proof_generation_service/src/lib.rs @@ -0,0 +1,385 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes, ProofGenerationEvent}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use std::sync::Arc; +use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use tracing::{debug, error, info}; +use types::{EthSpec, ExecPayload, ExecutionProofId, Hash256, SignedBeaconBlock, Slot}; + +/// Service responsible for "altruistic" proof generation +/// +/// This service receives notifications about newly imported blocks and generates +/// execution proofs for blocks that don't have proofs yet. This allows any node +/// (not just the block proposer) to generate and publish proofs. +/// +/// Note: proofs are optional, and there is no proposer-specific path for proving +/// one's own block. The proposer imports its block into its own chain like any +/// other node, so that import triggers this service for the proposer as well. +pub struct ProofGenerationService { + /// Reference to the beacon chain + chain: Arc>, + /// Receiver for proof generation events + event_rx: UnboundedReceiver>, + /// Sender to publish proofs to the network + network_tx: UnboundedSender>, +} + +impl ProofGenerationService { + pub fn new( + chain: Arc>, + event_rx: UnboundedReceiver>, + network_tx: UnboundedSender>, + ) -> Self { + Self { + chain, + event_rx, + network_tx, + } + } + + /// Run the service event loop + pub async fn run(mut self) { + info!("Proof generation service started"); + + while let Some(event) = self.event_rx.recv().await { + let (block_root, slot, block) = event; + + debug!( + slot = ?slot, + block_root = ?block_root, + "Received block import notification" + ); + + // Handle the event + self.handle_block_import(block_root, slot, block).await; + } + + info!("Proof generation service stopped"); + } + + /// Handle a block import event + async fn handle_block_import( + &self, + block_root: Hash256, + slot: Slot, + block: Arc>, + ) { + // Check if proofs are required for this epoch + // TODO(zkproofs): alternative is to only enable this when + // the zkvm fork is enabled.
Check if this is possible + let block_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + if !self + .chain + .data_availability_checker + .execution_proof_check_required_for_epoch(block_epoch) + { + debug!( + slot = ?slot, + epoch = ?block_epoch, + "Proofs not required for this epoch, skipping proof generation" + ); + return; + } + + // Check if we have a proof generator registry + let registry = match &self.chain.zkvm_generator_registry { + Some(registry) => registry.clone(), + None => { + debug!( + slot = ?slot, + "No generator registry configured, skipping proof generation" + ); + return; + } + }; + + // Get the list of proof types we should generate + let proof_types = registry.proof_ids(); + + if proof_types.is_empty() { + debug!( + slot = ?slot, + "No proof generators registered" + ); + return; + } + + debug!( + slot = ?slot, + block_root = ?block_root, + proof_types = proof_types.len(), + "Checking for locally missing proofs" + ); + + // Check which proofs are missing/we haven't received yet + for proof_id in proof_types { + // Check if we already have this proof + let has_proof = self.check_if_proof_exists(slot, block_root, proof_id); + + if has_proof { + debug!( + slot = ?slot, + proof_id = ?proof_id, + "Proof already exists, skipping" + ); + continue; + } + + self.spawn_proof_generation( + block_root, + slot, + block.clone(), + proof_id, + registry.clone(), + self.network_tx.clone(), + ); + } + } + + /// Check if a proof already exists for this block + fn check_if_proof_exists( + &self, + slot: Slot, + block_root: Hash256, + proof_id: ExecutionProofId, + ) -> bool { + let observed = self.chain.observed_execution_proofs.read(); + observed + .is_known(slot, block_root, proof_id) + .unwrap_or(false) + } + + /// Spawn a task to generate a proof + fn spawn_proof_generation( + &self, + block_root: Hash256, + slot: Slot, + block: Arc>, + proof_id: ExecutionProofId, + registry: Arc, + network_tx: UnboundedSender>, + ) { + let chain = self.chain.clone(); + + // Get the generator for this proof type + let Some(generator) = registry.get_generator(proof_id) else { + debug!( + slot = ?slot, + proof_id = ?proof_id, + "No generator found for proof type" + ); + return; + }; + + // Spawn the generation task (async because generator.generate() is async) + self.chain.task_executor.spawn( + async move { + info!( + slot = ?slot, + block_root = ?block_root, + proof_id = ?proof_id, + "Generating execution proof" + ); + + // Extract execution payload hash from the block + let block_hash = match block.message().execution_payload() { + Ok(payload) => payload.block_hash(), + Err(e) => { + debug!( + slot = ?slot, + block_root = ?block_root, + error = ?e, + "Block has no execution payload, skipping proof generation" + ); + return; + } + }; + + // Generate the proof using the generator + let proof_result = generator.generate(slot, &block_hash, &block_root).await; + + match proof_result { + Ok(proof) => { + info!( + slot = ?slot, + proof_id = ?proof_id, + "Successfully generated proof" + ); + + // Double-check that proof didn't arrive via gossip while we were generating + let observed = chain.observed_execution_proofs.read(); + if observed + .is_known(slot, block_root, proof_id) + .unwrap_or(false) + { + info!( + slot = ?slot, + proof_id = ?proof_id, + "Proof arrived via gossip while generating, discarding our copy" + ); + return; + } + drop(observed); + + // Note: We don't store the proof in the data availability checker because: + // 1. 
The block has already been imported and is no longer in the availability cache + // 2. This is altruistic proof generation - we're generating proofs for OTHER nodes + // 3. We already have the block, so we don't need the proof for ourselves + + // Publish the proof to the network + let pubsub_message = PubsubMessage::ExecutionProof(Arc::new(proof)); + + let network_message = NetworkMessage::Publish { + messages: vec![pubsub_message], + }; + + if let Err(e) = network_tx.send(network_message) { + error!( + slot = ?slot, + proof_id = ?proof_id, + error = ?e, + "Failed to send proof to network service" + ); + } else { + info!( + slot = ?slot, + proof_id = ?proof_id, + "Proof successfully published to network" + ); + + // Mark the proof as observed so we don't regenerate it + if let Err(e) = chain + .observed_execution_proofs + .write() + .observe_proof(slot, block_root, proof_id) + { + error!( + slot = ?slot, + proof_id = ?proof_id, + error = ?e, + "Failed to mark proof as observed" + ); + } + } + } + Err(e) => { + error!( + slot = ?slot, + proof_id = ?proof_id, + error = %e, + "Failed to generate proof" + ); + } + } + }, + "proof_generation", + ); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + }; + use tokio::sync::mpsc; + use types::MinimalEthSpec as E; + + type TestHarness = BeaconChainHarness>; + + /// Create a test harness with minimal setup + fn build_test_harness(validator_count: usize) -> TestHarness { + BeaconChainHarness::builder(E) + .default_spec() + .deterministic_keypairs(validator_count) + .fresh_ephemeral_store() + .build() + } + + #[tokio::test] + async fn test_check_if_proof_exists_returns_false_for_new_proof() { + let harness = build_test_harness(8); + let chain = harness.chain.clone(); + + let (_event_tx, event_rx) = mpsc::unbounded_channel(); + let (network_tx, _network_rx) = mpsc::unbounded_channel(); + + let service = ProofGenerationService::new(chain, event_rx, network_tx); + + let block_root = Hash256::random(); + let slot = types::Slot::new(1); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Should return false for a proof that hasn't been observed + assert!( + !service.check_if_proof_exists(slot, block_root, proof_id) + ); + } + + #[tokio::test] + async fn test_check_if_proof_exists_returns_true_after_observation() { + let harness = build_test_harness(8); + let chain = harness.chain.clone(); + + let (_event_tx, event_rx) = mpsc::unbounded_channel(); + let (network_tx, _network_rx) = mpsc::unbounded_channel(); + + let service = ProofGenerationService::new(chain.clone(), event_rx, network_tx); + + let block_root = Hash256::random(); + let slot = types::Slot::new(1); + let proof_id = ExecutionProofId::new(0).unwrap(); + + // Mark the proof as observed + chain + .observed_execution_proofs + .write() + .observe_proof(slot, block_root, proof_id) + .unwrap(); + + // Should return true for an observed proof + assert!( + service.check_if_proof_exists(slot, block_root, proof_id) + ); + } + + #[tokio::test] + async fn test_handle_block_import_skips_when_epoch_not_required() { + let harness = build_test_harness(8); + let chain = harness.chain.clone(); + + // Note: zkVM is NOT enabled in this harness + // TODO(zkproofs): can we make a harness with zkVM enabled to test this functionality in a unit test + + let (_event_tx, event_rx) = mpsc::unbounded_channel(); + let (network_tx, mut network_rx) = mpsc::unbounded_channel(); + + let service = 
ProofGenerationService::new(chain.clone(), event_rx, network_tx); + + harness.advance_slot(); + + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let block = harness.chain.head_snapshot().beacon_block.clone(); + let block_root = block.canonical_root(); + let slot = block.slot(); + + service.handle_block_import(block_root, slot, block).await; + + // Give async tasks time to complete + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Should not have published any proofs because epoch doesn't require them + assert!( + network_rx.try_recv().is_err(), + "Should not publish proofs when epoch doesn't require them" + ); + } +} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e4c7c6ff1fe..268ba468a06 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -933,6 +933,29 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + /* ZK-VM Execution Layer settings */ + .arg( + Arg::new("activate-zkvm") + .long("activate-zkvm") + .help("Activates ZKVM execution proof mode. Enables the node to subscribe to the \ + execution_proof gossip topic, receive and verify execution proofs from peers, \ + and advertise zkVM support in its ENR for peer discovery. \ + Use --zkvm-generation-proof-types to specify which proof types this node \ + should generate (optional - nodes can verify without generating).") + .action(ArgAction::SetTrue) + .display_order(0) + ) + .arg( + Arg::new("zkvm-generation-proof-types") + .long("zkvm-generation-proof-types") + .value_name("PROOF_TYPE_IDS") + .help("Comma-separated list of proof type IDs to generate \ + (e.g., '0,1' where 0=SP1+Reth, 1=Risc0+Geth). \ + Optional - nodes can verify proofs without generating them.") + .requires("activate-zkvm") + .action(ArgAction::Set) + .display_order(0) + ) /* Deneb settings */ .arg( Arg::new("trusted-setup-file-override") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0f169ffaad6..70dd9a12edc 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -29,7 +29,8 @@ use std::str::FromStr; use std::time::Duration; use tracing::{error, info, warn}; use types::graffiti::GraffitiString; -use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; +use types::{Checkpoint, Epoch, EthSpec, ExecutionProofId, Hash256, PublicKeyBytes}; +use zkvm_execution_layer::ZKVMExecutionLayerConfig; const PURGE_DB_CONFIRMATION: &str = "confirm"; @@ -336,6 +337,47 @@ pub fn get_config( // Store the EL config in the client config. client_config.execution_layer = Some(el_config); + // Parse ZK-VM execution layer config if provided + if cli_args.get_flag("activate-zkvm") { + let generation_proof_types = if let Some(gen_types_str) = + clap_utils::parse_optional::(cli_args, "zkvm-generation-proof-types")? + { + gen_types_str + .split(',') + .map(|s| s.trim().parse::()) + .collect::, _>>() + .map_err(|e| { + format!( + "Invalid proof type ID in --zkvm-generation-proof-types: {}", + e + ) + })? + .into_iter() + .map(ExecutionProofId::new) + .collect::, _>>() + .map_err(|e| format!("Invalid subnet ID: {}", e))? 
+ } else { + HashSet::new() + }; + + // Build and validate the config + let zkvm_config = ZKVMExecutionLayerConfig::builder() + .generation_proof_types(generation_proof_types) + .build() + .map_err(|e| format!("Invalid ZK-VM configuration: {}", e))?; + + client_config.zkvm_execution_layer = Some(zkvm_config); + + info!( + "ZKVM mode activated with generation_proof_types={:?}", + client_config + .zkvm_execution_layer + .as_ref() + .unwrap() + .generation_proof_types + ); + } + // Override default trusted setup file if required if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 2ceb94729d5..6db2150e5f5 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -9,6 +9,8 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; pub use config::{get_config, get_data_dir, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; +use lighthouse_network::load_private_key; +use network_utils::enr_ext::peer_id_to_node_id; use slasher::{DatabaseBackendOverride, Slasher}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; @@ -120,8 +122,12 @@ impl ProductionBeaconNode { builder }; + // Generate or load the node id. + let local_keypair = load_private_key(&client_config.network); + let node_id = peer_id_to_node_id(&local_keypair.public().to_peer_id())?.raw(); + let builder = builder - .beacon_chain_builder(client_genesis, client_config.clone()) + .beacon_chain_builder(client_genesis, client_config.clone(), node_id) .await?; info!("Block production enabled"); @@ -133,7 +139,7 @@ impl ProductionBeaconNode { builder .build_beacon_chain()? - .network(Arc::new(client_config.network)) + .network(Arc::new(client_config.network), local_keypair) .await? .notifier()? .http_metrics_config(client_config.http_metrics.clone()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index a0a75dbb0d4..8f5eead8c20 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -649,9 +649,15 @@ impl, Cold: ItemStore> HotColdDB .inspect(|cache| cache.lock().put_block(*block_root, full_block.clone())); DatabaseBlock::Full(full_block) - } else if !self.config.prune_payloads { + } else if !self.config.prune_payloads || *block_root == split.block_root { // If payload pruning is disabled there's a chance we may have the payload of // this finalized block. Attempt to load it but don't error in case it's missing. + // + // We also allow for the split block's payload to be loaded *if it exists*. This is + // necessary on startup when syncing from an unaligned checkpoint (a checkpoint state + // at a skipped slot), and then loading the canonical head (with payload). If we modify + // payload pruning in future so that it doesn't prune the split block's payload, then + // this case could move to the case above where we error if the payload is missing. let fork_name = blinded_block.fork_name(&self.spec)?; if let Some(payload) = self.get_execution_payload(block_root, fork_name)? { DatabaseBlock::Full( @@ -1308,8 +1314,13 @@ impl, Cold: ItemStore> HotColdDB state_root.as_slice().to_vec(), )); - if let Some(slot) = slot { - match self.hot_storage_strategy(slot)? { + // NOTE: `hot_storage_strategy` can error if there are states in the database + // prior to the `anchor_slot`. This can happen if checkpoint sync has been + // botched and left some states in the database prior to completing. 
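+ // Matching `Ok` and silently dropping `Err` below is equivalent to
+ // `self.hot_storage_strategy(slot).ok()`: a state whose storage strategy
+ // cannot be determined is skipped rather than aborting the deletion batch.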
+ if let Some(slot) = slot + && let Ok(strategy) = self.hot_storage_strategy(slot) + { + match strategy { StorageStrategy::Snapshot => { // Full state stored in this position key_value_batch.push(KeyValueStoreOp::DeleteKey( diff --git a/book/book.toml b/book/book.toml index 7b143710a5f..c0d38f61470 100644 --- a/book/book.toml +++ b/book/book.toml @@ -1,7 +1,6 @@ [book] authors = ["Paul Hauner", "Age Manning"] language = "en" -multilingual = false src = "src" title = "Lighthouse Book" diff --git a/book/src/advanced_blobs.md b/book/src/advanced_blobs.md index bb989a85d89..e06bdb9fb9a 100644 --- a/book/src/advanced_blobs.md +++ b/book/src/advanced_blobs.md @@ -6,18 +6,18 @@ With the [Fusaka](https://ethereum.org/roadmap/fusaka) upgrade, the main feature Table below summarizes the role of relevant flags in Lighthouse beacon node: -| | Post-Deneb, Pre-Fulu || Post-Fulu || -|-------|----------|----------|-----------|----------| -| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? | -| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No | -| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days | -| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days | +| | Post-Deneb, Pre-Fulu | | Post-Fulu | | +|---------------------|-------------------------------------------|--------------------------------------------------------------|--------------------------------------------------|----------------------------------------------------------| +| Flag | Usage | Can serve blobs? | Usage | Can serve blobs? | +| --prune-blobs false | Does not prune blobs since using the flag | Yes, for blobs since using the flag and for the past 18 days | Does not prune data columns since using the flag | No | +| --semi-supernode | - | - | Store half data columns | Yes, for blobs since using the flag for a max of 18 days | +| --supernode | - | - | Store all data columns | Yes, for blobs since using the flag for a max of 18 days | While both `--supernode` and `--semi-supernode` can serve blobs, a supernode will be faster to respond to blobs queries as it skips the blob reconstruction step. Running a supernode also helps the network by serving the data columns to its peers. Combining `--prune-blobs false` and `--supernode` (or `--semi-supernode`) implies that no data columns will be pruned, and the node will be able to serve blobs since using the flag. -If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blob-backfill`. However, do note that this is an experimental feature and it only works when the flag is present during a fresh checkpoint sync when the database is initialised. The flag will have no effect if the node is already running (with an existing database). During blob backfill, the feature may cause some issues, e.g., the node may block most of its peers. +If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blob-backfill`. However, do note that this is an experimental feature and it only works when the flag is present during a fresh checkpoint sync when the database is initialised. 
The flag will not backfill blobs if the node is already running (with an existing database). During blob backfill, the feature may cause some issues, e.g., the node may block most of its peers. **⚠️ The following section on Blobs is archived and not maintained as blobs are stored in the form of data columns after the Fulu fork ⚠️** diff --git a/book/src/advanced_checkpoint_sync.md b/book/src/advanced_checkpoint_sync.md index 9cc18dda8c3..7c30598928b 100644 --- a/book/src/advanced_checkpoint_sync.md +++ b/book/src/advanced_checkpoint_sync.md @@ -160,7 +160,7 @@ curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/ where `$SLOT` is the slot number. A slot which is an epoch boundary slot (i.e., first slot of an epoch) should always be used for manual checkpoint sync. -If the block contains blobs, all state, block and blobs must be provided and must point to the same slot. The +If the block contains blobs, all state, block and blobs must be provided and must point to the same slot (only applies for slots before Fulu). The state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary, in which case it will be assumed to be finalized at that epoch. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 4e77046c2dd..16437367949 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -63,11 +63,11 @@ that we have observed are: The following table lists the data for different configurations. Note that the disk space requirement is for the `chain_db` and `freezer_db`, excluding the `blobs_db`. -| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync | -|---|---|---|---|---| -| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week | -| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week | -| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks | +| Hierarchy Exponents | Storage Requirement | Sequential Slot Query | Uncached Query | Time to Sync | +|------------------------------|---------------------|-----------------------|----------------|--------------| +| 5,9,11,13,16,18,21 (default) | 418 GiB | 250-700 ms | up to 10 s | 1 week | +| 5,7,11 (frequent snapshots) | 589 GiB | 250-700 ms | up to 6 s | 1 week | +| 0,5,7,11 (per-slot diffs) | 2500 GiB | 250-700 ms | up to 4 s | 7 weeks | [Jim](https://github.com/mcdee) has done some experiments to study the response time of querying random slots (uncached query) for `--hierarchy-exponents 0,5,7,11` (per-slot diffs) and `--hierarchy-exponents 5,9,11,13,17,21` (per-epoch diffs), as show in the figures below. From the figures, two points can be concluded: diff --git a/book/src/api_vc_endpoints.md b/book/src/api_vc_endpoints.md index 14f4933e171..d128b13b2f8 100644 --- a/book/src/api_vc_endpoints.md +++ b/book/src/api_vc_endpoints.md @@ -132,7 +132,7 @@ Returns information regarding the health of the host machine. 
| Property | Specification | |-------------------|--------------------------------------------| -| Path | `/lighthouse/ui/health` | +| Path | `/lighthouse/ui/health` | | Method | GET | | Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | @@ -178,7 +178,7 @@ Returns the graffiti that will be used for the next block proposal of each valid | Property | Specification | |-------------------|--------------------------------------------| -| Path | `/lighthouse/ui/graffiti` | +| Path | `/lighthouse/ui/graffiti` | | Method | GET | | Required Headers | [`Authorization`](./api_vc_auth_header.md) | | Typical Responses | 200 | diff --git a/book/src/archived_merge_migration.md b/book/src/archived_merge_migration.md index ac9c78c5e3b..b983db23ae7 100644 --- a/book/src/archived_merge_migration.md +++ b/book/src/archived_merge_migration.md @@ -25,14 +25,14 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln
-| Network | Bellatrix | The Merge | Remark | -|---------|-------------------------------|-------------------------------| -----------| -| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | -| Sepolia | 20th June 2022 | 6th July 2022 | | -| Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater`| -| Mainnet | 6th September 2022| 15th September 2022| | -| Chiado | 10th October 2022 | 4th November 2022 | | -| Gnosis | 30th November 2022| 8th December 2022 | | +| Network | Bellatrix | The Merge | Remark | +|---------|-------------------------------|--------------------------------|---------------------------| +| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | +| Sepolia | 20th June 2022 | 6th July 2022 | | +| Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater` | +| Mainnet | 6th September 2022 | 15th September 2022 | | +| Chiado | 10th October 2022 | 4th November 2022 | | +| Gnosis | 30th November 2022 | 8th December 2022 | |
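As a usage aside for the validator-client endpoints documented above (`/lighthouse/ui/health`, `/lighthouse/ui/graffiti`): both require the `Authorization` header. The sketch below is illustrative and not part of this diff; it assumes the `reqwest` crate with its `blocking` feature, the default VC API port 5062, and the usual token location under the validator datadir.

```rust
// Hedged sketch: authenticated GET against the Lighthouse VC HTTP API.
// Assumes reqwest = { version = "0.12", features = ["blocking"] }.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The API token lives in the validator datadir (path illustrative).
    let token = std::fs::read_to_string(".lighthouse/mainnet/validators/api-token.txt")?;
    let resp = reqwest::blocking::Client::new()
        .get("http://localhost:5062/lighthouse/ui/health")
        .header("Authorization", format!("Bearer {}", token.trim()))
        .send()?;
    let status = resp.status();
    let body = resp.text()?;
    println!("{status}\n{body}");
    Ok(())
}
```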
diff --git a/book/src/contributing_setup.md b/book/src/contributing_setup.md
index b817faad879..958e8f71f6e 100644
--- a/book/src/contributing_setup.md
+++ b/book/src/contributing_setup.md
@@ -71,6 +71,47 @@ $ cargo nextest run -p safe_arith
 Summary [ 0.012s] 8 tests run: 8 passed, 0 skipped
 ```
 
+### Integration tests
+
+Due to the size and complexity of the test suite, Lighthouse uses a pattern that differs from how
+[integration tests are usually defined](https://doc.rust-lang.org/rust-by-example/testing/integration_testing.html).
+This pattern helps manage large test suites more effectively and ensures tests only run in release
+mode to avoid stack overflow issues.
+
+#### The "main pattern"
+
+For packages with integration tests that require more than one file, Lighthouse uses the following
+structure:
+
+- A `main.rs` file is defined at `package/tests/main.rs` that declares other test files as modules
+- In `package/Cargo.toml`, integration tests are explicitly configured:
+
+  ```toml
+  [package]
+  autotests = false
+
+  [[test]]
+  name = "package_tests"
+  path = "tests/main.rs"
+  ```
+
+#### Rust Analyzer configuration
+
+This pattern, combined with `#![cfg(not(debug_assertions))]` directives in test files (which
+prevent tests from running in debug mode), causes Rust Analyzer to not provide IDE services like
+autocomplete and error checking in integration test files by default.
+
+To enable IDE support for these test files, configure Rust Analyzer to disable debug assertions.
+For VSCode users, this is already configured in the repository's `.vscode/settings.json` file:
+
+```json
+{
+  "rust-analyzer.cargo.cfgs": [
+    "!debug_assertions"
+  ]
+}
+```
+
 ### test_logger
 
 The test_logger, located in `/common/logging/` can be used to create a `Logger` that by
diff --git a/book/src/ui_installation.md b/book/src/ui_installation.md
index 5a785650049..82f5d755bcb 100644
--- a/book/src/ui_installation.md
+++ b/book/src/ui_installation.md
@@ -138,13 +138,13 @@ Navigate to the backend directory `cd backend`. Install all required Node packag
 After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`.
 
-This will allow you to access siren at `http://localhost:3000` by default.
+This will allow you to access Siren at `http://localhost:3300` by default.
 
 ## Advanced configuration
 
 ### About self-signed SSL certificates
 
-By default, internally, Siren is running on port 80 (plain, behind nginx), port 3000 (plain, direct) and port 443 (with SSL, behind nginx)). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend to only disable SSL if you would access Siren over a local LAN or otherwise highly trusted or encrypted network (i.e. VPN).
+By default, internally, Siren runs on port 80 (plain, behind nginx), port 3300 (plain, direct) and port 443 (with SSL, behind nginx). Siren generates and uses a self-signed certificate on startup, which will trigger a security warning when you first access the interface. We recommend disabling SSL only if you access Siren over a local LAN or an otherwise highly trusted or encrypted network (e.g. a VPN).
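Returning to the integration-test "main pattern" added to `contributing_setup.md` above: under that layout a test entry point might look like the sketch below. The module names are illustrative, not part of this diff; the concrete instance this patch adds is `common/eth2_interop_keypairs/tests/main.rs`.

```rust
// Hypothetical package/tests/main.rs following the "main pattern".
// The crate's Cargo.toml sets `autotests = false` and points a single
// `[[test]]` target at this file, so these modules are the only
// integration tests that get compiled.
#![cfg(not(debug_assertions))] // keep heavy tests out of debug builds

mod from_file; // tests/from_file.rs
mod generation; // tests/generation.rs
```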
#### Generating persistent SSL certificates and installing them to your system diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 906fcd7b5c3..7a75bdc80a1 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -26,7 +26,7 @@ pretty_reqwest_error = { workspace = true } proto_array = { workspace = true } rand = { workspace = true } reqwest = { workspace = true } -reqwest-eventsource = "0.5.0" +reqwest-eventsource = "0.6.0" sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c3f9c305e00..6aad00301a6 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1520,6 +1520,21 @@ pub struct ForkChoiceNode { pub weight: u64, pub validity: Option, pub execution_block_hash: Option, + pub extra_data: ForkChoiceExtraData, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ForkChoiceExtraData { + pub target_root: Hash256, + pub justified_root: Hash256, + pub finalized_root: Hash256, + pub unrealized_justified_root: Option, + pub unrealized_finalized_root: Option, + pub unrealized_justified_epoch: Option, + pub unrealized_finalized_epoch: Option, + pub execution_status: String, + pub best_child: Option, + pub best_descendant: Option, } #[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index c19b32014e1..309ff233e62 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -15,3 +16,7 @@ serde_yaml = { workspace = true } [dev-dependencies] base64 = "0.13.0" + +[[test]] +name = "eth2_interop_keypairs_tests" +path = "tests/main.rs" diff --git a/common/eth2_interop_keypairs/tests/main.rs b/common/eth2_interop_keypairs/tests/main.rs new file mode 100644 index 00000000000..4ee50127f29 --- /dev/null +++ b/common/eth2_interop_keypairs/tests/main.rs @@ -0,0 +1,2 @@ +mod from_file; +mod generation; diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 39c7137d4cb..1052128852a 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -21,6 +21,8 @@ jemalloc-profiling = ["tikv-jemallocator/profiling"] # Force the use of system malloc (or glibc) rather than jemalloc. # This is a no-op on Windows where jemalloc is always disabled. 
sysmalloc = [] +# Enable jemalloc with unprefixed malloc (recommended for reproducible builds) +jemalloc-unprefixed = ["jemalloc", "tikv-jemallocator/unprefixed_malloc_on_supported_platforms"] [dependencies] libc = "0.2.79" diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index c10adbac0df..1c77d4d84b8 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -5,5 +5,6 @@ pub mod cors; pub mod json; pub mod query; pub mod reject; +pub mod status_code; pub mod task; pub mod uor; diff --git a/common/warp_utils/src/status_code.rs b/common/warp_utils/src/status_code.rs new file mode 100644 index 00000000000..1b052973599 --- /dev/null +++ b/common/warp_utils/src/status_code.rs @@ -0,0 +1,9 @@ +use eth2::StatusCode; +use warp::Rejection; + +/// Convert from a "new" `http::StatusCode` to a `warp` compatible one. +pub fn convert(code: StatusCode) -> Result { + code.as_u16().try_into().map_err(|e| { + crate::reject::custom_server_error(format!("bad status code {code:?} - {e:?}")) + }) +} diff --git a/consensus/context_deserialize/context_deserialize/Cargo.toml b/consensus/context_deserialize/context_deserialize/Cargo.toml deleted file mode 100644 index 0e4a97b9ae3..00000000000 --- a/consensus/context_deserialize/context_deserialize/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "context_deserialize" -version = "0.1.0" -edition = "2021" - -[features] -default = ["derive"] -derive = ["dep:context_deserialize_derive"] -milhouse = ["dep:milhouse"] -ssz = ["dep:ssz_types"] -all = ["derive", "milhouse", "ssz"] - -[dependencies] -context_deserialize_derive = { version = "0.1.0", path = "../context_deserialize_derive", optional = true } -milhouse = { workspace = true, optional = true } -serde = { workspace = true } -ssz_types = { workspace = true, optional = true } diff --git a/consensus/context_deserialize/context_deserialize/src/impls/core.rs b/consensus/context_deserialize/context_deserialize/src/impls/core.rs deleted file mode 100644 index 803619365f1..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/core.rs +++ /dev/null @@ -1,103 +0,0 @@ -use crate::ContextDeserialize; -use serde::de::{Deserialize, DeserializeSeed, Deserializer, SeqAccess, Visitor}; -use std::marker::PhantomData; -use std::sync::Arc; - -impl<'de, C, T> ContextDeserialize<'de, T> for Arc -where - C: ContextDeserialize<'de, T>, -{ - fn context_deserialize(deserializer: D, context: T) -> Result - where - D: Deserializer<'de>, - { - Ok(Arc::new(C::context_deserialize(deserializer, context)?)) - } -} - -impl<'de, T, C> ContextDeserialize<'de, C> for Vec -where - T: ContextDeserialize<'de, C>, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - // Our Visitor, which owns one copy of the context T - struct ContextVisitor { - context: T, - _marker: PhantomData, - } - - impl<'de, C, T> Visitor<'de> for ContextVisitor - where - C: ContextDeserialize<'de, T>, - T: Clone, - { - type Value = Vec; - - fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.write_str("a sequence of context‐deserialized elements") - } - - fn visit_seq(self, mut seq: A) -> Result, A::Error> - where - A: SeqAccess<'de>, - { - let mut out = Vec::with_capacity(seq.size_hint().unwrap_or(0)); - // for each element, we clone the context and hand it to the seed - while let Some(elem) = seq.next_element_seed(ContextSeed { - context: self.context.clone(), - _marker: PhantomData, - })? 
{ - out.push(elem); - } - Ok(out) - } - } - - // A little seed that hands the deserializer + context into C::context_deserialize - struct ContextSeed { - context: C, - _marker: PhantomData, - } - - impl<'de, T, C> DeserializeSeed<'de> for ContextSeed - where - T: ContextDeserialize<'de, C>, - C: Clone, - { - type Value = T; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - T::context_deserialize(deserializer, self.context) - } - } - - deserializer.deserialize_seq(ContextVisitor { - context, - _marker: PhantomData, - }) - } -} - -macro_rules! trivial_deserialize { - ($($t:ty),* $(,)?) => { - $( - impl<'de, T> ContextDeserialize<'de, T> for $t { - fn context_deserialize(deserializer: D, _context: T) -> Result - where - D: Deserializer<'de>, - { - <$t>::deserialize(deserializer) - } - } - )* - }; -} - -trivial_deserialize!(bool, u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, f32, f64); diff --git a/consensus/context_deserialize/context_deserialize/src/impls/milhouse.rs b/consensus/context_deserialize/context_deserialize/src/impls/milhouse.rs deleted file mode 100644 index 3b86f067a3e..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/milhouse.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::ContextDeserialize; -use milhouse::{List, Value, Vector}; -use serde::de::Deserializer; -use ssz_types::typenum::Unsigned; - -impl<'de, C, T, N> ContextDeserialize<'de, C> for List -where - T: ContextDeserialize<'de, C> + Value, - N: Unsigned, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - // First deserialize as a Vec. - // This is not the most efficient implementation as it allocates a temporary Vec. In future - // we could write a more performant implementation using `List::builder()`. - let vec = Vec::::context_deserialize(deserializer, context)?; - - // Then convert to List, which will check the length. 
- List::new(vec) - .map_err(|e| serde::de::Error::custom(format!("Failed to create List: {:?}", e))) - } -} - -impl<'de, C, T, N> ContextDeserialize<'de, C> for Vector -where - T: ContextDeserialize<'de, C> + Value, - N: Unsigned, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - // First deserialize as a List - let list = List::::context_deserialize(deserializer, context)?; - - // Then convert to Vector, which will check the length - Vector::try_from(list).map_err(|e| { - serde::de::Error::custom(format!("Failed to convert List to Vector: {:?}", e)) - }) - } -} diff --git a/consensus/context_deserialize/context_deserialize/src/impls/mod.rs b/consensus/context_deserialize/context_deserialize/src/impls/mod.rs deleted file mode 100644 index 0225c5e031f..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod core; - -#[cfg(feature = "milhouse")] -mod milhouse; - -#[cfg(feature = "ssz")] -mod ssz; diff --git a/consensus/context_deserialize/context_deserialize/src/impls/ssz.rs b/consensus/context_deserialize/context_deserialize/src/impls/ssz.rs deleted file mode 100644 index 26813a96fb7..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/ssz.rs +++ /dev/null @@ -1,51 +0,0 @@ -use crate::ContextDeserialize; -use serde::{ - de::{Deserializer, Error}, - Deserialize, -}; -use ssz_types::{ - length::{Fixed, Variable}, - typenum::Unsigned, - Bitfield, FixedVector, -}; - -impl<'de, C, T, N> ContextDeserialize<'de, C> for FixedVector -where - T: ContextDeserialize<'de, C>, - N: Unsigned, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - let vec = Vec::::context_deserialize(deserializer, context)?; - FixedVector::new(vec).map_err(|e| D::Error::custom(format!("{:?}", e))) - } -} - -impl<'de, C, N> ContextDeserialize<'de, C> for Bitfield> -where - N: Unsigned + Clone, -{ - fn context_deserialize(deserializer: D, _context: C) -> Result - where - D: Deserializer<'de>, - { - Bitfield::>::deserialize(deserializer) - .map_err(|e| D::Error::custom(format!("{:?}", e))) - } -} - -impl<'de, C, N> ContextDeserialize<'de, C> for Bitfield> -where - N: Unsigned + Clone, -{ - fn context_deserialize(deserializer: D, _context: C) -> Result - where - D: Deserializer<'de>, - { - Bitfield::>::deserialize(deserializer) - .map_err(|e| D::Error::custom(format!("{:?}", e))) - } -} diff --git a/consensus/context_deserialize/context_deserialize/src/lib.rs b/consensus/context_deserialize/context_deserialize/src/lib.rs deleted file mode 100644 index e5f2bfdba38..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/lib.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod impls; - -#[cfg(feature = "derive")] -pub use context_deserialize_derive::context_deserialize; - -use serde::de::Deserializer; - -/// General-purpose deserialization trait that accepts extra context `C`. 
-pub trait ContextDeserialize<'de, C>: Sized { - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>; -} diff --git a/consensus/context_deserialize/context_deserialize_derive/Cargo.toml b/consensus/context_deserialize/context_deserialize_derive/Cargo.toml deleted file mode 100644 index eedae30cdfe..00000000000 --- a/consensus/context_deserialize/context_deserialize_derive/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "context_deserialize_derive" -version = "0.1.0" -edition = "2021" - -[lib] -proc-macro = true - -[dependencies] -quote = { workspace = true } -syn = { workspace = true } - -[dev-dependencies] -context_deserialize = { path = "../context_deserialize" } -serde = { workspace = true } -serde_json = "1.0" diff --git a/consensus/context_deserialize/context_deserialize_derive/src/lib.rs b/consensus/context_deserialize/context_deserialize_derive/src/lib.rs deleted file mode 100644 index 0b73a43b0a4..00000000000 --- a/consensus/context_deserialize/context_deserialize_derive/src/lib.rs +++ /dev/null @@ -1,118 +0,0 @@ -extern crate proc_macro; -extern crate quote; -extern crate syn; - -use proc_macro::TokenStream; -use quote::quote; -use syn::{ - parse_macro_input, AttributeArgs, DeriveInput, GenericParam, LifetimeDef, Meta, NestedMeta, - WhereClause, -}; - -#[proc_macro_attribute] -pub fn context_deserialize(attr: TokenStream, item: TokenStream) -> TokenStream { - let args = parse_macro_input!(attr as AttributeArgs); - let input = parse_macro_input!(item as DeriveInput); - let ident = &input.ident; - - let mut ctx_types = Vec::new(); - let mut explicit_where: Option = None; - - for meta in args { - match meta { - NestedMeta::Meta(Meta::Path(p)) => { - ctx_types.push(p); - } - NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("bound") => { - if let syn::Lit::Str(lit_str) = &nv.lit { - let where_string = format!("where {}", lit_str.value()); - match syn::parse_str::(&where_string) { - Ok(where_clause) => { - explicit_where = Some(where_clause); - } - Err(err) => { - return syn::Error::new_spanned( - lit_str, - format!("Invalid where clause '{}': {}", lit_str.value(), err), - ) - .to_compile_error() - .into(); - } - } - } else { - return syn::Error::new_spanned( - &nv, - "Expected a string literal for `bound` value", - ) - .to_compile_error() - .into(); - } - } - _ => { - return syn::Error::new_spanned( - &meta, - "Expected paths or `bound = \"...\"` in #[context_deserialize(...)]", - ) - .to_compile_error() - .into(); - } - } - } - - if ctx_types.is_empty() { - return quote! { - compile_error!("Usage: #[context_deserialize(Type1, Type2, ..., bound = \"...\")]"); - } - .into(); - } - - let original_generics = input.generics.clone(); - - // Clone and clean generics for impl use (remove default params) - let mut impl_generics = input.generics.clone(); - for param in impl_generics.params.iter_mut() { - if let GenericParam::Type(ty) = param { - ty.eq_token = None; - ty.default = None; - } - } - - // Ensure 'de lifetime exists in impl generics - let has_de = impl_generics - .lifetimes() - .any(|LifetimeDef { lifetime, .. }| lifetime.ident == "de"); - - if !has_de { - impl_generics.params.insert(0, syn::parse_quote! { 'de }); - } - - let (_, ty_generics, _) = original_generics.split_for_impl(); - let (impl_gens, _, _) = impl_generics.split_for_impl(); - - // Generate: no `'de` applied to the type name - let mut impls = quote! {}; - for ctx in ctx_types { - impls.extend(quote! 
{ - impl #impl_gens context_deserialize::ContextDeserialize<'de, #ctx> - for #ident #ty_generics - #explicit_where - { - fn context_deserialize( - deserializer: D, - _context: #ctx, - ) -> Result - where - D: serde::de::Deserializer<'de>, - { - ::deserialize(deserializer) - } - } - }); - } - - quote! { - #input - #impls - } - .into() -} diff --git a/consensus/context_deserialize/context_deserialize_derive/tests/context_deserialize_derive.rs b/consensus/context_deserialize/context_deserialize_derive/tests/context_deserialize_derive.rs deleted file mode 100644 index 8fb46da9c65..00000000000 --- a/consensus/context_deserialize/context_deserialize_derive/tests/context_deserialize_derive.rs +++ /dev/null @@ -1,93 +0,0 @@ -use context_deserialize::{context_deserialize, ContextDeserialize}; -use serde::{Deserialize, Serialize}; - -#[test] -fn test_context_deserialize_derive() { - type TestContext = (); - - #[context_deserialize(TestContext)] - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Test { - field: String, - } - - let test = Test { - field: "test".to_string(), - }; - let serialized = serde_json::to_string(&test).unwrap(); - let deserialized = - Test::context_deserialize(&mut serde_json::Deserializer::from_str(&serialized), ()) - .unwrap(); - assert_eq!(test, deserialized); -} - -#[test] -fn test_context_deserialize_derive_multiple_types() { - #[allow(dead_code)] - struct TestContext1(u64); - #[allow(dead_code)] - struct TestContext2(String); - - // This will derive: - // - ContextDeserialize for Test - // - ContextDeserialize for Test - // by just leveraging the Deserialize impl - #[context_deserialize(TestContext1, TestContext2)] - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Test { - field: String, - } - - let test = Test { - field: "test".to_string(), - }; - let serialized = serde_json::to_string(&test).unwrap(); - let deserialized = Test::context_deserialize( - &mut serde_json::Deserializer::from_str(&serialized), - TestContext1(1), - ) - .unwrap(); - assert_eq!(test, deserialized); - - let deserialized = Test::context_deserialize( - &mut serde_json::Deserializer::from_str(&serialized), - TestContext2("2".to_string()), - ) - .unwrap(); - - assert_eq!(test, deserialized); -} - -#[test] -fn test_context_deserialize_derive_bound() { - use std::fmt::Debug; - - struct TestContext; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Inner { - value: u64, - } - - #[context_deserialize( - TestContext, - bound = "T: Serialize + for<'a> Deserialize<'a> + Debug + PartialEq" - )] - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Wrapper { - inner: T, - } - - let val = Wrapper { - inner: Inner { value: 42 }, - }; - - let serialized = serde_json::to_string(&val).unwrap(); - let deserialized = Wrapper::::context_deserialize( - &mut serde_json::Deserializer::from_str(&serialized), - TestContext, - ) - .unwrap(); - - assert_eq!(val, deserialized); -} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index fe1f5fba9e4..6565e7cdaf6 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -627,7 +627,7 @@ where op: &InvalidationOperation, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation::(op) + .process_execution_payload_invalidation::(op, self.finalized_checkpoint()) .map_err(Error::FailedToProcessInvalidExecutionPayload) } @@ -908,6 +908,8 @@ where unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), }, 
current_slot, + self.justified_checkpoint(), + self.finalized_checkpoint(), )?; Ok(()) @@ -1288,7 +1290,7 @@ where /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it. pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool { self.proto_array - .is_finalized_checkpoint_or_descendant::(block_root) + .is_finalized_checkpoint_or_descendant::(block_root, self.finalized_checkpoint()) } pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { @@ -1508,7 +1510,9 @@ where /// be instantiated again later. pub fn to_persisted(&self) -> PersistedForkChoice { PersistedForkChoice { - proto_array: self.proto_array().as_ssz_container(), + proto_array: self + .proto_array() + .as_ssz_container(self.justified_checkpoint(), self.finalized_checkpoint()), queued_attestations: self.queued_attestations().to_vec(), } } diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index d750c054060..5ba8a1b949e 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -14,5 +14,4 @@ fixed_bytes = { workspace = true } safe_arith = { workspace = true } [dev-dependencies] -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } +proptest = { workspace = true } diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index bf075ec15a5..494c73d05ce 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -413,50 +413,70 @@ impl From for MerkleTreeError { #[cfg(test)] mod tests { use super::*; - use quickcheck::TestResult; - use quickcheck_macros::quickcheck; - - /// Check that we can: - /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. - /// 2. Generate valid proofs for all of the leaves of this MerkleTree. - #[quickcheck] - fn quickcheck_create_and_verify(int_leaves: Vec, depth: usize) -> TestResult { - if depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { - return TestResult::discard(); - } - let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); - let merkle_tree = MerkleTree::create(&leaves, depth); - let merkle_root = merkle_tree.hash(); + use proptest::prelude::*; + + // Limit test depth to avoid generating huge trees. Depth 10 = 1024 max leaves. + const TEST_MAX_DEPTH: usize = 10; - let proofs_ok = (0..leaves.len()).all(|i| { - let (leaf, branch) = merkle_tree - .generate_proof(i, depth) - .expect("should generate proof"); - leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) - }); + fn merkle_leaves_strategy(max_depth: usize) -> impl Strategy, usize)> { + (0..=max_depth).prop_flat_map(|depth| { + let max_leaves = 2usize.pow(depth as u32); + ( + proptest::collection::vec(any::(), 0..=max_leaves), + Just(depth), + ) + }) + } - TestResult::from_bool(proofs_ok) + fn merkle_leaves_strategy_min_depth( + max_depth: usize, + min_depth: usize, + ) -> impl Strategy, usize)> { + (min_depth..=max_depth).prop_flat_map(|depth| { + let max_leaves = 2usize.pow(depth as u32); + ( + proptest::collection::vec(any::(), 0..=max_leaves), + Just(depth), + ) + }) } - #[quickcheck] - fn quickcheck_push_leaf_and_verify(int_leaves: Vec, depth: usize) -> TestResult { - if depth == 0 || depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { - return TestResult::discard(); + proptest::proptest! { + /// Check that we can: + /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. 
+ /// 2. Generate valid proofs for all of the leaves of this MerkleTree. + #[test] + fn proptest_create_and_verify((int_leaves, depth) in merkle_leaves_strategy(TEST_MAX_DEPTH)) { + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let merkle_tree = MerkleTree::create(&leaves, depth); + let merkle_root = merkle_tree.hash(); + + let proofs_ok = (0..leaves.len()).all(|i| { + let (leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); + leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) + }); + + proptest::prop_assert!(proofs_ok); } - let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be); - let mut merkle_tree = MerkleTree::create(&[], depth); + #[test] + fn proptest_push_leaf_and_verify((int_leaves, depth) in merkle_leaves_strategy_min_depth(TEST_MAX_DEPTH, 1)) { + let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be); + let mut merkle_tree = MerkleTree::create(&[], depth); - let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { - assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); - let (stored_leaf, branch) = merkle_tree - .generate_proof(i, depth) - .expect("should generate proof"); - stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) - }); + let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { + assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); + let (stored_leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); + stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) + }); - TestResult::from_bool(proofs_ok) + proptest::prop_assert!(proofs_ok); + } } #[test] diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 20987dff26d..43a7e3b77fe 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -212,7 +212,12 @@ impl ForkChoiceTestDefinition { unrealized_finalized_checkpoint: None, }; fork_choice - .process_block::(block, slot) + .process_block::( + block, + slot, + self.justified_checkpoint, + self.finalized_checkpoint, + ) .unwrap_or_else(|e| { panic!( "process_block op at index {} returned error: {:?}", @@ -272,7 +277,10 @@ impl ForkChoiceTestDefinition { } }; fork_choice - .process_execution_payload_invalidation::(&op) + .process_execution_payload_invalidation::( + &op, + self.finalized_checkpoint, + ) .unwrap() } Operation::AssertWeight { block_root, weight } => assert_eq!( @@ -305,7 +313,8 @@ fn get_checkpoint(i: u64) -> Checkpoint { } fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { - let bytes = original.as_bytes(); + // The checkpoint are ignored `ProtoArrayForkChoice::from_bytes` so any value is ok + let bytes = original.as_bytes(Checkpoint::default(), Checkpoint::default()); let decoded = ProtoArrayForkChoice::from_bytes(&bytes, original.balances.clone()) .expect("fork choice should decode from bytes"); assert!( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 18af2dfc24c..1d78ce9f443 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -130,8 +130,6 @@ pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes /// simply waste time. 
pub prune_threshold: usize, - pub justified_checkpoint: Checkpoint, - pub finalized_checkpoint: Checkpoint, pub nodes: Vec, pub indices: HashMap, pub previous_proposer_boost: ProposerBoost, @@ -155,8 +153,8 @@ impl ProtoArray { pub fn apply_score_changes( &mut self, mut deltas: Vec, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, new_justified_balances: &JustifiedBalances, proposer_boost_root: Hash256, current_slot: Slot, @@ -169,13 +167,6 @@ impl ProtoArray { }); } - if justified_checkpoint != self.justified_checkpoint - || finalized_checkpoint != self.finalized_checkpoint - { - self.justified_checkpoint = justified_checkpoint; - self.finalized_checkpoint = finalized_checkpoint; - } - // Default the proposer boost score to zero. let mut proposer_score = 0; @@ -296,6 +287,8 @@ impl ProtoArray { parent_index, node_index, current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, )?; } } @@ -306,7 +299,13 @@ impl ProtoArray { /// Register a block with the fork choice. /// /// It is only sane to supply a `None` parent for the genesis block. - pub fn on_block(&mut self, block: Block, current_slot: Slot) -> Result<(), Error> { + pub fn on_block( + &mut self, + block: Block, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> Result<(), Error> { // If the block is already known, simply ignore it. if self.indices.contains_key(&block.root) { return Ok(()); @@ -357,6 +356,8 @@ impl ProtoArray { parent_index, node_index, current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, )?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { @@ -439,6 +440,7 @@ impl ProtoArray { pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, + best_finalized_checkpoint: Checkpoint, ) -> Result<(), Error> { let mut invalidated_indices: HashSet = <_>::default(); let head_block_root = op.block_root(); @@ -467,7 +469,10 @@ impl ProtoArray { let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root.is_some_and(|ancestor_root| { self.is_descendant(ancestor_root, head_block_root) - && self.is_finalized_checkpoint_or_descendant::(ancestor_root) + && self.is_finalized_checkpoint_or_descendant::( + ancestor_root, + best_finalized_checkpoint, + ) }); // Collect all *ancestors* which were declared invalid since they reside between the @@ -630,6 +635,8 @@ impl ProtoArray { &self, justified_root: &Hash256, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result { let justified_index = self .indices @@ -663,12 +670,17 @@ impl ProtoArray { .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. 
- if !self.node_is_viable_for_head::(best_node, current_slot) { + if !self.node_is_viable_for_head::( + best_node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { current_slot, start_root: *justified_root, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, + justified_checkpoint: best_justified_checkpoint, + finalized_checkpoint: best_finalized_checkpoint, head_root: best_node.root, head_justified_checkpoint: best_node.justified_checkpoint, head_finalized_checkpoint: best_node.finalized_checkpoint, @@ -765,6 +777,8 @@ impl ProtoArray { parent_index: usize, child_index: usize, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result<(), Error> { let child = self .nodes @@ -776,8 +790,12 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - let child_leads_to_viable_head = - self.node_leads_to_viable_head::(child, current_slot)?; + let child_leads_to_viable_head = self.node_leads_to_viable_head::( + child, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. @@ -806,8 +824,12 @@ impl ProtoArray { .get(best_child_index) .ok_or(Error::InvalidBestDescendant(best_child_index))?; - let best_child_leads_to_viable_head = - self.node_leads_to_viable_head::(best_child, current_slot)?; + let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( + best_child, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; if child_leads_to_viable_head && !best_child_leads_to_viable_head { // The child leads to a viable head, but the current best-child doesn't. @@ -856,6 +878,8 @@ impl ProtoArray { &self, node: &ProtoNode, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result { let best_descendant_is_viable_for_head = if let Some(best_descendant_index) = node.best_descendant { @@ -864,13 +888,23 @@ impl ProtoArray { .get(best_descendant_index) .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - self.node_is_viable_for_head::(best_descendant, current_slot) + self.node_is_viable_for_head::( + best_descendant, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) } else { false }; Ok(best_descendant_is_viable_for_head - || self.node_is_viable_for_head::(node, current_slot)) + || self.node_is_viable_for_head::( + node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )) } /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: @@ -879,7 +913,13 @@ impl ProtoArray { /// /// Any node that has a different finalized or justified epoch should not be viable for the /// head. 
- fn node_is_viable_for_head(&self, node: &ProtoNode, current_slot: Slot) -> bool { + fn node_is_viable_for_head( + &self, + node: &ProtoNode, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> bool { if node.execution_status.is_invalid() { return false; } @@ -901,12 +941,13 @@ impl ProtoArray { node_justified_checkpoint }; - let correct_justified = self.justified_checkpoint.epoch == genesis_epoch - || voting_source.epoch == self.justified_checkpoint.epoch + let correct_justified = best_justified_checkpoint.epoch == genesis_epoch + || voting_source.epoch == best_justified_checkpoint.epoch || voting_source.epoch + 2 >= current_epoch; - let correct_finalized = self.finalized_checkpoint.epoch == genesis_epoch - || self.is_finalized_checkpoint_or_descendant::(node.root); + let correct_finalized = best_finalized_checkpoint.epoch == genesis_epoch + || self + .is_finalized_checkpoint_or_descendant::(node.root, best_finalized_checkpoint); correct_justified && correct_finalized } @@ -961,10 +1002,13 @@ impl ProtoArray { /// /// Notably, this function is checking ancestory of the finalized /// *checkpoint* not the finalized *block*. - pub fn is_finalized_checkpoint_or_descendant(&self, root: Hash256) -> bool { - let finalized_root = self.finalized_checkpoint.root; - let finalized_slot = self - .finalized_checkpoint + pub fn is_finalized_checkpoint_or_descendant( + &self, + root: Hash256, + best_finalized_checkpoint: Checkpoint, + ) -> bool { + let finalized_root = best_finalized_checkpoint.root; + let finalized_slot = best_finalized_checkpoint .epoch .start_slot(E::slots_per_epoch()); @@ -987,7 +1031,7 @@ impl ProtoArray { // If the conditions don't match for this node then they're unlikely to // start matching for its ancestors. for checkpoint in &[node.finalized_checkpoint, node.justified_checkpoint] { - if checkpoint == &self.finalized_checkpoint { + if checkpoint == &best_finalized_checkpoint { return true; } } @@ -996,7 +1040,7 @@ impl ProtoArray { node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { - if checkpoint.is_some_and(|cp| cp == self.finalized_checkpoint) { + if checkpoint.is_some_and(|cp| cp == best_finalized_checkpoint) { return true; } } @@ -1044,12 +1088,18 @@ impl ProtoArray { /// For informational purposes like the beacon HTTP API, we use this as the list of known heads, /// even though some of them might not be viable. We do this to maintain consistency between the /// definition of "head" used by pruning (which does not consider viability) and fork choice. 
- pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { + pub fn heads_descended_from_finalization( + &self, + best_finalized_checkpoint: Checkpoint, + ) -> Vec<&ProtoNode> { self.nodes .iter() .filter(|node| { node.best_child.is_none() - && self.is_finalized_checkpoint_or_descendant::(node.root) + && self.is_finalized_checkpoint_or_descendant::( + node.root, + best_finalized_checkpoint, + ) }) .collect() } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index dea853d245d..137471ce36d 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -424,8 +424,6 @@ impl ProtoArrayForkChoice { ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, - justified_checkpoint, - finalized_checkpoint, nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), previous_proposer_boost: ProposerBoost::default(), @@ -449,7 +447,12 @@ impl ProtoArrayForkChoice { }; proto_array - .on_block::(block, current_slot) + .on_block::( + block, + current_slot, + justified_checkpoint, + finalized_checkpoint, + ) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -473,9 +476,10 @@ impl ProtoArrayForkChoice { pub fn process_execution_payload_invalidation( &mut self, op: &InvalidationOperation, + finalized_checkpoint: Checkpoint, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation::(op) + .propagate_execution_payload_invalidation::(op, finalized_checkpoint) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } @@ -499,13 +503,20 @@ impl ProtoArrayForkChoice { &mut self, block: Block, current_slot: Slot, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); } self.proto_array - .on_block::(block, current_slot) + .on_block::( + block, + current_slot, + justified_checkpoint, + finalized_checkpoint, + ) .map_err(|e| format!("process_block_error: {:?}", e)) } @@ -547,7 +558,12 @@ impl ProtoArrayForkChoice { *old_balances = new_balances.clone(); self.proto_array - .find_head::(&justified_checkpoint.root, current_slot) + .find_head::( + &justified_checkpoint.root, + current_slot, + justified_checkpoint, + finalized_checkpoint, + ) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -884,9 +900,10 @@ impl ProtoArrayForkChoice { pub fn is_finalized_checkpoint_or_descendant( &self, descendant_root: Hash256, + best_finalized_checkpoint: Checkpoint, ) -> bool { self.proto_array - .is_finalized_checkpoint_or_descendant::(descendant_root) + .is_finalized_checkpoint_or_descendant::(descendant_root, best_finalized_checkpoint) } pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { @@ -916,12 +933,21 @@ impl ProtoArrayForkChoice { self.proto_array.iter_block_roots(block_root) } - pub fn as_ssz_container(&self) -> SszContainer { - SszContainer::from(self) + pub fn as_ssz_container( + &self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + ) -> SszContainer { + SszContainer::from_proto_array(self, justified_checkpoint, finalized_checkpoint) } - pub fn as_bytes(&self) -> Vec { - SszContainer::from(self).as_ssz_bytes() + pub fn as_bytes( + &self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + ) -> Vec { + self.as_ssz_container(justified_checkpoint, 
finalized_checkpoint) + .as_ssz_bytes() } pub fn from_bytes(bytes: &[u8], balances: JustifiedBalances) -> Result { @@ -954,8 +980,12 @@ impl ProtoArrayForkChoice { } /// Returns all nodes that have zero children and are descended from the finalized checkpoint. - pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { - self.proto_array.heads_descended_from_finalization::() + pub fn heads_descended_from_finalization( + &self, + best_finalized_checkpoint: Checkpoint, + ) -> Vec<&ProtoNode> { + self.proto_array + .heads_descended_from_finalization::(best_finalized_checkpoint) } } @@ -1125,6 +1155,8 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), }, genesis_slot + 1, + genesis_checkpoint, + genesis_checkpoint, ) .unwrap(); @@ -1148,6 +1180,8 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: None, }, genesis_slot + 1, + genesis_checkpoint, + genesis_checkpoint, ) .unwrap(); @@ -1161,10 +1195,24 @@ mod test_compute_deltas { assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); assert!(!fc.is_descendant(finalized_root, unknown)); - assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_root)); - assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_desc)); - assert!(!fc.is_finalized_checkpoint_or_descendant::(not_finalized_desc)); - assert!(!fc.is_finalized_checkpoint_or_descendant::(unknown)); + assert!(fc.is_finalized_checkpoint_or_descendant::( + finalized_root, + genesis_checkpoint + )); + assert!(fc.is_finalized_checkpoint_or_descendant::( + finalized_desc, + genesis_checkpoint + )); + assert!(!fc.is_finalized_checkpoint_or_descendant::( + not_finalized_desc, + genesis_checkpoint + )); + assert!( + !fc.is_finalized_checkpoint_or_descendant::( + unknown, + genesis_checkpoint + ) + ); assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); assert!(fc.is_descendant(finalized_desc, finalized_desc)); @@ -1260,6 +1308,8 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), }, Slot::from(block.slot), + genesis_checkpoint, + genesis_checkpoint, ) .unwrap(); }; @@ -1314,29 +1364,34 @@ mod test_compute_deltas { // Set the finalized checkpoint to finalize the first slot of epoch 1 on // the canonical chain. 
 
@@ -1314,29 +1364,34 @@ mod test_compute_deltas {
 
         // Set the finalized checkpoint to finalize the first slot of epoch 1 on
         // the canonical chain.
-        fc.proto_array.finalized_checkpoint = Checkpoint {
+        let finalized_checkpoint = Checkpoint {
             root: finalized_root,
             epoch: Epoch::new(1),
         };
 
         assert!(
             fc.proto_array
-                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(finalized_root),
+                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(
+                    finalized_root,
+                    finalized_checkpoint
+                ),
             "the finalized checkpoint is the finalized checkpoint"
         );
 
         assert!(
             fc.proto_array
-                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root(
-                    canonical_slot
-                )),
+                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(
+                    get_block_root(canonical_slot),
+                    finalized_checkpoint
+                ),
             "the canonical block is a descendant of the finalized checkpoint"
         );
 
         assert!(
             !fc.proto_array
-                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(get_block_root(
-                    non_canonical_slot
-                )),
+                .is_finalized_checkpoint_or_descendant::<MainnetEthSpec>(
+                    get_block_root(non_canonical_slot),
+                    finalized_checkpoint
+                ),
             "although the non-canonical block is a descendant of the finalized block, \
             it's not a descendant of the finalized checkpoint"
        );
diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs
index 0bb3f2b35d8..1e01b74c8cd 100644
--- a/consensus/proto_array/src/ssz_container.rs
+++ b/consensus/proto_array/src/ssz_container.rs
@@ -26,22 +26,28 @@ pub struct SszContainer {
     #[superstruct(only(V17))]
     pub balances: Vec<u64>,
     pub prune_threshold: usize,
-    pub justified_checkpoint: Checkpoint,
-    pub finalized_checkpoint: Checkpoint,
+    // Deprecated, remove in a future schema migration
+    justified_checkpoint: Checkpoint,
+    // Deprecated, remove in a future schema migration
+    finalized_checkpoint: Checkpoint,
     pub nodes: Vec<ProtoNode>,
     pub indices: Vec<(Hash256, usize)>,
     pub previous_proposer_boost: ProposerBoost,
 }
 
-impl From<&ProtoArrayForkChoice> for SszContainer {
-    fn from(from: &ProtoArrayForkChoice) -> Self {
+impl SszContainer {
+    pub fn from_proto_array(
+        from: &ProtoArrayForkChoice,
+        justified_checkpoint: Checkpoint,
+        finalized_checkpoint: Checkpoint,
+    ) -> Self {
         let proto_array = &from.proto_array;
 
         Self {
             votes: from.votes.0.clone(),
             prune_threshold: proto_array.prune_threshold,
-            justified_checkpoint: proto_array.justified_checkpoint,
-            finalized_checkpoint: proto_array.finalized_checkpoint,
+            justified_checkpoint,
+            finalized_checkpoint,
             nodes: proto_array.nodes.clone(),
             indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(),
             previous_proposer_boost: proto_array.previous_proposer_boost,
@@ -55,8 +61,6 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice {
     fn try_from((from, balances): (SszContainer, JustifiedBalances)) -> Result<Self, Self::Error> {
         let proto_array = ProtoArray {
             prune_threshold: from.prune_threshold,
-            justified_checkpoint: from.justified_checkpoint,
-            finalized_checkpoint: from.finalized_checkpoint,
             nodes: from.nodes,
             indices: from.indices.into_iter().collect::<HashMap<Hash256, usize>>(),
             previous_proposer_boost: from.previous_proposer_boost,
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index a66080ada6f..4b0dd48c9c2 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -221,6 +221,17 @@ pub struct ChainSpec {
     /// The Gloas fork epoch is optional, with `None` representing "Gloas never happens".
     pub gloas_fork_epoch: Option<Epoch>,
 
+    /*
+     * zkVM execution proof params
+     */
+    /// Whether zkVM mode is enabled via CLI flag --activate-zkvm.
+    /// When true, the node will subscribe to execution proof gossip, verify proofs,
+    /// and optionally generate proofs. zkVM activates at the Fulu fork.
+    /// Unlike other forks, this is not a network-wide activation but a per-node opt-in.
+    pub zkvm_enabled: bool,
+    /// Minimum number of execution proofs required from different subnets.
+    /// Only used when zkvm_enabled is true.
+    pub zkvm_min_proofs_required: usize,
 
     /*
      * Networking
      */
@@ -263,6 +274,11 @@ pub struct ChainSpec {
     pub(crate) blob_schedule: BlobSchedule,
     pub min_epochs_for_data_column_sidecars_requests: u64,
 
+    /*
+     * Networking zkvm
+     */
+    pub min_epochs_for_execution_proof_requests: u64,
+
     /*
      * Networking Gloas
      */
@@ -479,6 +495,44 @@ impl ChainSpec {
             .is_some_and(|gloas_fork_epoch| gloas_fork_epoch != self.far_future_epoch)
     }
 
+    /// Returns true if zkVM mode is enabled via CLI flag.
+    /// Unlike other forks, this is set via CLI and indicates per-node opt-in.
+    pub fn is_zkvm_enabled(&self) -> bool {
+        self.zkvm_enabled
+    }
+
+    /// Returns the epoch at which zkVM activates.
+    /// Currently uses Fulu fork epoch.
+    /// Returns None if zkVM is disabled or Fulu is not scheduled.
+    pub fn zkvm_fork_epoch(&self) -> Option<Epoch> {
+        if self.zkvm_enabled {
+            self.fulu_fork_epoch
+        } else {
+            None
+        }
+    }
+
+    /// Returns true if zkVM mode is enabled for the given epoch.
+    pub fn is_zkvm_enabled_for_epoch(&self, epoch: Epoch) -> bool {
+        self.zkvm_fork_epoch()
+            .is_some_and(|zkvm_fork_epoch| epoch >= zkvm_fork_epoch)
+    }
+
+    /// Returns true if zkVM mode can be used at the given fork.
+    pub fn is_zkvm_enabled_for_fork(&self, fork_name: ForkName) -> bool {
+        self.is_zkvm_enabled() && fork_name.fulu_enabled()
+    }
+
+    /// Returns the minimum number of execution proofs required.
+    /// Only meaningful when zkVM is enabled.
+    pub fn zkvm_min_proofs_required(&self) -> Option<usize> {
+        if self.is_zkvm_enabled() {
+            Some(self.zkvm_min_proofs_required)
+        } else {
+            None
+        }
+    }
+
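Taken together, these helpers keep all zkVM gating behind the CLI flag first and the Fulu schedule second. A small sketch of the intended call pattern (the function name is illustrative, not part of this diff):

```rust
// Sketch: gate proof handling on the per-node flag and the Fulu schedule.
// Returns false when --activate-zkvm was not passed or Fulu is unscheduled.
fn should_process_execution_proofs(spec: &ChainSpec, current_epoch: Epoch) -> bool {
    spec.is_zkvm_enabled_for_epoch(current_epoch)
}
```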
     /// Returns a full `Fork` struct for a given epoch.
     pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork {
         let current_fork_name = self.fork_name_at_epoch(epoch);
@@ -1124,6 +1178,12 @@ impl ChainSpec {
             gloas_fork_version: [0x07, 0x00, 0x00, 0x00],
             gloas_fork_epoch: None,
 
+            /*
+             * zkVM execution proof params
+             */
+            zkvm_enabled: false,
+            zkvm_min_proofs_required: default_zkvm_min_proofs_required(),
+
             /*
              * Network specific
              */
@@ -1184,6 +1244,12 @@ impl ChainSpec {
                 default_min_epochs_for_data_column_sidecars_requests(),
             max_data_columns_by_root_request: default_data_columns_by_root_request(),
 
+            /*
+             * Networking zkvm specific
+             */
+            min_epochs_for_execution_proof_requests:
+                default_min_epochs_for_execution_proof_requests(),
+
             /*
              * Application specific
              */
@@ -1259,6 +1325,10 @@ impl ChainSpec {
             // Gloas
             gloas_fork_version: [0x07, 0x00, 0x00, 0x00],
             gloas_fork_epoch: None,
+            // zkVM
+            zkvm_enabled: false,
+            zkvm_min_proofs_required: 0,
+            min_epochs_for_execution_proof_requests: 2,
             // Other
             network_id: 2, // lighthouse testnet network id
             deposit_chain_id: 5,
@@ -1484,6 +1554,12 @@ impl ChainSpec {
             gloas_fork_version: [0x07, 0x00, 0x00, 0x64],
             gloas_fork_epoch: None,
 
+            /*
+             * zkVM execution proof params
+             */
+            zkvm_enabled: false,
+            zkvm_min_proofs_required: default_zkvm_min_proofs_required(),
+
             /*
              * Network specific
              */
@@ -1535,6 +1611,12 @@ impl ChainSpec {
                 default_min_epochs_for_data_column_sidecars_requests(),
             max_data_columns_by_root_request: default_data_columns_by_root_request(),
 
+            /*
+             * Networking zkvm specific
+             */
+            min_epochs_for_execution_proof_requests:
+                default_min_epochs_for_execution_proof_requests(),
+
             /*
              * Application specific
              */
@@ -1995,6 +2077,11 @@ const fn default_min_epochs_for_blob_sidecars_requests() -> u64 {
     4096
 }
 
+const fn default_min_epochs_for_execution_proof_requests() -> u64 {
+    // TODO(zkproofs): add into consensus-specs with rationale
+    2
+}
+
 const fn default_blob_sidecar_subnet_count() -> u64 {
     6
 }
@@ -2025,6 +2112,12 @@ const fn default_max_blobs_per_block_electra() -> u64 {
     9
 }
 
+/// Minimum number of execution proofs required from different subnets
+/// before marking an execution payload as available in ZK-VM mode.
+pub const fn default_zkvm_min_proofs_required() -> usize {
+    crate::execution_proof::DEFAULT_MIN_PROOFS_REQUIRED
+}
+
 const fn default_attestation_propagation_slot_range() -> u64 {
     32
 }
diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data_column_custody_group.rs
index 0c44608e460..7ecabab0abc 100644
--- a/consensus/types/src/data_column_custody_group.rs
+++ b/consensus/types/src/data_column_custody_group.rs
@@ -42,7 +42,7 @@ pub fn get_custody_groups(
 ///
 /// # Returns
 /// Vector of custody group indices in computation order or error if parameters are invalid
-pub fn get_custody_groups_ordered(
+fn get_custody_groups_ordered(
     raw_node_id: [u8; 32],
     custody_group_count: u64,
     spec: &ChainSpec,
@@ -76,6 +76,27 @@ pub fn get_custody_groups_ordered(
     Ok(custody_groups)
 }
 
+/// Returns a deterministically ordered list of custody columns assigned to a node,
+/// preserving the order in which they were computed during iteration.
+///
+/// # Arguments
+/// * `raw_node_id` - 32-byte node identifier
+/// * `spec` - Chain specification containing custody parameters
+pub fn compute_ordered_custody_column_indices<E: EthSpec>(
+    raw_node_id: [u8; 32],
+    spec: &ChainSpec,
+) -> Result<Vec<ColumnIndex>, DataColumnCustodyGroupError> {
+    let all_custody_groups_ordered =
+        get_custody_groups_ordered(raw_node_id, spec.number_of_custody_groups, spec)?;
+
+    let mut ordered_custody_columns = vec![];
+    for custody_index in all_custody_groups_ordered {
+        let columns = compute_columns_for_custody_group::<E>(custody_index, spec)?;
+        ordered_custody_columns.extend(columns);
+    }
+    Ok(ordered_custody_columns)
+}
+
 /// Returns the columns that are associated with a given custody group.
 ///
 /// spec: https://github.com/ethereum/consensus-specs/blob/8e0d0d48e81d6c7c5a8253ab61340f5ea5bac66a/specs/fulu/das-core.md#compute_columns_for_custody_group
diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs
new file mode 100644
index 00000000000..0b74d6286ea
--- /dev/null
+++ b/consensus/types/src/execution_proof.rs
@@ -0,0 +1,177 @@
+use crate::{ExecutionBlockHash, Hash256, Slot, VariableList};
+use serde::{Deserialize, Serialize};
+use ssz::Encode;
+use ssz_derive::{Decode, Encode as DeriveEncode};
+use ssz_types::typenum;
+use std::fmt::{self, Debug};
+use tree_hash_derive::TreeHash;
+
+use super::ExecutionProofId;
+
+/// Maximum size of proof data in bytes
+///
+/// Note: Most proofs will fit within 300KB. Some zkVMs currently have 1MB proofs,
+/// so this number was set to accommodate most zkVMs.
+pub const MAX_PROOF_DATA_BYTES: usize = 1_048_576;
+
+/// Minimum number of execution proofs required from different proof types
+/// before marking an execution payload as available in ZK-VM mode.
+///
+/// This provides client diversity - nodes wait for proofs from K different
+/// zkVM+EL combinations before considering an execution payload available.
+pub const DEFAULT_MIN_PROOFS_REQUIRED: usize = 2;
+
+/// Maximum number of execution proofs that can be requested or stored.
+/// This corresponds to the maximum number of proof types (zkVM+EL combinations)
+/// that can be supported, which is currently 8 (ExecutionProofId is 0-7).
+pub const MAX_PROOFS: usize = 8;
+
+type ProofData = VariableList<u8, typenum::U1048576>;
+
+/// ExecutionProof represents a cryptographic `proof of execution` that
+/// an execution payload is valid.
+///
+/// In short, it is a proof that if we were to run a particular execution layer client
+/// with the given execution payload, it would return the output values that are attached
+/// to the proof.
+///
+/// Each proof is associated with a specific proof_id, which identifies the
+/// zkVM and EL combination used to generate it. Multiple proofs from different
+/// proof IDs can exist for the same execution payload, providing both zkVM and EL diversity.
+#[derive(Clone, Serialize, Deserialize, DeriveEncode, Decode, TreeHash, PartialEq, Eq)]
+pub struct ExecutionProof {
+    /// Which proof type (zkVM+EL combination) this proof belongs to
+    /// Examples: 0=SP1+Reth, 1=Risc0+Geth, 2=SP1+Geth, etc.
+    pub proof_id: ExecutionProofId,
+
+    /// The slot of the beacon block this proof validates
+    pub slot: Slot,
+
+    /// The block hash of the execution payload this proof validates
+    pub block_hash: ExecutionBlockHash,
+
+    /// The beacon block root corresponding to the beacon block
+    /// with the execution payload, that this proof attests to.
+    pub block_root: Hash256,
+
+    /// The actual proof data
+    pub proof_data: ProofData,
+}
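A hedged sketch of constructing one of these proofs (placeholder values throughout; `bls::FixedBytesExtended` is assumed in scope for the `zero()` constructors, as in the tests below):

```rust
// Sketch: build and inspect an ExecutionProof. Construction fails if
// proof_data exceeds MAX_PROOF_DATA_BYTES.
fn build_example_proof() -> Result<ExecutionProof, String> {
    let proof_id = ExecutionProofId::new(0)?; // e.g. the SP1+Reth combination
    let proof = ExecutionProof::new(
        proof_id,
        Slot::new(12_345),
        ExecutionBlockHash::zero(), // hash of the payload being proven
        Hash256::zero(),            // root of the beacon block carrying that payload
        vec![0u8; 1024],            // opaque proof bytes
    )?;
    assert!(proof.is_from_proof_type(proof_id));
    Ok(proof)
}
```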
+
+impl ExecutionProof {
+    pub fn new(
+        proof_id: ExecutionProofId,
+        slot: Slot,
+        block_hash: ExecutionBlockHash,
+        block_root: Hash256,
+        proof_data: Vec<u8>,
+    ) -> Result<Self, String> {
+        let proof_data = ProofData::new(proof_data)
+            .map_err(|e| format!("Failed to create proof data: {:?}", e))?;
+
+        Ok(Self {
+            proof_id,
+            slot,
+            block_hash,
+            block_root,
+            proof_data,
+        })
+    }
+
+    /// Returns the size of the proof data in bytes
+    pub fn proof_data_size(&self) -> usize {
+        self.proof_data.len()
+    }
+
+    /// Get a reference to the proof data as a slice
+    pub fn proof_data_slice(&self) -> &[u8] {
+        &self.proof_data
+    }
+
+    /// Check if this proof is for a specific execution block hash
+    pub fn is_for_block(&self, block_hash: &ExecutionBlockHash) -> bool {
+        &self.block_hash == block_hash
+    }
+
+    /// Check if this proof is from a specific proof type
+    pub fn is_from_proof_type(&self, proof_id: ExecutionProofId) -> bool {
+        self.proof_id == proof_id
+    }
+
+    /// Get the proof type ID
+    pub fn proof_id(&self) -> ExecutionProofId {
+        self.proof_id
+    }
+
+    /// Minimum size of an ExecutionProof in SSZ bytes (with empty proof_data)
+    /// TODO(zkproofs): If the proof_data is empty, then that is an invalid proof
+    pub fn min_size() -> usize {
+        use bls::FixedBytesExtended;
+        Self {
+            proof_id: ExecutionProofId::new(0).unwrap(),
+            slot: Slot::new(0),
+            block_hash: ExecutionBlockHash::zero(),
+            block_root: Hash256::zero(),
+            proof_data: ProofData::new(vec![]).unwrap(),
+        }
+        .as_ssz_bytes()
+        .len()
+    }
+
+    /// Maximum size of an ExecutionProof in SSZ bytes (with max proof_data)
+    pub fn max_size() -> usize {
+        use bls::FixedBytesExtended;
+        Self {
+            proof_id: ExecutionProofId::new(0).unwrap(),
+            slot: Slot::new(0),
+            block_hash: ExecutionBlockHash::zero(),
+            block_root: Hash256::zero(),
+            proof_data: ProofData::new(vec![0u8; MAX_PROOF_DATA_BYTES]).unwrap(),
+        }
+        .as_ssz_bytes()
+        .len()
+    }
+}
+
+impl Debug for ExecutionProof {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ExecutionProof")
+            .field("proof_id", &self.proof_id)
+            .field("slot", &self.slot)
+            .field("block_hash", &self.block_hash)
+            .field("block_root", &self.block_root)
+            .field("proof_data_size", &self.proof_data.len())
+            .finish()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use bls::FixedBytesExtended;
+
+    #[test]
+    fn test_execution_proof_too_large() {
+        let proof_id = ExecutionProofId::new(0).unwrap();
+        let slot = Slot::new(100);
+        let block_hash = ExecutionBlockHash::zero();
+        let block_root = Hash256::zero();
+        let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES + 1];
+
+        let result = ExecutionProof::new(proof_id, slot, block_hash, block_root, proof_data);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().contains("Failed to create proof data"));
+    }
+
+    #[test]
+    fn test_execution_proof_max_size() {
+        let proof_id = ExecutionProofId::new(0).unwrap();
+        let slot = Slot::new(100);
+        let block_hash = ExecutionBlockHash::zero();
+        let block_root = Hash256::zero();
+        let proof_data = vec![0u8; MAX_PROOF_DATA_BYTES];
+
+        let result = ExecutionProof::new(proof_id, slot, block_hash, block_root, proof_data);
+        assert!(result.is_ok());
+    }
+}
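Because `proof_data` is the only variable-length field, `min_size` and `max_size` bound the SSZ encoding of any valid proof. A hypothetical length check built on those bounds (this diff does not show where the bounds are consumed):

```rust
// Sketch: cheap sanity check on a received proof's encoded length before
// attempting a full SSZ decode.
fn plausible_proof_length(encoded: &[u8]) -> bool {
    encoded.len() >= ExecutionProof::min_size() && encoded.len() <= ExecutionProof::max_size()
}
```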
diff --git a/consensus/types/src/execution_proof_id.rs b/consensus/types/src/execution_proof_id.rs
new file mode 100644
index 00000000000..c180f5e0412
--- /dev/null
+++ b/consensus/types/src/execution_proof_id.rs
@@ -0,0 +1,144 @@
+use serde::{Deserialize, Serialize};
+use ssz::{Decode, DecodeError, Encode};
+use std::fmt::{self, Display};
+use tree_hash::TreeHash;
+
+/// Number of execution proof types.
+/// Each proof type represents a different zkVM+EL combination.
+///
+/// TODO(zkproofs): The number 8 is a parameter that we will want to configure in the future
+pub const EXECUTION_PROOF_TYPE_COUNT: u8 = 8;
+
+/// ExecutionProofId identifies which zkVM/proof system a proof belongs to.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
+pub struct ExecutionProofId(u8);
+
+impl Encode for ExecutionProofId {
+    fn is_ssz_fixed_len() -> bool {
+        <u8 as Encode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u8 as Encode>::ssz_fixed_len()
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        self.0.ssz_bytes_len()
+    }
+
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        self.0.ssz_append(buf)
+    }
+
+    fn as_ssz_bytes(&self) -> Vec<u8> {
+        self.0.as_ssz_bytes()
+    }
+}
+
+impl Decode for ExecutionProofId {
+    fn is_ssz_fixed_len() -> bool {
+        <u8 as Decode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u8 as Decode>::ssz_fixed_len()
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        let value = u8::from_ssz_bytes(bytes)?;
+        Self::new(value).map_err(DecodeError::BytesInvalid)
+    }
+}
+
+impl TreeHash for ExecutionProofId {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        <u8 as TreeHash>::tree_hash_type()
+    }
+
+    fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding {
+        self.0.tree_hash_packed_encoding()
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        <u8 as TreeHash>::tree_hash_packing_factor()
+    }
+
+    fn tree_hash_root(&self) -> tree_hash::Hash256 {
+        self.0.tree_hash_root()
+    }
+}
+
+impl ExecutionProofId {
+    /// Creates a new ExecutionProofId if the value is valid
+    pub fn new(id: u8) -> Result<Self, String> {
+        if id < EXECUTION_PROOF_TYPE_COUNT {
+            Ok(Self(id))
+        } else {
+            Err(format!(
+                "Invalid ExecutionProofId: {}, must be < {}",
+                id, EXECUTION_PROOF_TYPE_COUNT
+            ))
+        }
+    }
+
+    /// Returns the inner u8 value
+    pub fn as_u8(&self) -> u8 {
+        self.0
+    }
+
+    /// Returns the proof ID as a usize
+    pub fn as_usize(&self) -> usize {
+        self.0 as usize
+    }
+
+    /// Returns all valid proof IDs
+    pub fn all() -> Vec<Self> {
+        (0..EXECUTION_PROOF_TYPE_COUNT).map(Self).collect()
+    }
+}
+
+impl Display for ExecutionProofId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl From<ExecutionProofId> for u8 {
+    fn from(proof_id: ExecutionProofId) -> u8 {
+        proof_id.0
+    }
+}
+
+impl TryFrom<u8> for ExecutionProofId {
+    type Error = String;
+
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        Self::new(value)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_valid_proof_ids() {
+        for id in 0..EXECUTION_PROOF_TYPE_COUNT {
+            assert!(ExecutionProofId::new(id).is_ok());
+        }
+    }
+
+    #[test]
+    fn test_invalid_proof_ids() {
+        assert!(ExecutionProofId::new(EXECUTION_PROOF_TYPE_COUNT).is_err());
+    }
+
+    #[test]
+    fn test_all_proof_ids() {
+        let all = ExecutionProofId::all();
+        assert_eq!(all.len(), EXECUTION_PROOF_TYPE_COUNT as usize);
+        for (idx, proof_id) in all.iter().enumerate() {
+            assert_eq!(proof_id.as_usize(), idx);
+        }
+    }
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 8e83fed1d9a..5aba30246fa 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -42,6 +42,8 @@ pub mod eth_spec;
 pub mod execution_block_hash;
 pub mod execution_payload;
 pub mod execution_payload_header;
+pub mod execution_proof;
+pub mod execution_proof_id;
 pub mod fork;
 pub mod fork_data;
 pub mod fork_name;
@@ -177,6 +179,8 @@ pub use
crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; +pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES}; +pub use crate::execution_proof_id::{EXECUTION_PROOF_TYPE_COUNT, ExecutionProofId}; pub use crate::execution_requests::{ExecutionRequests, RequestType}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 35d2faac483..7b04cc57711 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -8,6 +8,7 @@ use ssz_derive::{Decode, Encode}; use std::fmt; use superstruct::superstruct; use test_random_derive::TestRandom; +use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; @@ -253,6 +254,7 @@ impl> SignedBeaconBlock } /// Produce a signed beacon block header corresponding to this block. + #[instrument(level = "debug", skip_all)] pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { SignedBeaconBlockHeader { message: self.message().block_header(), diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index a893a9360dc..b8976b8ccb3 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_key_derivation" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -14,3 +15,7 @@ zeroize = { workspace = true } [dev-dependencies] hex = { workspace = true } + +[[test]] +name = "eth2_key_derivation_tests" +path = "tests/main.rs" diff --git a/crypto/eth2_key_derivation/tests/main.rs b/crypto/eth2_key_derivation/tests/main.rs new file mode 100644 index 00000000000..a239eaa6185 --- /dev/null +++ b/crypto/eth2_key_derivation/tests/main.rs @@ -0,0 +1,2 @@ +mod eip2333_vectors; +mod tests; diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index 61d2722efbd..290a10adc9a 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -24,3 +25,7 @@ zeroize = { workspace = true } [dev-dependencies] tempfile = { workspace = true } + +[[test]] +name = "eth2_keystore_tests" +path = "tests/main.rs" diff --git a/crypto/eth2_keystore/tests/main.rs b/crypto/eth2_keystore/tests/main.rs new file mode 100644 index 00000000000..79b31d5eda5 --- /dev/null +++ b/crypto/eth2_keystore/tests/main.rs @@ -0,0 +1,4 @@ +mod eip2335_vectors; +mod json; +mod params; +mod tests; diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index 5327bdc163b..0d454016a6b 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_wallet" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -18,3 +19,7 @@ uuid = { workspace = true } [dev-dependencies] hex = { workspace = true } tempfile = { 
workspace = true } + +[[test]] +name = "eth2_wallet_tests" +path = "tests/main.rs" diff --git a/crypto/eth2_wallet/tests/main.rs b/crypto/eth2_wallet/tests/main.rs new file mode 100644 index 00000000000..d59ccff6392 --- /dev/null +++ b/crypto/eth2_wallet/tests/main.rs @@ -0,0 +1,3 @@ +mod eip2386_vectors; +mod json; +mod tests; diff --git a/dummy_el/Cargo.toml b/dummy_el/Cargo.toml new file mode 100644 index 00000000000..7b25c4a679d --- /dev/null +++ b/dummy_el/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "dummy_el" +version = "0.1.0" +edition = "2024" + +[dependencies] +axum = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } +clap = { workspace = true } +anyhow = { workspace = true } +jsonwebtoken = "9" +hex = { workspace = true } + +[[bin]] +name = "dummy_el" +path = "src/main.rs" diff --git a/dummy_el/Dockerfile b/dummy_el/Dockerfile new file mode 100644 index 00000000000..1ece25c7225 --- /dev/null +++ b/dummy_el/Dockerfile @@ -0,0 +1,32 @@ +# Multi-stage build for dummy_el +FROM rust:1.88.0-bullseye AS builder + +WORKDIR /build + +# Copy the entire workspace (needed for workspace structure) +COPY . . + +# Build only dummy_el in release mode +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/build/target \ + cargo build --release -p dummy_el && \ + cp target/release/dummy_el /dummy_el + +# Runtime stage with minimal Ubuntu image +FROM ubuntu:22.04 + +RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ + ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Copy the binary from builder +COPY --from=builder /dummy_el /usr/local/bin/dummy_el + +# Create a fake 'geth' binary that runs dummy_el instead +# Kurtosis will call "geth init ..." and "geth --..." but we'll run dummy_el +COPY --from=builder /build/dummy_el/geth-wrapper.sh /usr/local/bin/geth +RUN chmod +x /usr/local/bin/geth + +# Expose default Engine API port +EXPOSE 8551 diff --git a/dummy_el/README.md b/dummy_el/README.md new file mode 100644 index 00000000000..0c3361a4a94 --- /dev/null +++ b/dummy_el/README.md @@ -0,0 +1,24 @@ +# Using Dummy EL + +This is a dummy EL that can be used with proof verification nodes. These nodes do not require an EL to function since they just take in proofs. + +## Quick Start + +### 1. Build the Docker Image + +From the lighthouse repository root: + +```bash +docker build -f dummy_el/Dockerfile -t dummy_el:local . +``` + +### 2. Adding to Kurtosis + +In Kurtosis, you can add the following: + +```yaml + - el_type: geth + el_image: dummy_el:local +``` + +Note that we need to use el_type `geth` as kurtosis will be looking for a binary named geth. We wrap calls to the Geth binary so that they are processed by our dummy_el. \ No newline at end of file diff --git a/dummy_el/geth-wrapper.sh b/dummy_el/geth-wrapper.sh new file mode 100644 index 00000000000..8112bb44e9c --- /dev/null +++ b/dummy_el/geth-wrapper.sh @@ -0,0 +1,29 @@ +#!/bin/sh +set -e + +# This is a wrapper that pretends to be geth but actually runs dummy_el +# Kurtosis calls: geth init ... && geth --authrpc.port=8551 ... 
+# We ignore the init, and when we see the actual geth command with authrpc.port, we start dummy_el + +echo "[dummy_el geth-wrapper] Called with: $@" + +# Check if this is the "geth init" command and ignore it +if echo "$@" | grep -q "init"; then + echo "[dummy_el geth-wrapper] Ignoring 'geth init' command" + exit 0 +fi + +# If we're here, it's the actual geth run command +# Kurtosis mounts JWT secret at /jwt/jwtsecret +JWT_PATH="/jwt/jwtsecret" + +echo "[dummy_el geth-wrapper] Starting dummy_el instead of geth" + +# Run dummy_el with JWT if available, otherwise without +if [ -f "$JWT_PATH" ]; then + echo "[dummy_el geth-wrapper] Using JWT from $JWT_PATH" + exec /usr/local/bin/dummy_el --host 0.0.0.0 --port 8551 --jwt-secret "$JWT_PATH" +else + echo "[dummy_el geth-wrapper] WARNING: No JWT file found at $JWT_PATH" + exec /usr/local/bin/dummy_el --host 0.0.0.0 --port 8551 +fi diff --git a/dummy_el/src/main.rs b/dummy_el/src/main.rs new file mode 100644 index 00000000000..fd0aa4520a7 --- /dev/null +++ b/dummy_el/src/main.rs @@ -0,0 +1,445 @@ +use axum::{ + extract::State, + http::{Request, StatusCode}, + middleware::{self, Next}, + response::Response, + routing::post, + Json, Router, +}; +use clap::Parser; +use jsonwebtoken::{Algorithm, DecodingKey, Validation}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value as JsonValue}; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::Arc; +use tracing::{debug, error, info, warn}; + +const JSONRPC_VERSION: &str = "2.0"; +const JWT_SECRET_LENGTH: usize = 32; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[arg(long, default_value = "8551", help = "Engine API port")] + port: u16, + + #[arg(long, default_value = "127.0.0.1")] + host: String, + + #[arg(long, help = "Path to JWT secret file (hex encoded)")] + jwt_secret: Option, + + #[arg(long, default_value = "8545", help = "HTTP RPC port")] + rpc_port: u16, + + #[arg(long, default_value = "8546", help = "WebSocket port")] + ws_port: u16, + + #[arg(long, default_value = "9001", help = "Metrics port")] + metrics_port: u16, + + #[arg(long, default_value = "30303", help = "P2P discovery port (TCP/UDP)")] + p2p_port: u16, +} + +#[derive(Debug, Clone)] +struct AppState { + jwt_secret: Option>, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JwtClaims { + iat: u64, + #[serde(skip_serializing_if = "Option::is_none")] + id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + clv: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcRequest { + jsonrpc: String, + method: String, + params: JsonValue, + id: JsonValue, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcResponse { + jsonrpc: String, + #[serde(skip_serializing_if = "Option::is_none")] + result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, + id: JsonValue, +} + +#[derive(Debug, Serialize, Deserialize)] +struct JsonRpcError { + code: i64, + message: String, +} + +async fn auth_middleware( + State(state): State>, + request: Request, + next: Next, +) -> Result { + // If no JWT secret is configured, skip auth + if state.jwt_secret.is_none() { + return Ok(next.run(request).await); + } + + let jwt_secret = state.jwt_secret.as_ref().unwrap(); + + // Check for Authorization header + let auth_header = request + .headers() + .get("Authorization") + .and_then(|h| h.to_str().ok()); + + match auth_header { + Some(auth) if auth.starts_with("Bearer ") => { + let token = &auth[7..]; // Skip "Bearer " + + 
// Validate JWT token + let mut validation = Validation::new(Algorithm::HS256); + validation.validate_exp = false; + validation.required_spec_claims.remove("exp"); + + match jsonwebtoken::decode::( + token, + &DecodingKey::from_secret(jwt_secret), + &validation, + ) { + Ok(_) => { + debug!("JWT authentication successful"); + Ok(next.run(request).await) + } + Err(e) => { + warn!("JWT validation failed: {:?}", e); + Err((StatusCode::UNAUTHORIZED, "Invalid JWT token".to_string())) + } + } + } + Some(_) => { + warn!("Authorization header present but not in Bearer format"); + Err(( + StatusCode::UNAUTHORIZED, + "Authorization header must be in format: Bearer ".to_string(), + )) + } + None => { + warn!("Missing Authorization header"); + Err(( + StatusCode::UNAUTHORIZED, + "Missing Authorization header".to_string(), + )) + } + } +} + +async fn handle_rpc( + State(_state): State>, + Json(request): Json, +) -> (StatusCode, Json) { + info!( + method = %request.method, + params = ?request.params, + "Received RPC request" + ); + + let result = match request.method.as_str() { + "eth_syncing" => { + debug!("eth_syncing: returning false (not syncing)"); + Ok(json!(false)) + } + "eth_getBlockByNumber" => { + debug!("eth_getBlockByNumber: returning null"); + Ok(json!(null)) + } + "eth_getBlockByHash" => { + debug!("eth_getBlockByHash: returning null"); + Ok(json!(null)) + } + "engine_newPayloadV1" | "engine_newPayloadV2" | "engine_newPayloadV3" | "engine_newPayloadV4" => { + debug!("{}: returning SYNCING status", request.method); + Ok(json!({ + "status": "SYNCING", + "latestValidHash": null, + "validationError": null + })) + } + "engine_forkchoiceUpdatedV1" | "engine_forkchoiceUpdatedV2" | "engine_forkchoiceUpdatedV3" => { + debug!("{}: returning SYNCING status", request.method); + Ok(json!({ + "payloadStatus": { + "status": "SYNCING", + "latestValidHash": null, + "validationError": null + }, + "payloadId": null + })) + } + "engine_getPayloadV1" | "engine_getPayloadV2" | "engine_getPayloadV3" | "engine_getPayloadV4" | "engine_getPayloadV5" => { + debug!("{}: returning error (payload not available)", request.method); + Err(JsonRpcError { + code: -38001, + message: "Unknown payload".to_string(), + }) + } + "engine_getPayloadBodiesByHashV1" => { + debug!("engine_getPayloadBodiesByHashV1: returning empty array"); + Ok(json!([])) + } + "engine_getPayloadBodiesByRangeV1" => { + debug!("engine_getPayloadBodiesByRangeV1: returning empty array"); + Ok(json!([])) + } + "engine_exchangeCapabilities" => { + let capabilities = vec![ + "engine_newPayloadV1", + "engine_newPayloadV2", + "engine_newPayloadV3", + "engine_newPayloadV4", + "engine_getPayloadV1", + "engine_getPayloadV2", + "engine_getPayloadV3", + "engine_getPayloadV4", + "engine_getPayloadV5", + "engine_forkchoiceUpdatedV1", + "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", + "engine_getPayloadBodiesByHashV1", + "engine_getPayloadBodiesByRangeV1", + "engine_getClientVersionV1", + "engine_getBlobsV1", + "engine_getBlobsV2", + ]; + debug!("engine_exchangeCapabilities: returning {} capabilities", capabilities.len()); + Ok(json!(capabilities)) + } + "engine_getClientVersionV1" => { + debug!("engine_getClientVersionV1: returning client info"); + Ok(json!([{ + "code": "DM", + "name": "Dummy-EL", + "version": "v0.1.0", + "commit": "00000000" + }])) + } + "engine_getBlobsV1" | "engine_getBlobsV2" => { + debug!("{}: returning empty array", request.method); + Ok(json!([])) + } + _ => { + info!(method = %request.method, "Method not found"); + 
Err(JsonRpcError { + code: -32601, + message: format!("Method not found: {}", request.method), + }) + } + }; + + let response = match result { + Ok(result) => JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: Some(result), + error: None, + id: request.id, + }, + Err(error) => JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: None, + error: Some(error), + id: request.id, + }, + }; + + info!(method = %request.method, success = response.error.is_none(), "RPC response sent"); + (StatusCode::OK, Json(response)) +} + +// Simple RPC handler without JWT auth for non-Engine API ports +async fn handle_simple_rpc(Json(request): Json) -> (StatusCode, Json) { + debug!(method = %request.method, "Received simple RPC request"); + + let result: Result = match request.method.as_str() { + "admin_nodeInfo" => { + Ok(json!({ + "id": "0ecd4a2c5f7c2a304e3acbec67efea275510d31c304fe47f4e626a2ebd5fb101", + "name": "Dummy-EL/v0.1.0", + "enode": "enode://dummy@127.0.0.1:30303", + "enr": "enr:-Iq4QDummy0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + "ip": "127.0.0.1", + "ports": { + "discovery": 30303, + "listener": 30303 + } + })) + } + _ => { + // For any other method, just return a success response + Ok(json!(null)) + } + }; + + let response = JsonRpcResponse { + jsonrpc: JSONRPC_VERSION.to_string(), + result: Some(result.unwrap_or(json!(null))), + error: None, + id: request.id, + }; + + (StatusCode::OK, Json(response)) +} + +fn strip_prefix(s: &str) -> &str { + s.strip_prefix("0x").unwrap_or(s) +} + +fn read_jwt_secret(path: &PathBuf) -> anyhow::Result> { + let contents = std::fs::read_to_string(path)?; + let hex_str = strip_prefix(contents.trim()); + let bytes = hex::decode(hex_str)?; + + if bytes.len() != JWT_SECRET_LENGTH { + anyhow::bail!( + "Invalid JWT secret length. 
Expected {} bytes, got {}", + JWT_SECRET_LENGTH, + bytes.len() + ); + } + + Ok(bytes) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .init(); + + let args = Args::parse(); + + // Read JWT secret if provided + let jwt_secret = match &args.jwt_secret { + Some(path) => { + match read_jwt_secret(path) { + Ok(secret) => { + info!("JWT secret loaded from {:?}", path); + Some(secret) + } + Err(e) => { + error!("Failed to read JWT secret from {:?}: {}", path, e); + return Err(e); + } + } + } + None => { + warn!("No JWT secret provided - authentication disabled!"); + warn!("This is insecure and should only be used for testing"); + None + } + }; + + info!( + host = %args.host, + engine_port = args.port, + rpc_port = args.rpc_port, + ws_port = args.ws_port, + metrics_port = args.metrics_port, + p2p_port = args.p2p_port, + jwt_auth = jwt_secret.is_some(), + "Starting Dummy Execution Layer" + ); + + let state = Arc::new(AppState { jwt_secret }); + + // Engine API server (port 8551) with JWT auth + let engine_app = Router::new() + .route("/", post(handle_rpc)) + .layer(middleware::from_fn_with_state(state.clone(), auth_middleware)) + .with_state(state.clone()); + + let engine_addr = format!("{}:{}", args.host, args.port) + .parse::() + .expect("Invalid engine address"); + + info!("Engine API listening on http://{}", engine_addr); + + // Simple RPC server for HTTP RPC (port 8545) - no JWT auth + let rpc_app = Router::new().route("/", post(handle_simple_rpc)); + let rpc_addr = format!("{}:{}", args.host, args.rpc_port) + .parse::() + .expect("Invalid RPC address"); + info!("HTTP RPC listening on http://{}", rpc_addr); + + // Simple RPC server for WebSocket (port 8546) - no JWT auth + let ws_app = Router::new().route("/", post(handle_simple_rpc)); + let ws_addr = format!("{}:{}", args.host, args.ws_port) + .parse::() + .expect("Invalid WebSocket address"); + info!("WebSocket RPC listening on http://{}", ws_addr); + + // Simple server for metrics (port 9001) + let metrics_app = Router::new().route("/", post(handle_simple_rpc)); + let metrics_addr = format!("{}:{}", args.host, args.metrics_port) + .parse::() + .expect("Invalid metrics address"); + info!("Metrics listening on http://{}", metrics_addr); + + // Bind P2P discovery ports (TCP and UDP) - just to satisfy Kurtosis port checks + let p2p_tcp_addr = format!("{}:{}", args.host, args.p2p_port) + .parse::() + .expect("Invalid P2P TCP address"); + let p2p_udp_addr = format!("{}:{}", args.host, args.p2p_port) + .parse::() + .expect("Invalid P2P UDP address"); + + // Spawn P2P TCP listener in a task to keep it alive + let p2p_tcp_listener = tokio::net::TcpListener::bind(p2p_tcp_addr).await?; + info!("P2P TCP listening on {}", p2p_tcp_addr); + let p2p_tcp_task = tokio::spawn(async move { + loop { + // Accept connections but do nothing with them + if let Ok((_socket, _addr)) = p2p_tcp_listener.accept().await { + // Connection accepted, just drop it + } + } + }); + + // Spawn P2P UDP listener in a task to keep it alive + let p2p_udp_socket = tokio::net::UdpSocket::bind(p2p_udp_addr).await?; + info!("P2P UDP listening on {}", p2p_udp_addr); + let p2p_udp_task = tokio::spawn(async move { + let mut buf = [0u8; 1024]; + loop { + // Receive packets but do nothing with them + let _ = p2p_udp_socket.recv(&mut buf).await; + } + }); + + info!("Ready to accept requests on all 
ports"); + + // Spawn all servers concurrently + let engine_listener = tokio::net::TcpListener::bind(engine_addr).await?; + let rpc_listener = tokio::net::TcpListener::bind(rpc_addr).await?; + let ws_listener = tokio::net::TcpListener::bind(ws_addr).await?; + let metrics_listener = tokio::net::TcpListener::bind(metrics_addr).await?; + + tokio::select! { + result = axum::serve(engine_listener, engine_app) => result?, + result = axum::serve(rpc_listener, rpc_app) => result?, + result = axum::serve(ws_listener, ws_app) => result?, + result = axum::serve(metrics_listener, metrics_app) => result?, + _ = p2p_tcp_task => {}, + _ = p2p_udp_task => {}, + } + + Ok(()) +} diff --git a/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml new file mode 100644 index 00000000000..f0d0967a166 --- /dev/null +++ b/scripts/local_testnet/network_params_mixed_proof_gen_verify.yaml @@ -0,0 +1,32 @@ +# 3 nodes generate proofs, 1 node only verifies +participants: + # Proof generating nodes (nodes 1-3) + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --zkvm-generation-proof-types=0,1 + - --target-peers=3 + count: 3 + # Proof verifying only node (node 4) + # TODO(zkproofs): Currently there is no way to add no client here + # We likely want to use our dummy zkvm EL here + - el_type: geth + el_image: dummy_el:local + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --target-peers=3 + count: 1 +network_params: + electra_fork_epoch: 0 + fulu_fork_epoch: 1 + seconds_per_slot: 2 +global_log_level: debug +snooper_enabled: false +additional_services: + - dora + - prometheus_grafana diff --git a/scripts/local_testnet/network_params_proof_gen_only.sh b/scripts/local_testnet/network_params_proof_gen_only.sh new file mode 100755 index 00000000000..70c2c8f5c69 --- /dev/null +++ b/scripts/local_testnet/network_params_proof_gen_only.sh @@ -0,0 +1,155 @@ +#!/bin/bash + +# Helper script for monitoring execution proof generation and gossip +# Usage: ./network_params_proof_gen_only.sh [command] +# ENCLAVE=my-testnet ./network_params_proof_gen_only.sh [command] +# +# Set ENCLAVE environment variable to use a different testnet. 
+# Default: local-testnet + +ENCLAVE="${ENCLAVE:-local-testnet}" + +# Color output +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +case "${1:-help}" in + generation) + echo -e "${GREEN}=== Proof Generation and Publishing ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep -E "(Generating execution proof|Proof successfully published)" | tail -5 + done + ;; + + gossip-subscribe) + echo -e "${GREEN}=== ExecutionProof Topic Subscriptions ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Subscribed to topic.*execution_proof" + done + ;; + + gossip-receive) + echo -e "${GREEN}=== Received Execution Proofs via Gossip ===${NC}" + for i in 1 2 3 4; do + count=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Received execution proof via gossip" | wc -l) + echo -e "${YELLOW}Node $i:${NC} $count proofs received" + done + ;; + + gossip-verified) + echo -e "${GREEN}=== Verified Execution Proofs ===${NC}" + for i in 1 2 3 4; do + count=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Successfully verified gossip execution proof" | wc -l) + echo -e "${YELLOW}Node $i:${NC} $count proofs verified" + done + ;; + + errors) + echo -e "${GREEN}=== Checking for Errors ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + no_peers=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "NoPeersSubscribedToTopic.*execution_proof" | wc -l) + failed_sub=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Failed to subscribe.*execution_proof" | wc -l) + + if [ "$no_peers" -gt 0 ]; then + echo -e "${RED}NoPeersSubscribedToTopic errors: $no_peers${NC}" + else + echo -e "${GREEN}NoPeersSubscribedToTopic errors: 0${NC}" + fi + + if [ "$failed_sub" -gt 0 ]; then + echo -e "${RED}Failed subscription errors: $failed_sub${NC}" + else + echo -e "${GREEN}Failed subscription errors: 0${NC}" + fi + done + ;; + + zkvm-logs) + echo -e "${GREEN}=== ZKVM Debug Logs ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "ZKVM:" | head -5 + done + ;; + + fork-transition) + echo -e "${GREEN}=== Fork Transition Logs ===${NC}" + for i in 1 2 3 4; do + echo -e "\n${YELLOW}--- Node $i ---${NC}" + kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep -E "(Subscribing to new fork|subscribe_new_fork_topics called)" + done + ;; + + stats) + echo -e "${GREEN}=== Execution Proof Statistics ===${NC}" + for i in 1 2 3 4; do + generated=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Generating execution proof" | wc -l) + published=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Proof successfully published" | wc -l) + received=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Received execution proof via gossip" | wc -l) + verified=$(kurtosis service logs $ENCLAVE cl-$i-lighthouse-geth -a 2>&1 | grep "Successfully verified gossip execution proof" | wc -l) + + echo -e "${YELLOW}Node $i:${NC}" + echo -e " Generated: $generated" + echo -e " Published: $published" + echo -e " Received: $received" + echo -e " Verified: $verified" + done + ;; + + follow) + NODE="${2:-1}" + echo -e "${GREEN}=== Following Execution Proof Logs for Node $NODE ===${NC}" + echo 
-e "${YELLOW}Press Ctrl+C to stop${NC}" + kurtosis service logs $ENCLAVE cl-$NODE-lighthouse-geth -f | grep --line-buffered -E "(Generating execution proof|Proof successfully published|Received execution proof via gossip|Successfully verified gossip execution proof)" + ;; + + all) + echo -e "${GREEN}=== Complete Execution Proof Report ===${NC}\n" + $0 zkvm-logs + echo -e "\n" + $0 fork-transition + echo -e "\n" + $0 gossip-subscribe + echo -e "\n" + $0 stats + echo -e "\n" + $0 errors + ;; + + help|*) + echo "Helper script for monitoring execution proof generation and gossip" + echo "" + echo "Usage: $0 [command]" + echo " ENCLAVE=name $0 [command]" + echo "" + echo "Environment Variables:" + echo " ENCLAVE - Testnet enclave name (default: local-testnet)" + echo "" + echo "Commands:" + echo " generation - Show proof generation and publishing logs" + echo " gossip-subscribe - Show ExecutionProof topic subscriptions" + echo " gossip-receive - Count received proofs on each node" + echo " gossip-verified - Count verified proofs on each node" + echo " errors - Check for gossip errors" + echo " zkvm-logs - Show ZKVM debug logs" + echo " fork-transition - Show fork transition logs" + echo " stats - Show proof statistics for all nodes" + echo " follow [node] - Follow proof logs in real-time (default: node 1)" + echo " all - Show complete report" + echo " help - Show this help message" + echo "" + echo "Examples:" + echo " # Use default testnet (local-testnet)" + echo " $0 stats" + echo " $0 follow 2" + echo " $0 all" + echo "" + echo " # Use custom testnet enclave" + echo " ENCLAVE=my-testnet $0 stats" + ;; +esac diff --git a/scripts/local_testnet/network_params_proof_gen_only.yaml b/scripts/local_testnet/network_params_proof_gen_only.yaml new file mode 100644 index 00000000000..aea91efb92b --- /dev/null +++ b/scripts/local_testnet/network_params_proof_gen_only.yaml @@ -0,0 +1,21 @@ +# Network configuration for testing execution proof generation +# All nodes have execution layers and are configured to generate proofs +participants: + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --activate-zkvm + - --zkvm-generation-proof-types=0,1 + - --target-peers=3 + count: 4 +network_params: + electra_fork_epoch: 0 + fulu_fork_epoch: 1 + seconds_per_slot: 2 +global_log_level: debug +snooper_enabled: false +additional_services: + - dora + - prometheus_grafana \ No newline at end of file diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index cca55bcef88..94d048ef72e 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -3,6 +3,7 @@ name = "slasher" version = "0.1.0" authors = ["Michael Sproul "] edition = { workspace = true } +autotests = false [features] default = ["lmdb"] @@ -43,3 +44,7 @@ types = { workspace = true } maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } + +[[test]] +name = "slasher_tests" +path = "tests/main.rs" diff --git a/slasher/tests/main.rs b/slasher/tests/main.rs new file mode 100644 index 00000000000..fb78dcb917d --- /dev/null +++ b/slasher/tests/main.rs @@ -0,0 +1,5 @@ +mod attester_slashings; +mod backend; +mod proposer_slashings; +mod random; +mod wrap_around; diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index eef13cfc738..74bf43e3ae0 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -7,12 +7,13 @@ edition = { workspace = 
true } portable = ["types/portable"] [dependencies] +alloy-network = "1.0" +alloy-primitives = { workspace = true } +alloy-provider = "1.0" +alloy-rpc-types-eth = { workspace = true } +alloy-signer-local = "1.0" async-channel = { workspace = true } deposit_contract = { workspace = true } -ethers-core = { workspace = true } -ethers-middleware = { workspace = true } -ethers-providers = { workspace = true } -ethers-signers = { workspace = true } execution_layer = { workspace = true } fork_choice = { workspace = true } futures = { workspace = true } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index ed4ee4682f4..3bb8585e448 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,6 +1,7 @@ -use ethers_providers::{Http, Provider}; +use alloy_provider::ProviderBuilder; use execution_layer::DEFAULT_JWT_FILE; use network_utils::unused_port::unused_tcp4_port; +use reqwest::Url; use sensitive_url::SensitiveUrl; use std::path::PathBuf; use std::process::Child; @@ -34,7 +35,7 @@ pub struct ExecutionEngine { http_port: u16, http_auth_port: u16, child: Child, - pub provider: Provider, + pub provider: Box, } impl Drop for ExecutionEngine { @@ -53,8 +54,9 @@ impl ExecutionEngine { let http_port = unused_tcp4_port().unwrap(); let http_auth_port = unused_tcp4_port().unwrap(); let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path); - let provider = Provider::::try_from(format!("http://localhost:{}", http_port)) - .expect("failed to instantiate ethers provider"); + let provider = Box::new(ProviderBuilder::new().connect_http( + Url::parse(&format!("http://localhost:{}", http_port)).expect("failed to parse URL"), + )); Self { engine, datadir, diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 9e45a788704..57501c6ee2c 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -2,9 +2,10 @@ use crate::execution_engine::{ ACCOUNT1, ACCOUNT2, ExecutionEngine, GenericExecutionEngine, KEYSTORE_PASSWORD, PRIVATE_KEYS, }; use crate::transactions::transactions; -use ethers_middleware::SignerMiddleware; -use ethers_providers::Middleware; -use ethers_signers::LocalWallet; +use alloy_network::{EthereumWallet, TransactionBuilder}; +use alloy_primitives::Address as AlloyAddress; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_signer_local::PrivateKeySigner; use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, @@ -202,12 +203,13 @@ impl TestRig { self.wait_until_synced().await; // Create a local signer in case we need to sign transactions locally - let wallet1: LocalWallet = PRIVATE_KEYS[0].parse().expect("Invalid private key"); - let signer = SignerMiddleware::new(&self.ee_a.execution_engine.provider, wallet1); + let private_key_signer: PrivateKeySigner = + PRIVATE_KEYS[0].parse().expect("Invalid private key"); + let wallet = EthereumWallet::from(private_key_signer); // We hardcode the accounts here since some EEs start with a default unlocked account - let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); - let account2 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT2).unwrap()); + let account1 = 
AlloyAddress::from_slice(&hex::decode(ACCOUNT1).unwrap()); + let account2 = AlloyAddress::from_slice(&hex::decode(ACCOUNT2).unwrap()); /* * Read the terminal block hash from both pairs, check it's equal. @@ -237,11 +239,18 @@ impl TestRig { if self.use_local_signing { // Sign locally with the Signer middleware - for (i, tx) in txs.clone().into_iter().enumerate() { + for (i, mut tx) in txs.clone().into_iter().enumerate() { // The local signer uses eth_sendRawTransaction, so we need to manually set the nonce - let mut tx = tx.clone(); - tx.set_nonce(i as u64); - let pending_tx = signer.send_transaction(tx, None).await.unwrap(); + tx = tx.with_nonce(i as u64); + let wallet_provider = ProviderBuilder::new().wallet(wallet.clone()).connect_http( + self.ee_a + .execution_engine + .http_url() + .to_string() + .parse() + .unwrap(), + ); + let pending_tx = wallet_provider.send_transaction(tx).await.unwrap(); pending_txs.push(pending_tx); } } else { @@ -261,7 +270,7 @@ impl TestRig { .ee_a .execution_engine .provider - .send_transaction(tx, None) + .send_transaction(tx) .await .unwrap(); pending_txs.push(pending_tx); @@ -446,11 +455,10 @@ impl TestRig { // Verify that all submitted txs were successful for pending_tx in pending_txs { - let tx_receipt = pending_tx.await.unwrap().unwrap(); - assert_eq!( - tx_receipt.status, - Some(1.into()), - "Tx index {} has invalid status ", + let tx_receipt = pending_tx.get_receipt().await.unwrap(); + assert!( + tx_receipt.status(), + "Tx index {:?} has invalid status ", tx_receipt.transaction_index ); } diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index b6111426b67..fe36a1bf67f 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -1,8 +1,7 @@ +use alloy_network::TransactionBuilder; +use alloy_primitives::{Address, U256}; +use alloy_rpc_types_eth::{AccessList, TransactionRequest}; use deposit_contract::{BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, encode_eth1_tx_data}; -use ethers_core::types::{ - Address, Bytes, Eip1559TransactionRequest, TransactionRequest, U256, - transaction::{eip2718::TypedTransaction, eip2930::AccessList}, -}; use types::{DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, Signature}; /// Hardcoded deposit contract address based on sender address and nonce @@ -21,7 +20,7 @@ pub enum Transaction { } /// Get a list of transactions to publish to the execution layer. 
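The remaining hunks migrate the integration tests from `ethers` to `alloy`. For reference, a minimal EIP-1559 transfer in the new builder style, mirroring the calls introduced below (values are placeholders):

```rust
use alloy_network::TransactionBuilder;
use alloy_primitives::{Address, U256};
use alloy_rpc_types_eth::TransactionRequest;

// Sketch: the alloy equivalent of the old ethers Eip1559TransactionRequest.
fn example_transfer(from: Address, to: Address) -> TransactionRequest {
    TransactionRequest::default()
        .from(from)
        .to(to)
        .value(U256::from(1))
        .with_max_fee_per_gas(2_000_000_000u128)
        .with_max_priority_fee_per_gas(1_000_000_000u128)
}
```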
-pub fn transactions(account1: Address, account2: Address) -> Vec { +pub fn transactions(account1: Address, account2: Address) -> Vec { vec![ Transaction::Transfer(account1, account2).transaction::(), Transaction::TransferLegacy(account1, account2).transaction::(), @@ -29,7 +28,7 @@ pub fn transactions(account1: Address, account2: Address) -> Vec(), Transaction::DepositDepositContract { sender: account1, - deposit_contract_address: ethers_core::types::Address::from_slice( + deposit_contract_address: Address::from_slice( &hex::decode(DEPOSIT_CONTRACT_ADDRESS).unwrap(), ), } @@ -38,33 +37,36 @@ pub fn transactions(account1: Address, account2: Address) -> Vec(&self) -> TypedTransaction { + pub fn transaction(&self) -> TransactionRequest { match &self { - Self::TransferLegacy(from, to) => TransactionRequest::new() + Self::TransferLegacy(from, to) => TransactionRequest::default() .from(*from) .to(*to) - .value(1) - .into(), - Self::Transfer(from, to) => Eip1559TransactionRequest::new() + .value(U256::from(1)) + .with_gas_price(1_000_000_000u128), // 1 gwei + Self::Transfer(from, to) => TransactionRequest::default() .from(*from) .to(*to) - .value(1) - .into(), - Self::TransferAccessList(from, to) => TransactionRequest::new() + .value(U256::from(1)) + .with_max_fee_per_gas(2_000_000_000u128) + .with_max_priority_fee_per_gas(1_000_000_000u128), + Self::TransferAccessList(from, to) => TransactionRequest::default() .from(*from) .to(*to) - .value(1) + .value(U256::from(1)) .with_access_list(AccessList::default()) - .into(), + .with_gas_price(1_000_000_000u128), // 1 gwei Self::DeployDepositContract(addr) => { let mut bytecode = String::from_utf8(BYTECODE.to_vec()).unwrap(); bytecode.retain(|c| c.is_ascii_hexdigit()); let bytecode = hex::decode(&bytecode[1..]).unwrap(); - TransactionRequest::new() + let mut req = TransactionRequest::default() .from(*addr) - .data(Bytes::from(bytecode)) - .gas(CONTRACT_DEPLOY_GAS) - .into() + .with_input(bytecode) + .with_gas_limit(CONTRACT_DEPLOY_GAS.try_into().unwrap()) + .with_gas_price(1_000_000_000u128); // 1 gwei + req.set_create(); + req } Self::DepositDepositContract { sender, @@ -80,13 +82,13 @@ impl Transaction { signature: Signature::empty().into(), }; deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - TransactionRequest::new() + TransactionRequest::default() .from(*sender) .to(*deposit_contract_address) - .data(Bytes::from(encode_eth1_tx_data(&deposit).unwrap())) - .gas(DEPOSIT_GAS) - .value(U256::from(amount) * U256::exp10(9)) - .into() + .with_input(encode_eth1_tx_data(&deposit).unwrap()) + .with_gas_limit(DEPOSIT_GAS.try_into().unwrap()) + .value(U256::from(amount) * U256::from(10).pow(U256::from(9))) + .with_gas_price(1_000_000_000u128) // 1 gwei } } } diff --git a/testing/state_transition_vectors/Makefile b/testing/state_transition_vectors/Makefile index 437aa50b00a..c90810ad398 100644 --- a/testing/state_transition_vectors/Makefile +++ b/testing/state_transition_vectors/Makefile @@ -5,4 +5,4 @@ test: cargo test --release --features "$(TEST_FEATURES)" clean: - rm -r vectors/ + rm -rf vectors/ diff --git a/validator_client/graffiti_file/src/lib.rs b/validator_client/graffiti_file/src/lib.rs index 8b5637d09ed..8e40ef907dd 100644 --- a/validator_client/graffiti_file/src/lib.rs +++ b/validator_client/graffiti_file/src/lib.rs @@ -154,7 +154,7 @@ mod tests { let pk5 = PublicKeyBytes::deserialize(&hex::decode(&PK5[2..]).unwrap()).unwrap(); let pk6 = PublicKeyBytes::deserialize(&hex::decode(&PK6[2..]).unwrap()).unwrap(); - let 
file_name = temp.into_path().join("graffiti.txt"); + let file_name = temp.keep().join("graffiti.txt"); let file = File::create(&file_name).unwrap(); let mut graffiti_file = LineWriter::new(file); diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index da6e8f35886..a6ce67fae91 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -243,6 +243,11 @@ impl AttestationService AttestationService AttestationService AttestationService, Vec<_>) = join_all(signing_futures) + .instrument(info_span!( + "sign_attestations", + count = validator_duties.len() + )) .await .into_iter() .flatten() @@ -487,6 +498,10 @@ impl AttestationService(single_attestations, fork_name) .await }) + .instrument(info_span!( + "publish_attestations", + count = attestations.len() + )) .await { Ok(()) => info!( @@ -523,6 +538,7 @@ impl AttestationService AttestationService AttestationService AttestationService { diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index c111b1f22eb..8ec53d3f409 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -1,5 +1,4 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, Error as FallbackError, Errors}; -use bls::SignatureBytes; use eth2::{BeaconNodeHttpClient, StatusCode}; use graffiti_file::{GraffitiFile, determine_graffiti}; use logging::crit; @@ -11,7 +10,7 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use types::{BlockType, ChainSpec, EthSpec, Graffiti, PublicKeyBytes, Slot}; use validator_store::{Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore}; @@ -298,7 +297,7 @@ impl BlockService { self.inner.executor.spawn( async move { let result = service - .publish_block(slot, validator_pubkey, builder_boost_factor) + .get_validator_block_and_publish_block(slot, validator_pubkey, builder_boost_factor) .await; match result { @@ -320,6 +319,7 @@ impl BlockService { } #[allow(clippy::too_many_arguments)] + #[instrument(skip_all, fields(%slot, ?validator_pubkey))] async fn sign_and_publish_block( &self, proposer_fallback: ProposerFallback, @@ -333,6 +333,7 @@ impl BlockService { let res = self .validator_store .sign_block(*validator_pubkey, unsigned_block, slot) + .instrument(info_span!("sign_block")) .await; let signed_block = match res { @@ -389,7 +390,12 @@ impl BlockService { Ok(()) } - async fn publish_block( + #[instrument( + name = "block_proposal_duty_cycle", + skip_all, + fields(%slot, ?validator_pubkey) + )] + async fn get_validator_block_and_publish_block( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -442,33 +448,80 @@ impl BlockService { info!(slot = slot.as_u64(), 
"Requesting unsigned block"); - // Request block from first responsive beacon node. + // Request an SSZ block from all beacon nodes in order, returning on the first successful response. + // If all nodes fail, run a second pass falling back to JSON. // - // Try the proposer nodes last, since it's likely that they don't have a + // Proposer nodes will always be tried last during each pass since it's likely that they don't have a // great view of attestations on the network. - let unsigned_block = proposer_fallback + let ssz_block_response = proposer_fallback .request_proposers_last(|beacon_node| async move { let _get_timer = validator_metrics::start_timer_vec( &validator_metrics::BLOCK_SERVICE_TIMES, &[validator_metrics::BEACON_BLOCK_HTTP_GET], ); - Self::get_validator_block( - &beacon_node, - slot, - randao_reveal_ref, - graffiti, - proposer_index, - builder_boost_factor, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - }) + beacon_node + .get_validator_blocks_v3_ssz::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await }) - .await?; + .await; + + let block_response = match ssz_block_response { + Ok((ssz_block_response, _metadata)) => ssz_block_response, + Err(e) => { + warn!( + slot = slot.as_u64(), + error = %e, + "SSZ block production failed, falling back to JSON" + ); + + proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + let (json_block_response, _metadata) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + Ok(json_block_response.data) + }) + .await + .map_err(BlockError::from)? + } + }; + + let (block_proposer, unsigned_block) = match block_response { + eth2::types::ProduceBlockV3Response::Full(block) => { + (block.block().proposer_index(), UnsignedBlock::Full(block)) + } + eth2::types::ProduceBlockV3Response::Blinded(block) => { + (block.proposer_index(), UnsignedBlock::Blinded(block)) + } + }; + + info!(slot = slot.as_u64(), "Received unsigned block"); + if proposer_index != Some(block_proposer) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. 
Beacon chain re-orged".to_string(), + )); + } self_ref .sign_and_publish_block( @@ -483,6 +536,7 @@ impl BlockService { Ok(()) } + #[instrument(skip_all)] async fn publish_signed_block_contents( &self, signed_block: &SignedBlock, @@ -517,70 +571,6 @@ impl BlockService { } Ok::<_, BlockError>(()) } - - async fn get_validator_block( - beacon_node: &BeaconNodeHttpClient, - slot: Slot, - randao_reveal_ref: &SignatureBytes, - graffiti: Option, - proposer_index: Option, - builder_boost_factor: Option, - ) -> Result, BlockError> { - let block_response = match beacon_node - .get_validator_blocks_v3_ssz::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - ) - .await - { - Ok((ssz_block_response, _)) => ssz_block_response, - Err(e) => { - warn!( - slot = slot.as_u64(), - error = %e, - "Beacon node does not support SSZ in block production, falling back to JSON" - ); - - let (json_block_response, _) = beacon_node - .get_validator_blocks_v3::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })?; - - // Extract ProduceBlockV3Response (data field of the struct ForkVersionedResponse) - json_block_response.data - } - }; - - let (block_proposer, unsigned_block) = match block_response { - eth2::types::ProduceBlockV3Response::Full(block) => { - (block.block().proposer_index(), UnsignedBlock::Full(block)) - } - eth2::types::ProduceBlockV3Response::Blinded(block) => { - (block.proposer_index(), UnsignedBlock::Blinded(block)) - } - }; - - info!(slot = slot.as_u64(), "Received unsigned block"); - if proposer_index != Some(block_proposer) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), - )); - } - - Ok::<_, BlockError>(unsigned_block) - } } /// Wrapper for values we want to log about a block we signed, for easy extraction from the possible diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 02f9f24c8a1..5f6b1cb710f 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use types::{ ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, @@ -208,7 +208,8 @@ impl SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService, + + /// Proof cache size (number of execution block hashes to cache proofs for) + /// TODO(zkproofs): remove since we use da_checker for proof caches + pub proof_cache_size: usize, +} + +impl Default for ZKVMExecutionLayerConfig { + fn default() -> Self { + Self { + min_proofs_required: DEFAULT_MIN_PROOFS_REQUIRED, + generation_proof_types: HashSet::new(), + // TODO(zkproofs): This is somewhat arbitrary. 
The number was computed
+            // by NUMBER_OF_BLOCKS_BEFORE_FINALIZATION * NUM_PROOFS_PER_BLOCK = 64 * 8
+            // We can change it to be more rigorous/scientific
+            proof_cache_size: 64 * 8,
+        }
+    }
+}
+
+impl ZKVMExecutionLayerConfig {
+    pub fn validate(&self) -> Result<(), String> {
+        if self.min_proofs_required == 0 {
+            return Err("min_proofs_required must be at least 1".to_string());
+        }
+
+        if self.proof_cache_size == 0 {
+            return Err("proof_cache_size must be at least 1".to_string());
+        }
+
+        // Note: We do NOT validate that generation_proof_types.len() >= min_proofs_required
+        // because proof-generating nodes validate via their execution layer, not via proofs.
+        // Only lightweight verifier nodes (without EL) need to wait for min_proofs_required.
+
+        Ok(())
+    }
+
+    /// Create a builder for the config
+    /// TODO(zkproofs): I think we can remove this
+    pub fn builder() -> ZKVMExecutionLayerConfigBuilder {
+        ZKVMExecutionLayerConfigBuilder::default()
+    }
+}
+
+#[derive(Default)]
+pub struct ZKVMExecutionLayerConfigBuilder {
+    min_proofs_required: Option<usize>,
+    generation_proof_types: HashSet<ExecutionProofId>,
+    proof_cache_size: Option<usize>,
+}
+
+impl ZKVMExecutionLayerConfigBuilder {
+    pub fn min_proofs_required(mut self, min: usize) -> Self {
+        self.min_proofs_required = Some(min);
+        self
+    }
+
+    pub fn generation_proof_types(mut self, proof_types: HashSet<ExecutionProofId>) -> Self {
+        self.generation_proof_types = proof_types;
+        self
+    }
+
+    pub fn add_generation_proof_type(mut self, proof_type: ExecutionProofId) -> Self {
+        self.generation_proof_types.insert(proof_type);
+        self
+    }
+
+    pub fn proof_cache_size(mut self, size: usize) -> Self {
+        self.proof_cache_size = Some(size);
+        self
+    }
+
+    /// Build the configuration
+    pub fn build(self) -> Result<ZKVMExecutionLayerConfig, String> {
+        let config = ZKVMExecutionLayerConfig {
+            min_proofs_required: self
+                .min_proofs_required
+                .unwrap_or(DEFAULT_MIN_PROOFS_REQUIRED),
+            generation_proof_types: self.generation_proof_types,
+            proof_cache_size: self.proof_cache_size.unwrap_or(1024),
+        };
+
+        config.validate()?;
+        Ok(config)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_valid_config() {
+        let proof_type_0 = ExecutionProofId::new(0).unwrap();
+        let proof_type_1 = ExecutionProofId::new(1).unwrap();
+
+        let config = ZKVMExecutionLayerConfig::builder()
+            .add_generation_proof_type(proof_type_0)
+            .add_generation_proof_type(proof_type_1)
+            .min_proofs_required(2)
+            .build();
+
+        assert!(config.is_ok());
+    }
+
+    #[test]
+    fn test_valid_config_with_generation() {
+        let proof_type_0 = ExecutionProofId::new(0).unwrap();
+        let proof_type_1 = ExecutionProofId::new(1).unwrap();
+
+        let config = ZKVMExecutionLayerConfig::builder()
+            .add_generation_proof_type(proof_type_0)
+            .add_generation_proof_type(proof_type_1)
+            .min_proofs_required(1)
+            .proof_cache_size(512)
+            .build();
+
+        assert!(config.is_ok());
+        let config = config.unwrap();
+        assert_eq!(config.generation_proof_types.len(), 2);
+        assert_eq!(config.min_proofs_required, 1);
+        assert_eq!(config.proof_cache_size, 512);
+    }
+
+    #[test]
+    fn test_min_proofs_required_zero() {
+        let config = ZKVMExecutionLayerConfig::builder()
+            .min_proofs_required(0) // Invalid: must be > 0
+            .build();
+
+        assert!(config.is_err());
+    }
+
+    #[test]
+    fn test_no_generation_proof_types() {
+        // Node can receive and verify proofs without generating any
+        let config = ZKVMExecutionLayerConfig::builder()
+            .min_proofs_required(2)
+            .build();
+
+        assert!(config.is_ok());
+        let config = config.unwrap();
+        assert!(config.generation_proof_types.is_empty());
+    }
+
+
#[test] + fn test_generation_proof_types_less_than_min() { + // Proof-generating nodes validate via EL, not proofs + // They can generate any number of proof types regardless of min_proofs_required + let proof_type_0 = ExecutionProofId::new(0).unwrap(); + + let config = ZKVMExecutionLayerConfig::builder() + .add_generation_proof_type(proof_type_0) + .min_proofs_required(2) + .build(); + + assert!(config.is_ok()); + let config = config.unwrap(); + assert_eq!(config.generation_proof_types.len(), 1); + assert_eq!(config.min_proofs_required, 2); + } +} diff --git a/zkvm_execution_layer/src/dummy_proof_gen.rs b/zkvm_execution_layer/src/dummy_proof_gen.rs new file mode 100644 index 00000000000..596dd90f99d --- /dev/null +++ b/zkvm_execution_layer/src/dummy_proof_gen.rs @@ -0,0 +1,127 @@ +use crate::proof_generation::{ProofGenerationError, ProofGenerationResult, ProofGenerator}; +use async_trait::async_trait; +use std::time::Duration; +use tokio::time::sleep; +use types::{ExecutionBlockHash, ExecutionProof, ExecutionProofId, Hash256, Slot}; + +/// Dummy proof generator for testing +/// +/// This generator simulates the proof generation process with a configurable delay +/// and creates dummy proofs. +pub struct DummyProofGenerator { + proof_id: ExecutionProofId, + generation_delay: Duration, +} + +impl DummyProofGenerator { + /// Create a new dummy generator for the specified proof ID + pub fn new(proof_id: ExecutionProofId) -> Self { + Self { + proof_id, + generation_delay: Duration::from_millis(50), + } + } + + /// Create a new dummy generator with custom generation delay + pub fn with_delay(proof_id: ExecutionProofId, delay: Duration) -> Self { + Self { + proof_id, + generation_delay: delay, + } + } +} + +#[async_trait] +impl ProofGenerator for DummyProofGenerator { + async fn generate( + &self, + slot: Slot, + payload_hash: &ExecutionBlockHash, + block_root: &Hash256, + ) -> ProofGenerationResult { + // Simulate proof generation work + if !self.generation_delay.is_zero() { + sleep(self.generation_delay).await; + } + + let proof_data = vec![ + 0xFF, + self.proof_id.as_u8(), + payload_hash.0[0], + payload_hash.0[1], + payload_hash.0[2], + payload_hash.0[3], + ]; + + ExecutionProof::new(self.proof_id, slot, *payload_hash, *block_root, proof_data) + .map_err(ProofGenerationError::ProofGenerationFailed) + } + + fn proof_id(&self) -> ExecutionProofId { + self.proof_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_dummy_generator_success() { + let subnet = ExecutionProofId::new(0).unwrap(); + let generator = DummyProofGenerator::new(subnet); + let slot = Slot::new(100); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let block_root = Hash256::repeat_byte(2); + + let result = generator.generate(slot, &block_hash, &block_root).await; + assert!(result.is_ok()); + + let proof = result.unwrap(); + assert_eq!(proof.proof_id, subnet); + assert_eq!(proof.slot, slot); + assert_eq!(proof.block_hash, block_hash); + assert_eq!(proof.block_root, block_root); + assert!(proof.proof_data_size() > 0); + } + + #[tokio::test] + async fn test_dummy_generator_deterministic() { + let subnet = ExecutionProofId::new(1).unwrap(); + let generator = DummyProofGenerator::new(subnet); + let slot = Slot::new(200); + let block_hash = ExecutionBlockHash::repeat_byte(42); + let block_root = Hash256::repeat_byte(99); + + // Generate twice + let proof1 = generator + .generate(slot, &block_hash, &block_root) + .await + .unwrap(); + let proof2 = generator + .generate(slot, &block_hash, 
&block_root) + .await + .unwrap(); + + // Should be identical + assert_eq!(proof1.proof_data_slice(), proof2.proof_data_slice()); + } + + #[tokio::test] + async fn test_dummy_generator_custom_delay() { + // TODO(zkproofs): Maybe remove, mainly need it as a temp check + let subnet = ExecutionProofId::new(0).unwrap(); + let delay = Duration::from_millis(1); + let generator = DummyProofGenerator::with_delay(subnet, delay); + let slot = Slot::new(100); + let block_hash = ExecutionBlockHash::repeat_byte(1); + let block_root = Hash256::repeat_byte(2); + + let start = tokio::time::Instant::now(); + let result = generator.generate(slot, &block_hash, &block_root).await; + let elapsed = start.elapsed(); + + assert!(result.is_ok()); + assert!(elapsed >= delay); + } +} diff --git a/zkvm_execution_layer/src/dummy_proof_verifier.rs b/zkvm_execution_layer/src/dummy_proof_verifier.rs new file mode 100644 index 00000000000..b7d06a852c5 --- /dev/null +++ b/zkvm_execution_layer/src/dummy_proof_verifier.rs @@ -0,0 +1,102 @@ +use crate::proof_verification::{ProofVerificationResult, ProofVerifier, VerificationError}; +use std::time::Duration; +use types::{ExecutionProof, ExecutionProofId}; + +/// Dummy proof verifier for testing +/// +/// This verifier simulates the verification process with a configurable delay +/// and always returns successful verification. +pub struct DummyVerifier { + proof_id: ExecutionProofId, + verification_delay: Duration, +} + +impl DummyVerifier { + /// Create a new dummy verifier for the specified proof ID + pub fn new(proof_id: ExecutionProofId) -> Self { + Self { + proof_id, + verification_delay: Duration::from_millis(10), + } + } + + /// Create a new dummy verifier with custom verification delay + pub fn with_delay(proof_id: ExecutionProofId, delay: Duration) -> Self { + Self { + proof_id, + verification_delay: delay, + } + } +} + +impl ProofVerifier for DummyVerifier { + fn verify(&self, proof: &ExecutionProof) -> ProofVerificationResult { + // Check that the proof is for the correct subnet + if proof.proof_id != self.proof_id { + return Err(VerificationError::UnsupportedProofID(proof.proof_id)); + } + + // Simulate verification work + if !self.verification_delay.is_zero() { + std::thread::sleep(self.verification_delay); + } + + // Dummy verifier always succeeds + // In a real implementation, this would cryptographically verify that + // proof.proof_data is a valid zkVM proof for proof.block_hash + Ok(true) + } + + fn proof_id(&self) -> ExecutionProofId { + self.proof_id + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{ExecutionBlockHash, FixedBytesExtended}; + + fn create_test_proof( + subnet_id: ExecutionProofId, + block_hash: types::ExecutionBlockHash, + ) -> ExecutionProof { + use types::{Hash256, Slot}; + ExecutionProof::new( + subnet_id, + Slot::new(100), + block_hash, + Hash256::zero(), + vec![1, 2, 3, 4], + ) + .unwrap() + } + + #[tokio::test] + async fn test_dummy_verifier_success() { + let subnet = ExecutionProofId::new(0).unwrap(); + let verifier = DummyVerifier::new(subnet); + let block_hash = ExecutionBlockHash::zero(); + let proof = create_test_proof(subnet, block_hash); + + let result = verifier.verify(&proof); + assert!(result.is_ok()); + assert!(result.unwrap()); + } + + #[tokio::test] + async fn test_dummy_verifier_wrong_subnet() { + let subnet_0 = ExecutionProofId::new(0).unwrap(); + let subnet_1 = ExecutionProofId::new(1).unwrap(); + let verifier = DummyVerifier::new(subnet_0); + let block_hash = ExecutionBlockHash::zero(); + let proof = 
create_test_proof(subnet_1, block_hash);
+
+        let result = verifier.verify(&proof);
+        assert!(result.is_err());
+        assert!(matches!(
+            result.unwrap_err(),
+            VerificationError::UnsupportedProofID(_)
+        ));
+    }
+}
diff --git a/zkvm_execution_layer/src/engine_api.rs b/zkvm_execution_layer/src/engine_api.rs
new file mode 100644
index 00000000000..c0f7c4ebde2
--- /dev/null
+++ b/zkvm_execution_layer/src/engine_api.rs
@@ -0,0 +1,50 @@
+use execution_layer::{BlockProposalContentsType, Error as ExecutionLayerError, PayloadStatus};
+use types::{EthSpec, ExecPayload, ExecutionBlockHash};
+
+type PayloadId = [u8; 8];
+
+pub struct ZKVMEngineApi<E: EthSpec> {
+    _phantom: std::marker::PhantomData<E>,
+}
+
+impl<E: EthSpec> Default for ZKVMEngineApi<E> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<E: EthSpec> ZKVMEngineApi<E> {
+    pub fn new() -> Self {
+        Self {
+            _phantom: std::marker::PhantomData,
+        }
+    }
+
+    /// Verify a new execution payload using ZK proof
+    pub async fn new_payload(
+        &self,
+        _execution_payload: &impl ExecPayload<E>,
+    ) -> Result<PayloadStatus, ExecutionLayerError> {
+        // TODO(zkproofs): There are some engine_api checks that should be made, but these should be
+        // done when we have the proof, check the EL newPayload method to see what these are
+        Ok(PayloadStatus::Syncing)
+    }
+
+    /// Update fork choice state
+    pub async fn forkchoice_updated(
+        &self,
+        _head_block_hash: ExecutionBlockHash,
+    ) -> Result<PayloadStatus, ExecutionLayerError> {
+        // For now, just return Valid status
+        Ok(PayloadStatus::Valid)
+    }
+
+    /// Get a payload for block production
+    pub async fn get_payload(
+        &self,
+        _payload_id: PayloadId,
+    ) -> Result<BlockProposalContentsType<E>, ExecutionLayerError> {
+        // TODO(zkproofs): use mev-boost
+        Err(ExecutionLayerError::CannotProduceHeader)
+    }
+}
diff --git a/zkvm_execution_layer/src/lib.rs b/zkvm_execution_layer/src/lib.rs
new file mode 100644
index 00000000000..d8a6ec74af7
--- /dev/null
+++ b/zkvm_execution_layer/src/lib.rs
@@ -0,0 +1,18 @@
+pub mod config;
+
+pub mod proof_generation;
+pub mod proof_verification;
+
+pub mod registry_proof_gen;
+pub mod registry_proof_verification;
+
+pub mod dummy_proof_gen;
+pub mod dummy_proof_verifier;
+
+/// Engine API implementation for ZK-VM execution
+pub mod engine_api;
+
+pub use config::ZKVMExecutionLayerConfig;
+/// Re-export the main ZK-VM engine API and config
+pub use engine_api::ZKVMEngineApi;
+pub use registry_proof_gen::GeneratorRegistry;
diff --git a/zkvm_execution_layer/src/proof_generation.rs b/zkvm_execution_layer/src/proof_generation.rs
new file mode 100644
index 00000000000..9254d5fe560
--- /dev/null
+++ b/zkvm_execution_layer/src/proof_generation.rs
@@ -0,0 +1,51 @@
+use async_trait::async_trait;
+use std::sync::Arc;
+use thiserror::Error;
+use types::{ExecutionProof, ExecutionProofId};
+
+/// Result type for proof generation operations
+pub type ProofGenerationResult = Result<ExecutionProof, ProofGenerationError>;
+
+/// Errors that can occur during proof generation
+#[derive(Debug, Error)]
+pub enum ProofGenerationError {
+    #[error("Proof generation failed: {0}")]
+    ProofGenerationFailed(String),
+
+    #[error("Missing execution witness data: {0}")]
+    MissingWitnessData(String),
+
+    #[error("Invalid execution witness: {0}")]
+    InvalidWitness(String),
+
+    #[error("Proof generation timeout")]
+    Timeout,
+
+    #[error("Insufficient resources: {0}")]
+    InsufficientResources(String),
+
+    #[error("Internal error: {0}")]
+    Internal(String),
+}
+
+/// Trait for proof generation (one implementation per zkVM+EL combo)
+///
+/// Each proof system (RISC Zero, SP1, etc.) + zkVM combination implements this trait
+/// to generate proofs for execution payloads from their subnet.
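Since `generate` on the trait below is async and `ProofGenerationError` already carries a dedicated `Timeout` variant, a caller is expected to bound generation time itself. A minimal sketch of such a wrapper, assuming it lives in this crate and that `tokio` is available (both assumptions, not part of the diff):

```rust
use crate::proof_generation::{ProofGenerationError, ProofGenerationResult, ProofGenerator};
use std::time::Duration;
use types::{ExecutionBlockHash, Hash256, Slot};

// Hypothetical helper: bound a single proof generation by `limit`, mapping
// expiry onto the Timeout variant instead of hanging on a slow prover.
async fn generate_with_timeout(
    generator: &dyn ProofGenerator,
    slot: Slot,
    payload_hash: &ExecutionBlockHash,
    block_root: &Hash256,
    limit: Duration,
) -> ProofGenerationResult {
    match tokio::time::timeout(limit, generator.generate(slot, payload_hash, block_root)).await {
        Ok(result) => result,
        Err(_elapsed) => Err(ProofGenerationError::Timeout),
    }
}
```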
+#[async_trait]
+pub trait ProofGenerator: Send + Sync {
+    /// Generate a proof for the given execution payload
+    async fn generate(
+        &self,
+        slot: types::Slot,
+        payload_hash: &types::ExecutionBlockHash,
+        block_root: &types::Hash256,
+    ) -> ProofGenerationResult;
+
+    /// Get the proof ID this generator produces proofs for
+    fn proof_id(&self) -> ExecutionProofId;
+}
+
+/// Type-erased proof generator mainly for convenience
+/// TODO(zkproofs): Check if we can remove this
+pub type DynProofGenerator = Arc<dyn ProofGenerator>;
diff --git a/zkvm_execution_layer/src/proof_verification.rs b/zkvm_execution_layer/src/proof_verification.rs
new file mode 100644
index 00000000000..164f56bd1ef
--- /dev/null
+++ b/zkvm_execution_layer/src/proof_verification.rs
@@ -0,0 +1,43 @@
+use std::sync::Arc;
+use thiserror::Error;
+use types::{ExecutionProof, ExecutionProofId};
+
+/// Result type for proof verification operations
+pub type ProofVerificationResult = Result<bool, VerificationError>;
+
+/// Errors that can occur during proof verification
+#[derive(Debug, Error)]
+pub enum VerificationError {
+    #[error("Proof verification failed: {0}")]
+    VerificationFailed(String),
+
+    #[error("Invalid proof format: {0}")]
+    InvalidProofFormat(String),
+
+    #[error("Unsupported proof ID: {0}")]
+    UnsupportedProofID(ExecutionProofId),
+
+    #[error("Proof size mismatch: expected {expected}, got {actual}")]
+    ProofSizeMismatch { expected: usize, actual: usize },
+
+    #[error("Internal error: {0}")]
+    Internal(String),
+}
+
+/// Trait for proof verification (one implementation per zkVM+EL combination)
+pub trait ProofVerifier: Send + Sync {
+    /// Verify that the proof is valid.
+    ///
+    /// TODO(zkproofs): we can probably collapse Ok(false) and Err or make Ok(false) an enum variant
+    ///
+    /// Returns:
+    /// - Ok(true) if valid,
+    /// - Ok(false) if invalid (but well-formed)
+    /// - Err if the proof is malformed or verification cannot be performed.
+    fn verify(&self, proof: &ExecutionProof) -> ProofVerificationResult;
+
+    fn proof_id(&self) -> ExecutionProofId;
+}
+
+/// Type-erased proof verifier
+pub type DynProofVerifier = Arc<dyn ProofVerifier>;
diff --git a/zkvm_execution_layer/src/registry_proof_gen.rs b/zkvm_execution_layer/src/registry_proof_gen.rs
new file mode 100644
index 00000000000..01ded0af454
--- /dev/null
+++ b/zkvm_execution_layer/src/registry_proof_gen.rs
@@ -0,0 +1,132 @@
+use crate::dummy_proof_gen::DummyProofGenerator;
+use crate::proof_generation::DynProofGenerator;
+use hashbrown::HashMap;
+use std::collections::HashSet;
+use std::sync::Arc;
+use types::ExecutionProofId;
+
+/// Registry mapping proof IDs to proof generators
+///
+/// Each proof ID represents a different zkVM/proof system, and this registry
+/// maintains the mapping from proof ID to the appropriate generator implementation.
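A usage sketch for the registry defined just below: fan proof generation out across every registered generator for a freshly imported payload. The helper is hypothetical and assumes `tokio`; it leans on each entry being an `Arc<dyn ProofGenerator>` and the identifier types being `Copy`, so everything can move into spawned tasks:

```rust
use crate::proof_generation::{ProofGenerationError, ProofGenerationResult};
use types::{ExecutionBlockHash, Hash256, Slot};

async fn generate_all(
    registry: &GeneratorRegistry,
    slot: Slot,
    payload_hash: ExecutionBlockHash,
    block_root: Hash256,
) -> Vec<ProofGenerationResult> {
    let mut handles = Vec::new();
    for proof_id in registry.proof_ids() {
        if let Some(generator) = registry.get_generator(proof_id) {
            // Each generator is an Arc<dyn ProofGenerator>, so it moves into
            // its own task together with the Copy-able identifiers.
            handles.push(tokio::spawn(async move {
                generator.generate(slot, &payload_hash, &block_root).await
            }));
        }
    }

    let mut results = Vec::new();
    for handle in handles {
        // Surface task panics as Internal errors instead of unwrapping.
        results.push(
            handle
                .await
                .unwrap_or_else(|e| Err(ProofGenerationError::Internal(e.to_string()))),
        );
    }
    results
}
```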
+#[derive(Clone)]
+pub struct GeneratorRegistry {
+    generators: HashMap<ExecutionProofId, DynProofGenerator>,
+}
+
+impl GeneratorRegistry {
+    /// Create a new empty generator registry
+    pub fn new() -> Self {
+        Self {
+            generators: HashMap::new(),
+        }
+    }
+
+    /// Create a registry with dummy generators for specified proof IDs
+    pub fn new_with_dummy_generators(enabled_subnets: HashSet<ExecutionProofId>) -> Self {
+        let mut generators = HashMap::new();
+
+        for subnet_id in enabled_subnets {
+            generators.insert(
+                subnet_id,
+                Arc::new(DummyProofGenerator::new(subnet_id)) as DynProofGenerator,
+            );
+        }
+
+        Self { generators }
+    }
+
+    pub fn register_generator(&mut self, generator: DynProofGenerator) {
+        let proof_id = generator.proof_id();
+        self.generators.insert(proof_id, generator);
+    }
+
+    pub fn get_generator(&self, proof_id: ExecutionProofId) -> Option<DynProofGenerator> {
+        self.generators.get(&proof_id).cloned()
+    }
+
+    /// Check if a generator is registered for a proof ID
+    pub fn has_generator(&self, proof_id: ExecutionProofId) -> bool {
+        self.generators.contains_key(&proof_id)
+    }
+
+    /// Get the number of registered generators
+    pub fn len(&self) -> usize {
+        self.generators.len()
+    }
+
+    /// Check if the registry is empty
+    pub fn is_empty(&self) -> bool {
+        self.generators.is_empty()
+    }
+
+    pub fn proof_ids(&self) -> Vec<ExecutionProofId> {
+        self.generators.keys().copied().collect()
+    }
+}
+
+impl Default for GeneratorRegistry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_dummy_generators_registry() {
+        let mut enabled_subnets = HashSet::new();
+        enabled_subnets.insert(ExecutionProofId::new(0).unwrap());
+        enabled_subnets.insert(ExecutionProofId::new(1).unwrap());
+
+        let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets);
+        assert!(!registry.is_empty());
+        assert_eq!(registry.len(), 2);
+
+        assert!(registry.has_generator(ExecutionProofId::new(0).unwrap()));
+        assert!(registry.has_generator(ExecutionProofId::new(1).unwrap()));
+        assert!(!registry.has_generator(ExecutionProofId::new(2).unwrap()));
+    }
+
+    #[test]
+    fn test_register_generator() {
+        let mut registry = GeneratorRegistry::new();
+        let subnet_id = ExecutionProofId::new(0).unwrap();
+        let generator = Arc::new(DummyProofGenerator::new(subnet_id));
+
+        registry.register_generator(generator);
+
+        assert_eq!(registry.len(), 1);
+        assert!(registry.has_generator(subnet_id));
+    }
+
+    #[test]
+    fn test_get_generator() {
+        let mut enabled_subnets = HashSet::new();
+        enabled_subnets.insert(ExecutionProofId::new(3).unwrap());
+
+        let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets);
+        let subnet_id = ExecutionProofId::new(3).unwrap();
+
+        let generator = registry.get_generator(subnet_id);
+        assert!(generator.is_some());
+        assert_eq!(generator.unwrap().proof_id(), subnet_id);
+    }
+
+    #[test]
+    fn test_subnet_ids() {
+        let mut enabled_subnets = HashSet::new();
+        enabled_subnets.insert(ExecutionProofId::new(0).unwrap());
+        enabled_subnets.insert(ExecutionProofId::new(5).unwrap());
+
+        let registry = GeneratorRegistry::new_with_dummy_generators(enabled_subnets.clone());
+        let subnet_ids = registry.proof_ids();
+
+        assert_eq!(subnet_ids.len(), 2);
+        for subnet_id in enabled_subnets {
+            assert!(subnet_ids.contains(&subnet_id));
+        }
+    }
+}
diff --git a/zkvm_execution_layer/src/registry_proof_verification.rs b/zkvm_execution_layer/src/registry_proof_verification.rs
new file mode 100644
index 00000000000..e2f914e1965
--- /dev/null
+++ b/zkvm_execution_layer/src/registry_proof_verification.rs
@@ -0,0 +1,138 @@
+use crate::dummy_proof_verifier::DummyVerifier;
+use crate::proof_verification::DynProofVerifier;
+use hashbrown::HashMap;
+use std::sync::Arc;
+use types::ExecutionProofId;
+
+/// Registry mapping proof IDs to proof verifiers
+///
+/// Each proof ID can correspond to a different zkVM/proof system, and this registry
+/// maintains the mapping from proof ID to the appropriate verifier implementation.
+#[derive(Clone)]
+pub struct VerifierRegistry {
+    verifiers: HashMap<ExecutionProofId, DynProofVerifier>,
+}
+
+impl VerifierRegistry {
+    /// Create a new empty verifier registry
+    pub fn new() -> Self {
+        Self {
+            verifiers: HashMap::new(),
+        }
+    }
+
+    /// Create a registry with dummy verifiers for all proof types
+    /// This is useful for Phase 1 testing
+    pub fn new_with_dummy_verifiers() -> Self {
+        let mut verifiers = HashMap::new();
+
+        // Register dummy verifiers for all 8 proof types
+        for id in 0..types::EXECUTION_PROOF_TYPE_COUNT {
+            if let Ok(proof_id) = ExecutionProofId::new(id) {
+                verifiers.insert(
+                    proof_id,
+                    Arc::new(DummyVerifier::new(proof_id)) as DynProofVerifier,
+                );
+            }
+        }
+
+        Self { verifiers }
+    }
+
+    /// Register a verifier for a specific proof ID
+    pub fn register_verifier(&mut self, verifier: DynProofVerifier) {
+        let subnet_id = verifier.proof_id();
+        self.verifiers.insert(subnet_id, verifier);
+    }
+
+    /// Get a verifier for a specific proof ID
+    pub fn get_verifier(&self, proof_id: ExecutionProofId) -> Option<DynProofVerifier> {
+        self.verifiers.get(&proof_id).cloned()
+    }
+
+    /// Check if a verifier is registered for a proof ID
+    pub fn has_verifier(&self, proof_id: ExecutionProofId) -> bool {
+        self.verifiers.contains_key(&proof_id)
+    }
+
+    /// Get the number of registered verifiers
+    pub fn len(&self) -> usize {
+        self.verifiers.len()
+    }
+
+    /// Check if the registry is empty
+    pub fn is_empty(&self) -> bool {
+        self.verifiers.is_empty()
+    }
+
+    /// Get all registered proof IDs
+    pub fn proof_ids(&self) -> Vec<ExecutionProofId> {
+        self.verifiers.keys().copied().collect()
+    }
+}
+
+impl Default for VerifierRegistry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_empty_registry() {
+        let registry = VerifierRegistry::new();
+        assert!(registry.is_empty());
+        assert_eq!(registry.len(), 0);
+    }
+
+    #[test]
+    fn test_dummy_verifiers_registry() {
+        let registry = VerifierRegistry::new_with_dummy_verifiers();
+        assert!(!registry.is_empty());
+        assert_eq!(registry.len(), 8); // All 8 proof types
+
+        // Check all proof IDs are registered
+        for id in 0..8 {
+            let proof_id = ExecutionProofId::new(id).unwrap();
+            assert!(registry.has_verifier(proof_id));
+            assert!(registry.get_verifier(proof_id).is_some());
+        }
+    }
+
+    #[test]
+    fn test_register_verifier() {
+        let mut registry = VerifierRegistry::new();
+        let proof_id = ExecutionProofId::new(0).unwrap();
+        let verifier = Arc::new(DummyVerifier::new(proof_id));
+
+        registry.register_verifier(verifier);
+
+        assert_eq!(registry.len(), 1);
+        assert!(registry.has_verifier(proof_id));
+    }
+
+    #[test]
+    fn test_get_verifier() {
+        let registry = VerifierRegistry::new_with_dummy_verifiers();
+        let proof_id = ExecutionProofId::new(3).unwrap();
+
+        let verifier = registry.get_verifier(proof_id);
+        assert!(verifier.is_some());
+        assert_eq!(verifier.unwrap().proof_id(), proof_id);
+    }
+
+    #[test]
+    fn test_proof_ids() {
+        let registry = VerifierRegistry::new_with_dummy_verifiers();
+        let proof_ids = registry.proof_ids();
+
+        assert_eq!(proof_ids.len(), 8);
+        for id in 0..8 {
+            let proof_id = ExecutionProofId::new(id).unwrap();
+
assert!(proof_ids.contains(&proof_id)); + } + } +}
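Taken together, the config's `min_proofs_required` and the `VerifierRegistry` imply the acceptance rule for a lightweight verifier node that runs without an execution layer. A hypothetical end-to-end check, using the module paths established by the new `lib.rs` (the helper itself is an assumption, not part of the diff):

```rust
use types::ExecutionProof;
use zkvm_execution_layer::ZKVMExecutionLayerConfig;
use zkvm_execution_layer::registry_proof_verification::VerifierRegistry;

// Decide whether enough of the proofs received for one payload verify
// successfully. A real implementation would likely dedupe by proof_id so
// that repeated proofs from the same subnet are not double-counted.
fn payload_has_enough_proofs(
    config: &ZKVMExecutionLayerConfig,
    registry: &VerifierRegistry,
    proofs: &[ExecutionProof],
) -> bool {
    let valid = proofs
        .iter()
        .filter(|proof| {
            registry
                .get_verifier(proof.proof_id)
                // Ok(true) is the only outcome counted as valid; Ok(false)
                // and Err(_) both leave the proof uncounted.
                .is_some_and(|verifier| verifier.verify(proof).unwrap_or(false))
        })
        .count();
    valid >= config.min_proofs_required
}
```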